locking, kprobes: Annotate the hash locks and kretprobe.lock as raw
The kprobe locks can be taken in atomic context and therefore cannot become the sleeping locks that -rt substitutes for spinlock_t; annotate them as raw. In mainline this change documents the low-level nature of the locks; otherwise there is no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 76bf6877fe
commit ec484608c5
2 changed files with 18 additions and 18 deletions
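The change itself is mechanical: every spinlock_t in the kretprobe paths becomes a raw_spinlock_t, and every spin_* call becomes its raw_spin_* counterpart. As a minimal standalone sketch of the pattern (demo_raw_lock and demo() are illustrative names, not part of this commit):

#include <linux/spinlock.h>

/* On PREEMPT_RT a spinlock_t is substituted by a sleeping lock, which
 * must not be taken in atomic context. A raw_spinlock_t always remains
 * a true spinning lock, so it is safe in the contexts kprobes run in. */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);

static void demo(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_raw_lock, flags);
        /* critical section; may execute with preemption and IRQs disabled */
        raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}

On mainline both lock types behave identically; the raw_ prefix only matters once -rt substitutes sleeping locks for spinlock_t.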
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -181,7 +181,7 @@ struct kretprobe {
         int nmissed;
         size_t data_size;
         struct hlist_head free_instances;
-        spinlock_t lock;
+        raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-        spinlock_t lock ____cacheline_aligned_in_smp;
+        raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
         return &(kretprobe_table_locks[hash].lock);
 }
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
         hlist_del(&ri->hlist);
         INIT_HLIST_NODE(&ri->hlist);
         if (likely(rp)) {
-                spin_lock(&rp->lock);
+                raw_spin_lock(&rp->lock);
                 hlist_add_head(&ri->hlist, &rp->free_instances);
-                spin_unlock(&rp->lock);
+                raw_spin_unlock(&rp->lock);
         } else
                 /* Unregistering */
                 hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
         __acquires(hlist_lock)
 {
         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-        spinlock_t *hlist_lock;
+        raw_spinlock_t *hlist_lock;
 
         *head = &kretprobe_inst_table[hash];
         hlist_lock = kretprobe_table_lock_ptr(hash);
-        spin_lock_irqsave(hlist_lock, *flags);
+        raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
         unsigned long *flags)
         __acquires(hlist_lock)
 {
-        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-        spin_lock_irqsave(hlist_lock, *flags);
+        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+        raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
         __releases(hlist_lock)
 {
         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-        spinlock_t *hlist_lock;
+        raw_spinlock_t *hlist_lock;
 
         hlist_lock = kretprobe_table_lock_ptr(hash);
-        spin_unlock_irqrestore(hlist_lock, *flags);
+        raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
         unsigned long *flags)
         __releases(hlist_lock)
 {
-        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-        spin_unlock_irqrestore(hlist_lock, *flags);
+        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+        raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
         /*TODO: consider to only swap the RA after the last pre_handler fired */
         hash = hash_ptr(current, KPROBE_HASH_BITS);
-        spin_lock_irqsave(&rp->lock, flags);
+        raw_spin_lock_irqsave(&rp->lock, flags);
         if (!hlist_empty(&rp->free_instances)) {
                 ri = hlist_entry(rp->free_instances.first,
                                 struct kretprobe_instance, hlist);
                 hlist_del(&ri->hlist);
-                spin_unlock_irqrestore(&rp->lock, flags);
+                raw_spin_unlock_irqrestore(&rp->lock, flags);
 
                 ri->rp = rp;
                 ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                 kretprobe_table_unlock(hash, &flags);
         } else {
                 rp->nmissed++;
-                spin_unlock_irqrestore(&rp->lock, flags);
+                raw_spin_unlock_irqrestore(&rp->lock, flags);
         }
         return 0;
 }
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
                 rp->maxactive = num_possible_cpus();
 #endif
         }
-        spin_lock_init(&rp->lock);
+        raw_spin_lock_init(&rp->lock);
         INIT_HLIST_HEAD(&rp->free_instances);
         for (i = 0; i < rp->maxactive; i++) {
                 inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                 INIT_HLIST_HEAD(&kprobe_table[i]);
                 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-                spin_lock_init(&(kretprobe_table_locks[i].lock));
+                raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
         }
 
         /*
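For context, a minimal sketch of the user-facing side whose internals these hunks convert. A kretprobe return handler runs from the breakpoint/trampoline path, i.e. in atomic context, which is why rp->lock and the hash-table locks above must remain true spinning locks (module and probed-symbol names here are illustrative, not part of this commit):

#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs in atomic context on function return; around this call the
 * kprobes core takes rp->lock and the kretprobe hash lock with the
 * raw_spin_* API introduced by this patch. */
static int demo_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        return 0;
}

static struct kretprobe demo_rp = {
        .handler        = demo_ret_handler,
        .kp.symbol_name = "do_fork",    /* illustrative probe target */
        .maxactive      = 20,
};

static int __init demo_init(void)
{
        /* register_kretprobe() initializes rp->lock via raw_spin_lock_init() */
        return register_kretprobe(&demo_rp);
}

static void __exit demo_exit(void)
{
        unregister_kretprobe(&demo_rp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");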