Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
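For reference, the mechanical mapping the conversion scripts apply looks
roughly like the sketch below. This is not part of the patch; the lock and
function names are hypothetical and only illustrate the before/after shape
of a semaphore that was used purely as a sleeping lock.

/*
 * Sketch only: the old code declared a count-1 semaphore and used it
 * strictly for mutual exclusion:
 *
 *     static DECLARE_MUTEX(foo_sem);    // a struct semaphore, count == 1
 *     down(&foo_sem);
 *     ... critical section ...
 *     up(&foo_sem);
 *
 * The script rewrites that to the dedicated mutex API:
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);		/* hypothetical lock name   */
static int foo_count;			/* hypothetical shared data */

static void foo_update(void)
{
	mutex_lock(&foo_lock);		/* was: down(&foo_sem) */
	foo_count++;			/* protected update    */
	mutex_unlock(&foo_lock);	/* was: up(&foo_sem)   */
}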
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- down(&kprobe_mutex);
+ mutex_lock(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
}
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- down(&kprobe_mutex);
+ mutex_lock(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- down(&kprobe_mutex);
+ mutex_lock(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
}
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
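The per-architecture arch_remove_kprobe() hunks above are all the same
mechanical rewrite: a short critical section around returning the probe's
instruction slot. A minimal sketch of that pattern, not taken from the
patch, with a hypothetical helper standing in for free_insn_slot():

#include <linux/mutex.h>

static DEFINE_MUTEX(slot_lock);		/* hypothetical, mirrors kprobe_mutex */

static void release_slot(void *slot)	/* hypothetical stand-in for free_insn_slot() */
{
	/* return the slot to a shared pool; must not race with allocation */
}

static void remove_probe_slot(void *slot)
{
	mutex_lock(&slot_lock);		/* serialize against other (un)registrations */
	release_slot(slot);
	mutex_unlock(&slot_lock);
}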
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/mutex.h>
#ifdef CONFIG_KPROBES
#include <asm/kprobes.h>
};
extern spinlock_t kretprobe_lock;
-extern struct semaphore kprobe_mutex;
+extern struct mutex kprobe_mutex;
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
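The header change pairs with the definition in kernel/kprobes.c below: the
header gains #include <linux/mutex.h> and declares the lock, while exactly
one .c file defines it. A minimal sketch of that split, using hypothetical
file and lock names, not taken from the patch:

/* hypothetical header, e.g. some_subsys.h */
#include <linux/mutex.h>

extern struct mutex some_subsys_mutex;	/* declaration: visible to all users */

/* hypothetical implementation file, e.g. some_subsys.c */
DEFINE_MUTEX(some_subsys_mutex);	/* one definition, statically initialized */

static void some_subsys_op(void)
{
	mutex_lock(&some_subsys_mutex);
	/* ... touch state shared across files ... */
	mutex_unlock(&some_subsys_mutex);
}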
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
+DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
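Note that the file now keeps both lock types side by side: DEFINE_MUTEX()
produces a statically initialized struct mutex (a sleeping lock for process
context), while DEFINE_SPINLOCK() remains for data that is also touched from
contexts that cannot sleep. A minimal sketch of that combination, with
hypothetical names and not taken from the patch:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(cfg_mutex);		/* hypothetical: guards slow-path configuration   */
static DEFINE_SPINLOCK(fast_lock);	/* hypothetical: guards data hit in atomic context */
static int cfg_value;
static int fast_counter;

static void cfg_update(int v)
{
	mutex_lock(&cfg_mutex);		/* may sleep: process context only */
	cfg_value = v;
	mutex_unlock(&cfg_mutex);
}

static void fast_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&fast_lock, flags);	/* safe where sleeping is not allowed */
	fast_counter++;
	spin_unlock_irqrestore(&fast_lock, flags);
}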
}
p->nmissed = 0;
- down(&kprobe_mutex);
+ mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
arch_arm_kprobe(p);
out:
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
if (ret && probed_mod)
module_put(probed_mod);
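The registration path keeps its single-exit error handling: everything
between the lock and the out: label runs with kprobe_mutex held, and every
path funnels through one unlock. A hedged sketch of that shape, with
hypothetical names and not taken from the patch:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(reg_lock);			/* hypothetical, mirrors kprobe_mutex */

static int lookup_existing(void *addr) { return 0; }	/* hypothetical helpers */
static int insert_probe(void *addr)    { return 0; }

static int register_probe(void *addr)
{
	int ret = 0;

	mutex_lock(&reg_lock);
	if (lookup_existing(addr)) {		/* already registered? */
		ret = -EEXIST;
		goto out;
	}
	ret = insert_probe(addr);		/* add to the shared table */
out:
	mutex_unlock(&reg_lock);		/* single unlock on every path */
	return ret;
}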
struct kprobe *old_p, *list_p;
int cleanup_p;
- down(&kprobe_mutex);
+ mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (unlikely(!old_p)) {
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
return;
}
if (p != old_p) {
if (list_p == p)
/* kprobe p is a valid probe */
goto valid_p;
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
return;
}
valid_p:
cleanup_p = 0;
}
- up(&kprobe_mutex);
+ mutex_unlock(&kprobe_mutex);
synchronize_sched();
if (p->mod_refcounted &&
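The unregister path drops kprobe_mutex before waiting for the grace period,
so the potentially long synchronize_sched() wait does not block other
registrations. A hedged sketch of that ordering, with hypothetical names and
a hypothetical unlink helper, not taken from the patch:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static DEFINE_MUTEX(tbl_lock);		/* hypothetical, mirrors kprobe_mutex */

static void table_unlink(void *p)	/* hypothetical: removes p from the   */
{					/* shared table; caller holds tbl_lock */
}

static void remove_probe(void *p)
{
	mutex_lock(&tbl_lock);
	table_unlink(p);		/* unlink while holding the lock */
	mutex_unlock(&tbl_lock);	/* drop the lock before sleeping in RCU */

	synchronize_sched();		/* wait until no CPU can still be running
					   a handler that observed p */
	kfree(p);			/* now safe to free */
}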