author	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>	2006-01-09 23:52:43 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-10 11:01:40 -0500
commit	49a2a1b83ba6fa40c41968d6a28ba16e7ed0c3f7 (patch)
tree	f257b535d0f09f9ac2531d40feb732349993665c
parent	41dead49ccb4d7f0a34d56478f487342a3c3ab2b (diff)
[PATCH] kprobes: changed from using spinlock to mutex
Since the kprobes runtime exception handlers are now lock-free (this code path uses RCU to walk the kprobe list), there is no need for register_kprobe()/unregister_kprobe() to use spin_lock_irqsave()/spin_unlock_irqrestore().  Serializing registration and unregistration is now possible with just a mutex.

In the process, this patch also fixes a minor memory leak for x86_64 and powerpc.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
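The locking pattern the patch moves to can be sketched roughly as follows.  This is an illustrative fragment only, not code from the patch: the demo_* names are made up, and it simply shows lock-free readers walking an RCU-protected hlist while a single semaphore-style mutex (the down()/up() API of this kernel era) serializes updaters, with synchronize_sched() bridging the two on teardown:

/*
 * Sketch of the scheme this patch adopts (hypothetical demo_* names):
 * lookups run lock-free under RCU; only updaters take the mutex.
 * Readers must be inside an RCU read-side critical section or run
 * with preemption disabled, as the kprobes exception path does.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <asm/semaphore.h>

struct demo_probe {
	void *addr;
	struct hlist_node hlist;
};

static struct hlist_head demo_table;	/* hypothetical probe table */
static DECLARE_MUTEX(demo_mutex);	/* protects demo_table updates only */

/* Exception-path lookup: no lock taken, safe against concurrent updates */
static struct demo_probe *demo_lookup(void *addr)
{
	struct demo_probe *dp;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(dp, node, &demo_table, hlist)
		if (dp->addr == addr)
			return dp;
	return NULL;
}

/* Registration: the mutex only keeps updaters from racing each other */
static void demo_register(struct demo_probe *dp)
{
	down(&demo_mutex);
	hlist_add_head_rcu(&dp->hlist, &demo_table);
	up(&demo_mutex);
}

static void demo_unregister(struct demo_probe *dp)
{
	down(&demo_mutex);
	hlist_del_rcu(&dp->hlist);
	up(&demo_mutex);
	synchronize_sched();	/* wait out any in-flight lock-free walkers */
	/* now safe to release dp's resources */
}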
-rw-r--r--	arch/i386/kernel/kprobes.c	6
-rw-r--r--	arch/powerpc/kernel/kprobes.c	14
-rw-r--r--	arch/sparc64/kernel/kprobes.c	6
-rw-r--r--	arch/x86_64/kernel/kprobes.c	7
-rw-r--r--	include/asm-ia64/kprobes.h	5
-rw-r--r--	include/linux/kprobes.h	1
-rw-r--r--	kernel/kprobes.c	91
7 files changed, 53 insertions(+), 77 deletions(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 19edcd526ba4..68fe10250486 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -58,13 +58,9 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
-	return 0;
-}
-
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	p->opcode = *p->addr;
+	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 5368f9c2e6bf..331e169e8629 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -60,13 +60,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		if (!p->ainsn.insn)
 			ret = -ENOMEM;
 	}
-	return ret;
-}
 
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	p->opcode = *p->addr;
+	if (!ret) {
+		memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+		p->opcode = *p->addr;
+	}
+
+	return ret;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -85,9 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index a97b0f0727ab..bbd5aa6818ea 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -43,14 +43,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
-	return 0;
-}
-
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
 	p->ainsn.insn[0] = *p->addr;
 	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
 	p->opcode = *p->addr;
+	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index afe11f4fbd1d..8b8943bfb89e 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -42,8 +42,8 @@
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
 
-static DECLARE_MUTEX(kprobe_mutex);
 void jprobe_return_end(void);
+void __kprobes arch_copy_kprobe(struct kprobe *p);
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -69,12 +69,11 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	/* insn: must be on special executable page on x86_64. */
-	down(&kprobe_mutex);
 	p->ainsn.insn = get_insn_slot();
-	up(&kprobe_mutex);
 	if (!p->ainsn.insn) {
 		return -ENOMEM;
 	}
+	arch_copy_kprobe(p);
 	return 0;
 }
 
@@ -223,9 +222,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 5b26462674a7..12a0b93020da 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -110,11 +110,6 @@ struct arch_specific_insn {
 	unsigned short target_br_reg;
 };
 
-/* ia64 does not need this */
-static inline void arch_copy_kprobe(struct kprobe *p)
-{
-}
-
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				unsigned long val, void *data);
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index c03f2dc933de..ad6e4fe970fd 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -150,7 +150,6 @@ struct kretprobe_instance {
 
 extern spinlock_t kretprobe_lock;
 extern int arch_prepare_kprobe(struct kprobe *p);
-extern void arch_copy_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
 extern void arch_disarm_kprobe(struct kprobe *p);
 extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3897630d2335..f14ccd35e9b6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,7 +48,7 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-static DEFINE_SPINLOCK(kprobe_lock);	/* Protects kprobe_table */
+static DECLARE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
@@ -167,7 +167,7 @@ static inline void reset_kprobe_instance(void)
 
 /*
  * This routine is called either:
- *	- under the kprobe_lock spinlock - during kprobe_[un]register()
+ *	- under the kprobe_mutex - during kprobe_[un]register()
  *				OR
  *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
@@ -420,7 +420,6 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spin_lock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 					  struct kprobe *p)
@@ -442,25 +441,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	return ret;
 }
 
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
-{
-	arch_disarm_kprobe(p);
-	hlist_del_rcu(&p->hlist);
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-	arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-		struct kprobe *p, unsigned long flags)
-{
-	list_del_rcu(&p->list);
-	if (list_empty(&old_p->list))
-		cleanup_kprobe(old_p, flags);
-	else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
 	if (addr >= (unsigned long)__kprobes_text_start
@@ -472,7 +452,6 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 int __kprobes register_kprobe(struct kprobe *p)
 {
 	int ret = 0;
-	unsigned long flags = 0;
 	struct kprobe *old_p;
 	struct module *mod;
 
@@ -484,18 +463,17 @@ int __kprobes register_kprobe(struct kprobe *p)
 	    (unlikely(!try_module_get(mod))))
 		return -EINVAL;
 
-	if ((ret = arch_prepare_kprobe(p)) != 0)
-		goto rm_kprobe;
-
 	p->nmissed = 0;
-	spin_lock_irqsave(&kprobe_lock, flags);
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}
 
-	arch_copy_kprobe(p);
+	if ((ret = arch_prepare_kprobe(p)) != 0)
+		goto out;
+
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
@@ -503,10 +481,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	arch_arm_kprobe(p);
 
 out:
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
-	if (ret == -EEXIST)
-		arch_remove_kprobe(p);
+	up(&kprobe_mutex);
+
 	if (ret && mod)
 		module_put(mod);
 	return ret;
@@ -514,29 +490,48 @@ rm_kprobe:
 
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
-	unsigned long flags;
-	struct kprobe *old_p;
 	struct module *mod;
+	struct kprobe *old_p, *cleanup_p;
 
-	spin_lock_irqsave(&kprobe_lock, flags);
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
-	if (old_p) {
-		/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
-		if (old_p->pre_handler == aggr_pre_handler)
-			cleanup_aggr_kprobe(old_p, p, flags);
-		else
-			cleanup_kprobe(p, flags);
+	if (unlikely(!old_p)) {
+		up(&kprobe_mutex);
+		return;
+	}
 
-		synchronize_sched();
+	if ((old_p->pre_handler == aggr_pre_handler) &&
+	    (p->list.next == &old_p->list) &&
+	    (p->list.prev == &old_p->list)) {
+		/* Only one element in the aggregate list */
+		arch_disarm_kprobe(p);
+		hlist_del_rcu(&old_p->hlist);
+		cleanup_p = old_p;
+	} else if (old_p == p) {
+		/* Only one kprobe element in the hash list */
+		arch_disarm_kprobe(p);
+		hlist_del_rcu(&p->hlist);
+		cleanup_p = p;
+	} else {
+		list_del_rcu(&p->list);
+		cleanup_p = NULL;
+	}
 
-		if ((mod = module_text_address((unsigned long)p->addr)))
-			module_put(mod);
+	up(&kprobe_mutex);
 
-		if (old_p->pre_handler == aggr_pre_handler &&
-				list_empty(&old_p->list))
+	synchronize_sched();
+	if ((mod = module_text_address((unsigned long)p->addr)))
+		module_put(mod);
+
+	if (cleanup_p) {
+		if (cleanup_p->pre_handler == aggr_pre_handler) {
+			list_del_rcu(&p->list);
 			kfree(old_p);
-	} else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
+		}
+		down(&kprobe_mutex);
+		arch_remove_kprobe(p);
+		up(&kprobe_mutex);
+	}
 }
 
 static struct notifier_block kprobe_exceptions_nb = {