author    Ananth N Mavinakayanahalli <ananth@in.ibm.com>  2005-11-07 04:00:07 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-07 10:53:45 -0500
commit    66ff2d0691e00e1e7bfdf398a970310c9a0fe671 (patch)
tree      f53bcd846be8fbaee5a5ee65f9bacc5b34392090 /arch/x86_64/kernel/kprobes.c
parent    b385676b355549afc9a7507ce09c7df47f166521 (diff)
[PATCH] Kprobes: rearrange preempt_disable/enable() calls
The following set of patches is aimed at improving kprobes scalability. We
currently serialize kprobe registration, unregistration and handler execution
using a single spinlock - kprobe_lock.

With these changes, kprobe handlers can run without any locks held. They also
allow simultaneous kprobe handler executions on different processors, as we
now track kprobe execution on a per-processor basis. It is now necessary that
the handlers be re-entrant, since they can run concurrently on multiple
processors.

All changes have been tested on i386, ia64, ppc64 and x86_64; sparc64 has
been compile-tested only.

The patches can be viewed as 3 logical chunks:

patch 1:     Reorder preempt_(dis/en)able calls
patches 2-7: Introduce per_cpu data areas to track kprobe execution
patches 8-9: Use RCU to synchronize kprobe (un)registration and handler
             execution.

Thanks to Maneesh Soni, James Keniston and Anil Keshavamurthy for their
review and suggestions. Thanks again to Anil, Hien Nguyen and Kevin Stafford
for testing the patches.

This patch:

Reorder preempt_disable/enable() calls in the arch kprobes files in
preparation for the locking changes. No functional changes are introduced by
this patch.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
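The re-entrancy requirement above is the key user-visible consequence of the
series: once handlers may fire concurrently on several CPUs, a handler must
not rely on unsynchronized shared state. Below is a minimal sketch of a
well-behaved handler that keeps its state per-CPU so concurrent hits never
race. It uses today's kprobes module API (.symbol_name, __this_cpu_inc)
rather than the 2005-era one, and the probed symbol kernel_clone is purely
illustrative:

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>

/* Per-CPU hit counter: each CPU updates only its own slot, so concurrent
 * handler executions on different processors never touch shared state. */
static DEFINE_PER_CPU(unsigned long, kp_hits);

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Preemption is disabled while we run, so this_cpu access is safe.
	 * No locks are taken: the handler is re-entrant by construction. */
	__this_cpu_inc(kp_hits);
	return 0;	/* 0 = let the probed instruction be single-stepped as usual */
}

static struct kprobe kp = {
	.symbol_name = "kernel_clone",	/* illustrative probe point */
	.pre_handler = handler_pre,
};

static int __init kp_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");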
Diffstat (limited to 'arch/x86_64/kernel/kprobes.c')
-rw-r--r--  arch/x86_64/kernel/kprobes.c  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 76a28b007be9..ebfa2c9241ca 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -302,9 +302,6 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 	int ret = 0;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
 
-	/* We're in an interrupt, but this is clear and BUG()-safe. */
-	preempt_disable();
-
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
 		/* We *are* holding lock here, so this is safe.
@@ -372,6 +369,11 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
+	/*
+	 * This preempt_disable() matches the preempt_enable_no_resched()
+	 * in post_kprobe_handler()
+	 */
+	preempt_disable();
 	kprobe_status = KPROBE_HIT_ACTIVE;
 	set_current_kprobe(p, regs);
 
@@ -385,7 +387,6 @@ ss_probe:
 	return 1;
 
 no_kprobe:
-	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -456,7 +457,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	/*
 	 * By returning a non-zero value, we are telling
 	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption.
+	 * and re-enabling preemption
 	 */
 	return 1;
 }
@@ -599,29 +600,29 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 				       unsigned long val, void *data)
 {
 	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	preempt_disable();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
-			return NOTIFY_STOP;
+			ret = NOTIFY_STOP;
 		break;
 	case DIE_DEBUG:
 		if (post_kprobe_handler(args->regs))
-			return NOTIFY_STOP;
+			ret = NOTIFY_STOP;
 		break;
 	case DIE_GPF:
-		if (kprobe_running() &&
-		    kprobe_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_STOP;
-		break;
 	case DIE_PAGE_FAULT:
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_STOP;
+			ret = NOTIFY_STOP;
 		break;
 	default:
 		break;
 	}
-	return NOTIFY_DONE;
+	preempt_enable();
+	return ret;
 }
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -647,7 +648,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
 void __kprobes jprobe_return(void)
 {
-	preempt_enable_no_resched();
 	asm volatile ("       xchg   %%rbx,%%rsp     \n"
 		      "       int3			\n"
 		      "       .globl jprobe_return_end	\n"
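The kprobe_exceptions_notify hunk above shows the pattern the patch applies:
convert early returns into a single exit path so the preempt_disable() /
preempt_enable() pair always balances, whichever case runs. A stand-alone
sketch of that refactoring follows; the names mirror the kernel's, but the
stubs are user-space stand-ins written only to make the balance checkable,
not the kernel's actual code:

#include <stdio.h>

/* User-space stand-ins for the kernel primitives; here they just
 * count nesting depth so the balance can be verified. */
static int preempt_depth;
static void preempt_disable(void) { preempt_depth++; }
static void preempt_enable(void)  { preempt_depth--; }

enum { NOTIFY_DONE, NOTIFY_STOP };
enum { DIE_INT3, DIE_DEBUG, DIE_PAGE_FAULT };

static int notify_sketch(int val)
{
	int ret = NOTIFY_DONE;

	preempt_disable();
	switch (val) {
	case DIE_INT3:
		/* was: return NOTIFY_STOP; -- an early return would skip
		 * the enable below and leave preemption disabled */
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	preempt_enable();	/* the single exit keeps the pair balanced */
	return ret;
}

int main(void)
{
	notify_sketch(DIE_INT3);
	printf("preempt depth after call: %d\n", preempt_depth);	/* prints 0 */
	return 0;
}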