aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86_64
diff options
context:
space:
mode:
authorAnanth N Mavinakayanahalli <ananth@in.ibm.com>2005-11-07 04:00:14 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:46 -0500
commit991a51d83a3d9bebfafdd1e692cf310899d60791 (patch)
tree4cc6eaa2a868838e59c7737da9868f2358f2bb19 /arch/x86_64
parent3516a46042508a495fac13c2e73530d936ebe015 (diff)
[PATCH] Kprobes: Use RCU for (un)register synchronization - arch changes
Changes to the arch kprobes infrastructure to take advantage of the locking
changes introduced by usage of RCU for synchronization. All handlers are now
run without any locks held, so they have to be re-entrant or provide their
own synchronization.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64')
-rw-r--r--arch/x86_64/kernel/kprobes.c25
1 files changed, 6 insertions, 19 deletions
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 6cb40d133b7c..9bef2c8dc12c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -34,7 +34,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/preempt.h>
@@ -266,6 +265,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = (unsigned long)p->ainsn.insn;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
@@ -299,15 +299,12 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
 			if (kcb->kprobe_status == KPROBE_HIT_SS &&
 				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 				regs->eflags &= ~TF_MASK;
 				regs->eflags |= kcb->kprobe_saved_rflags;
-				unlock_kprobes();
 				goto no_kprobe;
 			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 				/* TODO: Provide re-entrancy from
@@ -340,14 +337,11 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			goto ss_probe;
 		}
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}
 
-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -406,9 +400,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 
 	/*
@@ -448,7 +443,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = orig_ret_address;
 
 	reset_current_kprobe();
-	unlock_kprobes();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
 
 	/*
@@ -536,10 +531,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 	}
 }
 
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
- */
 int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
@@ -560,8 +551,6 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	if (kcb->kprobe_status == KPROBE_REENTER) {
 		restore_previous_kprobe(kcb);
 		goto out;
-	} else {
-		unlock_kprobes();
 	}
 	reset_current_kprobe();
 out:
@@ -578,7 +567,6 @@ out:
 	return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
@@ -592,7 +580,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		regs->eflags |= kcb->kprobe_old_rflags;
 
 		reset_current_kprobe();
-		unlock_kprobes();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -607,7 +594,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	preempt_disable();
+	rcu_read_lock();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
@@ -626,7 +613,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable();
+	rcu_read_unlock();
 	return ret;
 }
 