author     Ananth N Mavinakayanahalli <ananth@in.ibm.com>   2005-11-07 04:00:14 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>            2005-11-07 10:53:46 -0500
commit     d217d5450f11d8c907c0458d175b0dc999b4d06d (patch)
tree       ebd68eb9e0b632049d22240a3589887ca27077a5 /arch/x86_64/kernel/kprobes.c
parent     991a51d83a3d9bebfafdd1e692cf310899d60791 (diff)
[PATCH] Kprobes: preempt_disable/enable() simplification
Reorganize the preempt_disable/enable calls to eliminate the extra preempt
depth. Changes based on Paul McKenney's review suggestions for the kprobes
RCU changeset.
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/kprobes.c')
-rw-r--r--  arch/x86_64/kernel/kprobes.c | 29 +++++++++++++++--------------
1 files changed, 15 insertions, 14 deletions
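In rough outline, the reorganized x86_64 entry path now looks like the sketch below. This is a simplified, hypothetical rendering for illustration only, not the full function body: reentry handling and the fault cases are elided, and the helpers (get_kprobe(), set_current_kprobe(), prepare_singlestep()) are used only as they appear in the surrounding file. The point is that preempt_disable() is now taken exactly once, before any per-CPU state (get_kprobe_ctlblk(), kprobe_running()) is touched, and every exit that does not go on to single-step gives it back with preempt_enable_no_resched(); the single-step path keeps preemption off until post_kprobe_handler(), as the comment deleted in the second hunk notes.

/*
 * Simplified sketch of the reorganized flow -- not the actual function
 * body; recursion handling and fault paths are omitted.
 */
int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr =
		(kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
	struct kprobe_ctlblk *kcb;

	/*
	 * Disable preemption for the whole time we process the probe:
	 * get_kprobe_ctlblk() and kprobe_running() use per-CPU data, so
	 * we must not migrate to another CPU.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	p = get_kprobe(addr);
	if (!p)
		goto no_kprobe;			/* not our breakpoint */

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* e.g. a jprobe: longjmp_break_handler() re-enables later */
		return 1;

	/*
	 * Single-step the copied instruction; preemption stays disabled
	 * until post_kprobe_handler() runs preempt_enable_no_resched().
	 */
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();		/* balance the entry disable */
	return ret;
}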
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 9bef2c8dc12c..dddeb678b440 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -286,16 +286,19 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 	}
 }
 
-/*
- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
- * remain disabled thorough out this function.
- */
 int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
@@ -359,11 +362,6 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler()
-	 */
-	preempt_disable();
 	set_current_kprobe(p, regs, kcb);
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
@@ -377,6 +375,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -448,8 +447,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
@@ -594,7 +593,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	rcu_read_lock();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
@@ -606,14 +604,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		break;
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -675,6 +675,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 		*regs = kcb->jprobe_saved_regs;
 		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
 		       MIN_STACK_SIZE(stack_addr));
+		preempt_enable_no_resched();
 		return 1;
 	}
 	return 0;
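The other half of the pairing is not visible in this diff: the comment removed from kprobe_handler() in the second hunk says the entry-side preempt_disable() is matched by a preempt_enable_no_resched() in post_kprobe_handler(). A hypothetical sketch of where that balance point sits, assuming the conventional structure of post_kprobe_handler() in this file (its body is unchanged by the patch and not shown above):

/*
 * Hypothetical sketch only -- post_kprobe_handler() is not modified by
 * this patch, and its real body handles more cases (reentrant probes,
 * eflags restore) than shown here.
 */
int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if (cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);	/* fix up rip after single-step */
	reset_current_kprobe();

	/* drops the reference taken at the top of kprobe_handler() */
	preempt_enable_no_resched();
	return 1;
}

In the jprobe case the same single preemption reference is instead dropped by the preempt_enable_no_resched() that this patch adds to longjmp_break_handler(), shown in the final hunk.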