author     Jiri Olsa <jolsa@redhat.com>       2011-05-11 07:06:13 -0400
committer  Ingo Molnar <mingo@elte.hu>        2011-05-11 07:21:23 -0400
commit     9bbeacf52f66d165739a4bbe9c018d17493a74b5 (patch)
tree       d47a268e184907bc1c962e91f361907f29b4093b
parent     693d92a1bbc9e42681c42ed190bd42b636ca876f (diff)
kprobes, x86: Disable irqs during optimized callback
Disable irqs during the optimized callback, so we don't miss any in-irq kprobes.
The following commands:
# cd /debug/tracing/
# echo "p mutex_unlock" >> kprobe_events
# echo "p _raw_spin_lock" >> kprobe_events
# echo "p smp_apic_timer_interrupt" >> ./kprobe_events
# echo 1 > events/enable
cause the optimized kprobes to be missed. None are missed
with the fix applied.
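For context, the same probes the ftrace kprobe_events interface sets up above can also be installed programmatically through the kprobes API, which is what makes the hit/miss accounting visible. Below is a minimal sketch (not part of the patch; the probed symbol, the hit counter, and the function names are illustrative) of a module that registers one probe and reports how many hits were missed while another handler was running:

	#include <linux/module.h>
	#include <linux/kprobes.h>

	/* Probe the same symbol as the reproduction above (illustrative). */
	static struct kprobe kp = {
		.symbol_name = "mutex_unlock",
	};

	static unsigned long hits;

	/* Runs before the probed instruction; kept trivial on purpose. */
	static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		hits++;		/* racy, but fine for a sketch */
		return 0;
	}

	static int __init kp_init(void)
	{
		kp.pre_handler = kp_pre_handler;
		return register_kprobe(&kp);
	}

	static void __exit kp_exit(void)
	{
		unregister_kprobe(&kp);
		/* kp.nmissed counts hits skipped while a handler was active. */
		pr_info("hits %lu, missed %lu\n", hits, kp.nmissed);
	}

	module_init(kp_init);
	module_exit(kp_exit);
	MODULE_LICENSE("GPL");

The nmissed counter is the same accounting that kprobes_inc_nmissed_count() bumps in the callback patched below, and the per-probe totals also show up in the tracing debugfs kprobe_profile file.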
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Link: http://lkml.kernel.org/r/20110511110613.GB2390@jolsa.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/kprobes.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index c969fd9d1566..f1a6244d7d93 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 					 struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long flags;
 
 	/* This is possible if op is under delayed unoptimizing */
 	if (kprobe_disabled(&op->kp))
 		return;
 
-	preempt_disable();
+	local_irq_save(flags);
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
 	}
-	preempt_enable_no_resched();
+	local_irq_restore(flags);
 }
 
 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
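The substance of the change is which primitive guards the per-CPU kprobe state. A minimal sketch of the distinction, assuming x86 kernel context (the function and its body are illustrative, not taken from the patch):

	#include <linux/irqflags.h>
	#include <linux/preempt.h>

	/*
	 * Illustrative only: contrasts the two critical-section primitives.
	 *
	 * preempt_disable()/preempt_enable_no_resched() only pin the task to
	 * this CPU; a hardware interrupt can still fire inside the section,
	 * and a kprobe hit in that interrupt handler would see current_kprobe
	 * set and be counted as missed.
	 *
	 * local_irq_save()/local_irq_restore() additionally mask interrupts
	 * on the local CPU (saving the previous IRQ state in 'flags'), so no
	 * in-irq kprobe can nest inside the section.
	 */
	static void guarded_section_sketch(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* IRQs off, prior state saved */
		/* ... manipulate per-CPU kprobe state, run pre-handlers ... */
		local_irq_restore(flags);	/* restore the saved IRQ state */
	}

Note that masking local interrupts also rules out preemption on this CPU, so local_irq_save()/local_irq_restore() subsumes the old preempt_disable() pairing rather than merely replacing it.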