-rw-r--r--  arch/i386/kernel/kprobes.c     22
-rw-r--r--  arch/ia64/kernel/kprobes.c     16
-rw-r--r--  arch/ppc64/kernel/kprobes.c    24
-rw-r--r--  arch/sparc64/kernel/kprobes.c  14
-rw-r--r--  arch/x86_64/kernel/kprobes.c   25
5 files changed, 27 insertions, 74 deletions
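Every file below follows the same pattern: the lock_kprobes()/unlock_kprobes() calls and the <linux/spinlock.h> include go away, kretprobe bookkeeping in arch_prepare_kretprobe() and trampoline_probe_handler() is serialized by kretprobe_lock taken with spin_lock_irqsave(), and kprobe_exceptions_notify() wraps its dispatch in an RCU read-side critical section instead of preempt_disable()/preempt_enable(). The C sketch below is illustrative only: the helper names are invented for the example, and kretprobe_lock is defined locally so the fragment is self-contained, whereas the arch code in this diff relies on the lock provided by the generic kprobes layer.

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Stand-in definition so this sketch is self-contained; the arch files in
 * this diff use the kretprobe_lock exported by the generic kprobes code. */
static DEFINE_SPINLOCK(kretprobe_lock);

/* kretprobe bookkeeping: the per-task instance list is walked under
 * kretprobe_lock with interrupts disabled, as the trampoline handlers do. */
static void trampoline_walk_sketch(void)
{
        unsigned long flags;

        spin_lock_irqsave(&kretprobe_lock, flags);
        /* ... walk kretprobe_inst_table_head(current), recycle instances ... */
        spin_unlock_irqrestore(&kretprobe_lock, flags);
}

/* Exception notifier: an RCU read-side section replaces the old
 * preempt_disable()/preempt_enable() pair around handler dispatch. */
static int notify_sketch(void)
{
        int ret = 0;    /* NOTIFY_DONE in the real notifiers */

        rcu_read_lock();
        /* ... hand off to kprobe_handler() / post_kprobe_handler() ... */
        rcu_read_unlock();
        return ret;
}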
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 99565a66915d..ad469299267a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -31,7 +31,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
@@ -123,6 +122,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 struct pt_regs *regs)
 {
@@ -168,15 +168,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 }
 /* Check we're not actually recursing */
 if (kprobe_running()) {
-/* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
 p = get_kprobe(addr);
 if (p) {
 if (kcb->kprobe_status == KPROBE_HIT_SS &&
 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 regs->eflags &= ~TF_MASK;
 regs->eflags |= kcb->kprobe_saved_eflags;
-unlock_kprobes();
 goto no_kprobe;
 }
 /* We have reentered the kprobe_handler(), since
@@ -197,14 +194,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 goto ss_probe;
 }
 }
-/* If it's not ours, can't be delete race, (we hold lock). */
 goto no_kprobe;
 }
 
-lock_kprobes();
 p = get_kprobe(addr);
 if (!p) {
-unlock_kprobes();
 if (regs->eflags & VM_MASK) {
 /* We are in virtual-8086 mode. Return 0 */
 goto no_kprobe;
@@ -268,9 +262,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 struct kretprobe_instance *ri = NULL;
 struct hlist_head *head;
 struct hlist_node *node, *tmp;
-unsigned long orig_ret_address = 0;
+unsigned long flags, orig_ret_address = 0;
 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
+spin_lock_irqsave(&kretprobe_lock, flags);
 head = kretprobe_inst_table_head(current);
 
 /*
@@ -310,7 +305,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 regs->eip = orig_ret_address;
 
 reset_current_kprobe();
-unlock_kprobes();
+spin_unlock_irqrestore(&kretprobe_lock, flags);
 preempt_enable_no_resched();
 
 /*
@@ -395,7 +390,7 @@ static void __kprobes resume_execution(struct kprobe *p,
 
 /*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
+ * remain disabled thoroughout this function.
 */
 static inline int post_kprobe_handler(struct pt_regs *regs)
 {
@@ -419,7 +414,6 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 goto out;
 }
 reset_current_kprobe();
-unlock_kprobes();
 out:
 preempt_enable_no_resched();
 
@@ -434,7 +428,6 @@ out:
 return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 struct kprobe *cur = kprobe_running();
@@ -448,7 +441,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 regs->eflags |= kcb->kprobe_old_eflags;
 
 reset_current_kprobe();
-unlock_kprobes();
 preempt_enable_no_resched();
 }
 return 0;
@@ -463,7 +455,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 struct die_args *args = (struct die_args *)data;
 int ret = NOTIFY_DONE;
 
-preempt_disable();
+rcu_read_lock();
 switch (val) {
 case DIE_INT3:
 if (kprobe_handler(args->regs))
@@ -482,7 +474,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 default:
 break;
 }
-preempt_enable();
+rcu_read_unlock();
 return ret;
 }
 
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 17e70b1b8d79..fddbac32d44a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -26,7 +26,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/preempt.h>
@@ -343,10 +342,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 struct kretprobe_instance *ri = NULL;
 struct hlist_head *head;
 struct hlist_node *node, *tmp;
-unsigned long orig_ret_address = 0;
+unsigned long flags, orig_ret_address = 0;
 unsigned long trampoline_address =
 ((struct fnptr *)kretprobe_trampoline)->ip;
 
+spin_lock_irqsave(&kretprobe_lock, flags);
 head = kretprobe_inst_table_head(current);
 
 /*
@@ -386,7 +386,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 regs->cr_iip = orig_ret_address;
 
 reset_current_kprobe();
-unlock_kprobes();
+spin_unlock_irqrestore(&kretprobe_lock, flags);
 preempt_enable_no_resched();
 
 /*
@@ -397,6 +397,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 return 1;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 struct pt_regs *regs)
 {
@@ -612,7 +613,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
 (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
 ia64_psr(regs)->ss = 0;
-unlock_kprobes();
 goto no_kprobe;
 }
 /* We have reentered the pre_kprobe_handler(), since
@@ -641,10 +641,8 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 }
 }
 
-lock_kprobes();
 p = get_kprobe(addr);
 if (!p) {
-unlock_kprobes();
 if (!is_ia64_break_inst(regs)) {
 /*
 * The breakpoint instruction was removed right
@@ -707,7 +705,6 @@ static int __kprobes post_kprobes_handler(struct pt_regs *regs)
 goto out;
 }
 reset_current_kprobe();
-unlock_kprobes();
 
 out:
 preempt_enable_no_resched();
@@ -728,7 +725,6 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 if (kcb->kprobe_status & KPROBE_HIT_SS) {
 resume_execution(cur, regs);
 reset_current_kprobe();
-unlock_kprobes();
 preempt_enable_no_resched();
 }
 
@@ -741,7 +737,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 struct die_args *args = (struct die_args *)data;
 int ret = NOTIFY_DONE;
 
-preempt_disable();
+rcu_read_lock();
 switch(val) {
 case DIE_BREAK:
 if (pre_kprobes_handler(args))
@@ -757,7 +753,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 default:
 break;
 }
-preempt_enable();
+rcu_read_unlock();
 return ret;
 }
 
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 3f89f3e5584a..e0a25b35437f 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -30,7 +30,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
@@ -125,6 +124,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 kcb->kprobe_saved_msr = regs->msr;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 struct pt_regs *regs)
 {
@@ -152,8 +152,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
 
 /* Check we're not actually recursing */
 if (kprobe_running()) {
-/* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
 p = get_kprobe(addr);
 if (p) {
 kprobe_opcode_t insn = *p->ainsn.insn;
@@ -161,7 +159,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
 is_trap(insn)) {
 regs->msr &= ~MSR_SE;
 regs->msr |= kcb->kprobe_saved_msr;
-unlock_kprobes();
 goto no_kprobe;
 }
 /* We have reentered the kprobe_handler(), since
@@ -183,14 +180,11 @@ static inline int kprobe_handler(struct pt_regs *regs)
 goto ss_probe;
 }
 }
-/* If it's not ours, can't be delete race, (we hold lock). */
 goto no_kprobe;
 }
 
-lock_kprobes();
 p = get_kprobe(addr);
 if (!p) {
-unlock_kprobes();
 if (*addr != BREAKPOINT_INSTRUCTION) {
 /*
 * PowerPC has multiple variants of the "trap"
@@ -254,9 +248,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 struct kretprobe_instance *ri = NULL;
 struct hlist_head *head;
 struct hlist_node *node, *tmp;
-unsigned long orig_ret_address = 0;
+unsigned long flags, orig_ret_address = 0;
 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
+spin_lock_irqsave(&kretprobe_lock, flags);
 head = kretprobe_inst_table_head(current);
 
 /*
@@ -296,7 +291,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 regs->nip = orig_ret_address;
 
 reset_current_kprobe();
-unlock_kprobes();
+spin_unlock_irqrestore(&kretprobe_lock, flags);
 preempt_enable_no_resched();
 
 /*
@@ -348,7 +343,6 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 goto out;
 }
 reset_current_kprobe();
-unlock_kprobes();
 out:
 preempt_enable_no_resched();
 
@@ -363,7 +357,6 @@ out:
 return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 struct kprobe *cur = kprobe_running();
@@ -378,7 +371,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 regs->msr |= kcb->kprobe_saved_msr;
 
 reset_current_kprobe();
-unlock_kprobes();
 preempt_enable_no_resched();
 }
 return 0;
@@ -393,11 +385,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 struct die_args *args = (struct die_args *)data;
 int ret = NOTIFY_DONE;
 
-/*
- * Interrupts are not disabled here. We need to disable
- * preemption, because kprobe_running() uses smp_processor_id().
- */
-preempt_disable();
+rcu_read_lock();
 switch (val) {
 case DIE_BPT:
 if (kprobe_handler(args->regs))
@@ -415,7 +403,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 default:
 break;
 }
-preempt_enable_no_resched();
+rcu_read_unlock();
 return ret;
 }
 
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index b95984154dba..58a815e90373 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -116,15 +116,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
 if (kprobe_running()) {
-/* We *are* holding lock here, so this is safe.
- * Disarm the probe we just hit, and ignore it.
- */
 p = get_kprobe(addr);
 if (p) {
 if (kcb->kprobe_status == KPROBE_HIT_SS) {
 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
 kcb->kprobe_orig_tstate_pil);
-unlock_kprobes();
 goto no_kprobe;
 }
 /* We have reentered the kprobe_handler(), since
@@ -144,14 +140,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 if (p->break_handler && p->break_handler(p, regs))
 goto ss_probe;
 }
-/* If it's not ours, can't be delete race, (we hold lock). */
 goto no_kprobe;
 }
 
-lock_kprobes();
 p = get_kprobe(addr);
 if (!p) {
-unlock_kprobes();
 if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
 /*
 * The breakpoint instruction was removed right
@@ -296,14 +289,12 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 goto out;
 }
 reset_current_kprobe();
-unlock_kprobes();
 out:
 preempt_enable_no_resched();
 
 return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 struct kprobe *cur = kprobe_running();
@@ -316,7 +307,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 resume_execution(cur, regs, kcb);
 
 reset_current_kprobe();
-unlock_kprobes();
 preempt_enable_no_resched();
 }
 return 0;
@@ -331,7 +321,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 struct die_args *args = (struct die_args *)data;
 int ret = NOTIFY_DONE;
 
-preempt_disable();
+rcu_read_lock();
 switch (val) {
 case DIE_DEBUG:
 if (kprobe_handler(args->regs))
@@ -350,7 +340,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 default:
 break;
 }
-preempt_enable();
+rcu_read_unlock();
 return ret;
 }
 
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 6cb40d133b7c..9bef2c8dc12c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -34,7 +34,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/preempt.h>
@@ -266,6 +265,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 regs->rip = (unsigned long)p->ainsn.insn;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 struct pt_regs *regs)
 {
@@ -299,15 +299,12 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 
 /* Check we're not actually recursing */
 if (kprobe_running()) {
-/* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
 p = get_kprobe(addr);
 if (p) {
 if (kcb->kprobe_status == KPROBE_HIT_SS &&
 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 regs->eflags &= ~TF_MASK;
 regs->eflags |= kcb->kprobe_saved_rflags;
-unlock_kprobes();
 goto no_kprobe;
 } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 /* TODO: Provide re-entrancy from
@@ -340,14 +337,11 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 goto ss_probe;
 }
 }
-/* If it's not ours, can't be delete race, (we hold lock). */
 goto no_kprobe;
 }
 
-lock_kprobes();
 p = get_kprobe(addr);
 if (!p) {
-unlock_kprobes();
 if (*addr != BREAKPOINT_INSTRUCTION) {
 /*
 * The breakpoint instruction was removed right
@@ -406,9 +400,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 struct kretprobe_instance *ri = NULL;
 struct hlist_head *head;
 struct hlist_node *node, *tmp;
-unsigned long orig_ret_address = 0;
+unsigned long flags, orig_ret_address = 0;
 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
+spin_lock_irqsave(&kretprobe_lock, flags);
 head = kretprobe_inst_table_head(current);
 
 /*
@@ -448,7 +443,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 regs->rip = orig_ret_address;
 
 reset_current_kprobe();
-unlock_kprobes();
+spin_unlock_irqrestore(&kretprobe_lock, flags);
 preempt_enable_no_resched();
 
 /*
@@ -536,10 +531,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 }
 }
 
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
- */
 int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 struct kprobe *cur = kprobe_running();
@@ -560,8 +551,6 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
 if (kcb->kprobe_status == KPROBE_REENTER) {
 restore_previous_kprobe(kcb);
 goto out;
-} else {
-unlock_kprobes();
 }
 reset_current_kprobe();
 out:
@@ -578,7 +567,6 @@ out:
 return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 struct kprobe *cur = kprobe_running();
@@ -592,7 +580,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 regs->eflags |= kcb->kprobe_old_rflags;
 
 reset_current_kprobe();
-unlock_kprobes();
 preempt_enable_no_resched();
 }
 return 0;
@@ -607,7 +594,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 struct die_args *args = (struct die_args *)data;
 int ret = NOTIFY_DONE;
 
-preempt_disable();
+rcu_read_lock();
 switch (val) {
 case DIE_INT3:
 if (kprobe_handler(args->regs))
@@ -626,7 +613,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 default:
 break;
 }
-preempt_enable();
+rcu_read_unlock();
 return ret;
 }
 