-rw-r--r--	arch/i386/kernel/kprobes.c	25
-rw-r--r--	arch/ia64/kernel/kprobes.c	37
-rw-r--r--	arch/ppc64/kernel/kprobes.c	25
-rw-r--r--	arch/sparc64/kernel/kprobes.c	21
-rw-r--r--	arch/x86_64/kernel/kprobes.c	29
-rw-r--r--	include/linux/kprobes.h	2
-rw-r--r--	kernel/kprobes.c	2
7 files changed, 80 insertions, 61 deletions
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index ad469299267a..32b0c24ab9a6 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -153,7 +153,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	int ret = 0;
 	kprobe_opcode_t *addr = NULL;
 	unsigned long *lp;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Check if the application is using LDT entry for its code segment and
 	 * calculate the address by reading the base address from the LDT entry.
@@ -221,11 +228,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler()
-	 */
-	preempt_disable();
 	set_current_kprobe(p, regs, kcb);
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
@@ -239,6 +241,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -310,8 +313,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
@@ -455,7 +458,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	rcu_read_lock();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
@@ -467,14 +469,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		break;
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -537,6 +541,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 		*regs = kcb->jprobe_saved_regs;
 		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
 		       MIN_STACK_SIZE(stack_addr));
+		preempt_enable_no_resched();
 		return 1;
 	}
 	return 0;
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index fddbac32d44a..96736a119c91 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -389,11 +389,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
 
@@ -604,7 +604,14 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	int ret = 0;
 	struct pt_regs *regs = args->regs;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Handle recursion cases */
 	if (kprobe_running()) {
@@ -659,11 +666,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		goto no_kprobe;
 	}
 
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobes_handler()
-	 */
-	preempt_disable();
 	set_current_kprobe(p, kcb);
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
@@ -681,6 +683,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -716,9 +719,6 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (!cur)
-		return 0;
-
 	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
 		return 1;
 
@@ -737,7 +737,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	rcu_read_lock();
 	switch(val) {
 	case DIE_BREAK:
 		if (pre_kprobes_handler(args))
@@ -748,12 +747,15 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
-		if (kprobes_fault_handler(args->regs, args->trapnr))
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+		    kprobes_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 	default:
 		break;
 	}
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -785,6 +787,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
 	*regs = kcb->jprobe_saved_regs;
+	preempt_enable_no_resched();
 	return 1;
 }
 
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index e0a25b35437f..511af54e6230 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -148,7 +148,14 @@ static inline int kprobe_handler(struct pt_regs *regs)
 	struct kprobe *p;
 	int ret = 0;
 	unsigned int *addr = (unsigned int *)regs->nip;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
@@ -207,11 +214,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler().
-	 */
-	preempt_disable();
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 	set_current_kprobe(p, regs, kcb);
 	if (p->pre_handler && p->pre_handler(p, regs))
@@ -224,6 +226,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -296,8 +299,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption.
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
@@ -385,7 +388,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	rcu_read_lock();
 	switch (val) {
 	case DIE_BPT:
 		if (kprobe_handler(args->regs))
@@ -396,14 +398,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -440,6 +444,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	preempt_enable_no_resched();
 	return 1;
 }
 
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 58a815e90373..96bd09b098f4 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -113,7 +113,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	struct kprobe *p;
 	void *addr = (void *) regs->tpc;
 	int ret = 0;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	if (kprobe_running()) {
 		p = get_kprobe(addr);
@@ -159,11 +166,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobes_handler()
-	 */
-	preempt_disable();
 	set_current_kprobe(p, regs, kcb);
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 	if (p->pre_handler && p->pre_handler(p, regs))
@@ -175,6 +177,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -321,7 +324,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	rcu_read_lock();
 	switch (val) {
 	case DIE_DEBUG:
 		if (kprobe_handler(args->regs))
@@ -333,14 +335,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		break;
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -426,6 +430,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 		       &(kcb->jprobe_saved_stack),
 		       sizeof(kcb->jprobe_saved_stack));
 
+		preempt_enable_no_resched();
 		return 1;
 	}
 	return 0;
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 9bef2c8dc12c..dddeb678b440 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -286,16 +286,19 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 	}
 }
 
-/*
- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
- * remain disabled thorough out this function.
- */
 int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
@@ -359,11 +362,6 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler()
-	 */
-	preempt_disable();
 	set_current_kprobe(p, regs, kcb);
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
@@ -377,6 +375,7 @@ ss_probe:
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -448,8 +447,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
@@ -594,7 +593,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	rcu_read_lock();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
@@ -606,14 +604,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		break;
 	case DIE_GPF:
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -675,6 +675,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 		*regs = kcb->jprobe_saved_regs;
 		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
 		       MIN_STACK_SIZE(stack_addr));
+		preempt_enable_no_resched();
 		return 1;
 	}
 	return 0;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index cff281cf70cf..e373c4a9de53 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -159,7 +159,7 @@ extern void show_registers(struct pt_regs *regs);
 extern kprobe_opcode_t *get_insn_slot(void);
 extern void free_insn_slot(kprobe_opcode_t *slot);
 
-/* Get the kprobe at this addr (if any) - called under a rcu_read_lock() */
+/* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
 struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index cfef426e4cdc..5beda378cc75 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -167,7 +167,7 @@ static inline void reset_kprobe_instance(void)
  * This routine is called either:
  * - under the kprobe_lock spinlock - during kprobe_[un]register()
  * OR
- * - under an rcu_read_lock() - from arch/xxx/kernel/kprobes.c
+ * - with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {