about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorAnanth N Mavinakayanahalli <ananth@in.ibm.com>2005-11-07 04:00:07 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:45 -0500
commit66ff2d0691e00e1e7bfdf398a970310c9a0fe671 (patch)
treef53bcd846be8fbaee5a5ee65f9bacc5b34392090 /arch
parentb385676b355549afc9a7507ce09c7df47f166521 (diff)
[PATCH] Kprobes: rearrange preempt_disable/enable() calls
The following set of patches is aimed at improving kprobes scalability. We currently serialize kprobe registration, unregistration and handler execution using a single spinlock - kprobe_lock. With these changes, kprobe handlers can run without any locks held. It also allows for simultaneous kprobe handler executions on different processors as we now track kprobe execution on a per processor basis. It is now necessary that the handlers be re-entrant since handlers can run concurrently on multiple processors. All changes have been tested on i386, ia64, ppc64 and x86_64, while sparc64 has been compile tested only. The patches can be viewed as 3 logical chunks: patch 1: Reorder preempt_(dis/en)able calls patches 2-7: Introduce per_cpu data areas to track kprobe execution patches 8-9: Use RCU to synchronize kprobe (un)registration and handler execution. Thanks to Maneesh Soni, James Keniston and Anil Keshavamurthy for their review and suggestions. Thanks again to Anil, Hien Nguyen and Kevin Stafford for testing the patches. This patch: Reorder preempt_disable/enable() calls in arch kprobes files in preparation to introduce locking changes. No functional changes introduced by this patch. Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/i386/kernel/kprobes.c35
-rw-r--r--arch/ia64/kernel/kprobes.c22
-rw-r--r--arch/ppc64/kernel/kprobes.c11
-rw-r--r--arch/sparc64/kernel/kprobes.c25
-rw-r--r--arch/x86_64/kernel/kprobes.c28
5 files changed, 65 insertions(+), 56 deletions(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 6345b430b105..fd35039859e6 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -158,8 +158,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
158 kprobe_opcode_t *addr = NULL; 158 kprobe_opcode_t *addr = NULL;
159 unsigned long *lp; 159 unsigned long *lp;
160 160
161 /* We're in an interrupt, but this is clear and BUG()-safe. */
162 preempt_disable();
163 /* Check if the application is using LDT entry for its code segment and 161 /* Check if the application is using LDT entry for its code segment and
164 * calculate the address by reading the base address from the LDT entry. 162 * calculate the address by reading the base address from the LDT entry.
165 */ 163 */
@@ -232,6 +230,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
232 goto no_kprobe; 230 goto no_kprobe;
233 } 231 }
234 232
233 /*
234 * This preempt_disable() matches the preempt_enable_no_resched()
235 * in post_kprobe_handler()
236 */
237 preempt_disable();
235 kprobe_status = KPROBE_HIT_ACTIVE; 238 kprobe_status = KPROBE_HIT_ACTIVE;
236 set_current_kprobe(p, regs); 239 set_current_kprobe(p, regs);
237 240
@@ -245,7 +248,6 @@ ss_probe:
245 return 1; 248 return 1;
246 249
247no_kprobe: 250no_kprobe:
248 preempt_enable_no_resched();
249 return ret; 251 return ret;
250} 252}
251 253
@@ -313,11 +315,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
313 unlock_kprobes(); 315 unlock_kprobes();
314 preempt_enable_no_resched(); 316 preempt_enable_no_resched();
315 317
316 /* 318 /*
317 * By returning a non-zero value, we are telling 319 * By returning a non-zero value, we are telling
318 * kprobe_handler() that we have handled unlocking 320 * kprobe_handler() that we have handled unlocking
319 * and re-enabling preemption. 321 * and re-enabling preemption
320 */ 322 */
321 return 1; 323 return 1;
322} 324}
323 325
@@ -453,29 +455,29 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
453 unsigned long val, void *data) 455 unsigned long val, void *data)
454{ 456{
455 struct die_args *args = (struct die_args *)data; 457 struct die_args *args = (struct die_args *)data;
458 int ret = NOTIFY_DONE;
459
460 preempt_disable();
456 switch (val) { 461 switch (val) {
457 case DIE_INT3: 462 case DIE_INT3:
458 if (kprobe_handler(args->regs)) 463 if (kprobe_handler(args->regs))
459 return NOTIFY_STOP; 464 ret = NOTIFY_STOP;
460 break; 465 break;
461 case DIE_DEBUG: 466 case DIE_DEBUG:
462 if (post_kprobe_handler(args->regs)) 467 if (post_kprobe_handler(args->regs))
463 return NOTIFY_STOP; 468 ret = NOTIFY_STOP;
464 break; 469 break;
465 case DIE_GPF: 470 case DIE_GPF:
466 if (kprobe_running() &&
467 kprobe_fault_handler(args->regs, args->trapnr))
468 return NOTIFY_STOP;
469 break;
470 case DIE_PAGE_FAULT: 471 case DIE_PAGE_FAULT:
471 if (kprobe_running() && 472 if (kprobe_running() &&
472 kprobe_fault_handler(args->regs, args->trapnr)) 473 kprobe_fault_handler(args->regs, args->trapnr))
473 return NOTIFY_STOP; 474 ret = NOTIFY_STOP;
474 break; 475 break;
475 default: 476 default:
476 break; 477 break;
477 } 478 }
478 return NOTIFY_DONE; 479 preempt_enable();
480 return ret;
479} 481}
480 482
481int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 483int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -502,7 +504,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
502 504
503void __kprobes jprobe_return(void) 505void __kprobes jprobe_return(void)
504{ 506{
505 preempt_enable_no_resched();
506 asm volatile (" xchgl %%ebx,%%esp \n" 507 asm volatile (" xchgl %%ebx,%%esp \n"
507 " int3 \n" 508 " int3 \n"
508 " .globl jprobe_return_end \n" 509 " .globl jprobe_return_end \n"
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 471086b808a4..1e80ec80dd21 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -395,7 +395,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
395 /* 395 /*
396 * By returning a non-zero value, we are telling 396 * By returning a non-zero value, we are telling
397 * kprobe_handler() that we have handled unlocking 397 * kprobe_handler() that we have handled unlocking
398 * and re-enabling preemption. 398 * and re-enabling preemption
399 */ 399 */
400 return 1; 400 return 1;
401} 401}
@@ -607,8 +607,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
607 struct pt_regs *regs = args->regs; 607 struct pt_regs *regs = args->regs;
608 kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs); 608 kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
609 609
610 preempt_disable();
611
612 /* Handle recursion cases */ 610 /* Handle recursion cases */
613 if (kprobe_running()) { 611 if (kprobe_running()) {
614 p = get_kprobe(addr); 612 p = get_kprobe(addr);
@@ -665,6 +663,11 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
665 goto no_kprobe; 663 goto no_kprobe;
666 } 664 }
667 665
666 /*
667 * This preempt_disable() matches the preempt_enable_no_resched()
668 * in post_kprobes_handler()
669 */
670 preempt_disable();
668 kprobe_status = KPROBE_HIT_ACTIVE; 671 kprobe_status = KPROBE_HIT_ACTIVE;
669 set_current_kprobe(p); 672 set_current_kprobe(p);
670 673
@@ -682,7 +685,6 @@ ss_probe:
682 return 1; 685 return 1;
683 686
684no_kprobe: 687no_kprobe:
685 preempt_enable_no_resched();
686 return ret; 688 return ret;
687} 689}
688 690
@@ -733,22 +735,26 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
733 unsigned long val, void *data) 735 unsigned long val, void *data)
734{ 736{
735 struct die_args *args = (struct die_args *)data; 737 struct die_args *args = (struct die_args *)data;
738 int ret = NOTIFY_DONE;
739
740 preempt_disable();
736 switch(val) { 741 switch(val) {
737 case DIE_BREAK: 742 case DIE_BREAK:
738 if (pre_kprobes_handler(args)) 743 if (pre_kprobes_handler(args))
739 return NOTIFY_STOP; 744 ret = NOTIFY_STOP;
740 break; 745 break;
741 case DIE_SS: 746 case DIE_SS:
742 if (post_kprobes_handler(args->regs)) 747 if (post_kprobes_handler(args->regs))
743 return NOTIFY_STOP; 748 ret = NOTIFY_STOP;
744 break; 749 break;
745 case DIE_PAGE_FAULT: 750 case DIE_PAGE_FAULT:
746 if (kprobes_fault_handler(args->regs, args->trapnr)) 751 if (kprobes_fault_handler(args->regs, args->trapnr))
747 return NOTIFY_STOP; 752 ret = NOTIFY_STOP;
748 default: 753 default:
749 break; 754 break;
750 } 755 }
751 return NOTIFY_DONE; 756 preempt_enable();
757 return ret;
752} 758}
753 759
754int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 760int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index ed876a5178ae..6071ee99f5cb 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -209,6 +209,11 @@ static inline int kprobe_handler(struct pt_regs *regs)
209 goto no_kprobe; 209 goto no_kprobe;
210 } 210 }
211 211
212 /*
213 * This preempt_disable() matches the preempt_enable_no_resched()
214 * in post_kprobe_handler().
215 */
216 preempt_disable();
212 kprobe_status = KPROBE_HIT_ACTIVE; 217 kprobe_status = KPROBE_HIT_ACTIVE;
213 current_kprobe = p; 218 current_kprobe = p;
214 kprobe_saved_msr = regs->msr; 219 kprobe_saved_msr = regs->msr;
@@ -219,11 +224,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
219ss_probe: 224ss_probe:
220 prepare_singlestep(p, regs); 225 prepare_singlestep(p, regs);
221 kprobe_status = KPROBE_HIT_SS; 226 kprobe_status = KPROBE_HIT_SS;
222 /*
223 * This preempt_disable() matches the preempt_enable_no_resched()
224 * in post_kprobe_handler().
225 */
226 preempt_disable();
227 return 1; 227 return 1;
228 228
229no_kprobe: 229no_kprobe:
@@ -293,6 +293,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
293 regs->nip = orig_ret_address; 293 regs->nip = orig_ret_address;
294 294
295 unlock_kprobes(); 295 unlock_kprobes();
296 preempt_enable_no_resched();
296 297
297 /* 298 /*
298 * By returning a non-zero value, we are telling 299 * By returning a non-zero value, we are telling
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 0d66d07c8c6e..755a0d7d887f 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -118,8 +118,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
118 void *addr = (void *) regs->tpc; 118 void *addr = (void *) regs->tpc;
119 int ret = 0; 119 int ret = 0;
120 120
121 preempt_disable();
122
123 if (kprobe_running()) { 121 if (kprobe_running()) {
124 /* We *are* holding lock here, so this is safe. 122 /* We *are* holding lock here, so this is safe.
125 * Disarm the probe we just hit, and ignore it. 123 * Disarm the probe we just hit, and ignore it.
@@ -171,6 +169,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
171 goto no_kprobe; 169 goto no_kprobe;
172 } 170 }
173 171
172 /*
173 * This preempt_disable() matches the preempt_enable_no_resched()
174 * in post_kprobes_handler()
175 */
176 preempt_disable();
174 set_current_kprobe(p, regs); 177 set_current_kprobe(p, regs);
175 kprobe_status = KPROBE_HIT_ACTIVE; 178 kprobe_status = KPROBE_HIT_ACTIVE;
176 if (p->pre_handler && p->pre_handler(p, regs)) 179 if (p->pre_handler && p->pre_handler(p, regs))
@@ -182,7 +185,6 @@ ss_probe:
182 return 1; 185 return 1;
183 186
184no_kprobe: 187no_kprobe:
185 preempt_enable_no_resched();
186 return ret; 188 return ret;
187} 189}
188 190
@@ -322,29 +324,29 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
322 unsigned long val, void *data) 324 unsigned long val, void *data)
323{ 325{
324 struct die_args *args = (struct die_args *)data; 326 struct die_args *args = (struct die_args *)data;
327 int ret = NOTIFY_DONE;
328
329 preempt_disable();
325 switch (val) { 330 switch (val) {
326 case DIE_DEBUG: 331 case DIE_DEBUG:
327 if (kprobe_handler(args->regs)) 332 if (kprobe_handler(args->regs))
328 return NOTIFY_STOP; 333 ret = NOTIFY_STOP;
329 break; 334 break;
330 case DIE_DEBUG_2: 335 case DIE_DEBUG_2:
331 if (post_kprobe_handler(args->regs)) 336 if (post_kprobe_handler(args->regs))
332 return NOTIFY_STOP; 337 ret = NOTIFY_STOP;
333 break; 338 break;
334 case DIE_GPF: 339 case DIE_GPF:
335 if (kprobe_running() &&
336 kprobe_fault_handler(args->regs, args->trapnr))
337 return NOTIFY_STOP;
338 break;
339 case DIE_PAGE_FAULT: 340 case DIE_PAGE_FAULT:
340 if (kprobe_running() && 341 if (kprobe_running() &&
341 kprobe_fault_handler(args->regs, args->trapnr)) 342 kprobe_fault_handler(args->regs, args->trapnr))
342 return NOTIFY_STOP; 343 ret = NOTIFY_STOP;
343 break; 344 break;
344 default: 345 default:
345 break; 346 break;
346 } 347 }
347 return NOTIFY_DONE; 348 preempt_enable();
349 return ret;
348} 350}
349 351
350asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, 352asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
@@ -396,7 +398,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
396 398
397void __kprobes jprobe_return(void) 399void __kprobes jprobe_return(void)
398{ 400{
399 preempt_enable_no_resched();
400 __asm__ __volatile__( 401 __asm__ __volatile__(
401 ".globl jprobe_return_trap_instruction\n" 402 ".globl jprobe_return_trap_instruction\n"
402"jprobe_return_trap_instruction:\n\t" 403"jprobe_return_trap_instruction:\n\t"
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 76a28b007be9..ebfa2c9241ca 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -302,9 +302,6 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
302 int ret = 0; 302 int ret = 0;
303 kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t)); 303 kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
304 304
305 /* We're in an interrupt, but this is clear and BUG()-safe. */
306 preempt_disable();
307
308 /* Check we're not actually recursing */ 305 /* Check we're not actually recursing */
309 if (kprobe_running()) { 306 if (kprobe_running()) {
310 /* We *are* holding lock here, so this is safe. 307 /* We *are* holding lock here, so this is safe.
@@ -372,6 +369,11 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
372 goto no_kprobe; 369 goto no_kprobe;
373 } 370 }
374 371
372 /*
373 * This preempt_disable() matches the preempt_enable_no_resched()
374 * in post_kprobe_handler()
375 */
376 preempt_disable();
375 kprobe_status = KPROBE_HIT_ACTIVE; 377 kprobe_status = KPROBE_HIT_ACTIVE;
376 set_current_kprobe(p, regs); 378 set_current_kprobe(p, regs);
377 379
@@ -385,7 +387,6 @@ ss_probe:
385 return 1; 387 return 1;
386 388
387no_kprobe: 389no_kprobe:
388 preempt_enable_no_resched();
389 return ret; 390 return ret;
390} 391}
391 392
@@ -456,7 +457,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
456 /* 457 /*
457 * By returning a non-zero value, we are telling 458 * By returning a non-zero value, we are telling
458 * kprobe_handler() that we have handled unlocking 459 * kprobe_handler() that we have handled unlocking
459 * and re-enabling preemption. 460 * and re-enabling preemption
460 */ 461 */
461 return 1; 462 return 1;
462} 463}
@@ -599,29 +600,29 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
599 unsigned long val, void *data) 600 unsigned long val, void *data)
600{ 601{
601 struct die_args *args = (struct die_args *)data; 602 struct die_args *args = (struct die_args *)data;
603 int ret = NOTIFY_DONE;
604
605 preempt_disable();
602 switch (val) { 606 switch (val) {
603 case DIE_INT3: 607 case DIE_INT3:
604 if (kprobe_handler(args->regs)) 608 if (kprobe_handler(args->regs))
605 return NOTIFY_STOP; 609 ret = NOTIFY_STOP;
606 break; 610 break;
607 case DIE_DEBUG: 611 case DIE_DEBUG:
608 if (post_kprobe_handler(args->regs)) 612 if (post_kprobe_handler(args->regs))
609 return NOTIFY_STOP; 613 ret = NOTIFY_STOP;
610 break; 614 break;
611 case DIE_GPF: 615 case DIE_GPF:
612 if (kprobe_running() &&
613 kprobe_fault_handler(args->regs, args->trapnr))
614 return NOTIFY_STOP;
615 break;
616 case DIE_PAGE_FAULT: 616 case DIE_PAGE_FAULT:
617 if (kprobe_running() && 617 if (kprobe_running() &&
618 kprobe_fault_handler(args->regs, args->trapnr)) 618 kprobe_fault_handler(args->regs, args->trapnr))
619 return NOTIFY_STOP; 619 ret = NOTIFY_STOP;
620 break; 620 break;
621 default: 621 default:
622 break; 622 break;
623 } 623 }
624 return NOTIFY_DONE; 624 preempt_enable();
625 return ret;
625} 626}
626 627
627int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 628int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
@@ -647,7 +648,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
647 648
648void __kprobes jprobe_return(void) 649void __kprobes jprobe_return(void)
649{ 650{
650 preempt_enable_no_resched();
651 asm volatile (" xchg %%rbx,%%rsp \n" 651 asm volatile (" xchg %%rbx,%%rsp \n"
652 " int3 \n" 652 " int3 \n"
653 " .globl jprobe_return_end \n" 653 " .globl jprobe_return_end \n"