aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64/kernel
diff options
context:
space:
mode:
authorAnanth N Mavinakayanahalli <ananth@in.ibm.com>2005-11-07 04:00:07 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:45 -0500
commit66ff2d0691e00e1e7bfdf398a970310c9a0fe671 (patch)
treef53bcd846be8fbaee5a5ee65f9bacc5b34392090 /arch/sparc64/kernel
parentb385676b355549afc9a7507ce09c7df47f166521 (diff)
[PATCH] Kprobes: rearrange preempt_disable/enable() calls
The following set of patches are aimed at improving kprobes scalability. We currently serialize kprobe registration, unregistration and handler execution using a single spinlock - kprobe_lock. With these changes, kprobe handlers can run without any locks held. It also allows for simultaneous kprobe handler executions on different processors as we now track kprobe execution on a per processor basis. It is now necessary that the handlers be re-entrant since handlers can run concurrently on multiple processors. All changes have been tested on i386, ia64, ppc64 and x86_64, while sparc64 has been compile tested only. The patches can be viewed as 3 logical chunks: patch 1: Reorder preempt_(dis/en)able calls patches 2-7: Introduce per_cpu data areas to track kprobe execution patches 8-9: Use RCU to synchronize kprobe (un)registration and handler execution. Thanks to Maneesh Soni, James Keniston and Anil Keshavamurthy for their review and suggestions. Thanks again to Anil, Hien Nguyen and Kevin Stafford for testing the patches. This patch: Reorder preempt_disable/enable() calls in arch kprobes files in preparation to introduce locking changes. No functional changes introduced by this patch. Signed-off-by: Ananth N Mavinakayahanalli <ananth@in.ibm.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--arch/sparc64/kernel/kprobes.c25
1 file changed, 13 insertions, 12 deletions
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 0d66d07c8c6e..755a0d7d887f 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -118,8 +118,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
118 void *addr = (void *) regs->tpc; 118 void *addr = (void *) regs->tpc;
119 int ret = 0; 119 int ret = 0;
120 120
121 preempt_disable();
122
123 if (kprobe_running()) { 121 if (kprobe_running()) {
124 /* We *are* holding lock here, so this is safe. 122 /* We *are* holding lock here, so this is safe.
125 * Disarm the probe we just hit, and ignore it. 123 * Disarm the probe we just hit, and ignore it.
@@ -171,6 +169,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
171 goto no_kprobe; 169 goto no_kprobe;
172 } 170 }
173 171
172 /*
173 * This preempt_disable() matches the preempt_enable_no_resched()
174 * in post_kprobes_handler()
175 */
176 preempt_disable();
174 set_current_kprobe(p, regs); 177 set_current_kprobe(p, regs);
175 kprobe_status = KPROBE_HIT_ACTIVE; 178 kprobe_status = KPROBE_HIT_ACTIVE;
176 if (p->pre_handler && p->pre_handler(p, regs)) 179 if (p->pre_handler && p->pre_handler(p, regs))
@@ -182,7 +185,6 @@ ss_probe:
182 return 1; 185 return 1;
183 186
184no_kprobe: 187no_kprobe:
185 preempt_enable_no_resched();
186 return ret; 188 return ret;
187} 189}
188 190
@@ -322,29 +324,29 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
322 unsigned long val, void *data) 324 unsigned long val, void *data)
323{ 325{
324 struct die_args *args = (struct die_args *)data; 326 struct die_args *args = (struct die_args *)data;
327 int ret = NOTIFY_DONE;
328
329 preempt_disable();
325 switch (val) { 330 switch (val) {
326 case DIE_DEBUG: 331 case DIE_DEBUG:
327 if (kprobe_handler(args->regs)) 332 if (kprobe_handler(args->regs))
328 return NOTIFY_STOP; 333 ret = NOTIFY_STOP;
329 break; 334 break;
330 case DIE_DEBUG_2: 335 case DIE_DEBUG_2:
331 if (post_kprobe_handler(args->regs)) 336 if (post_kprobe_handler(args->regs))
332 return NOTIFY_STOP; 337 ret = NOTIFY_STOP;
333 break; 338 break;
334 case DIE_GPF: 339 case DIE_GPF:
335 if (kprobe_running() &&
336 kprobe_fault_handler(args->regs, args->trapnr))
337 return NOTIFY_STOP;
338 break;
339 case DIE_PAGE_FAULT: 340 case DIE_PAGE_FAULT:
340 if (kprobe_running() && 341 if (kprobe_running() &&
341 kprobe_fault_handler(args->regs, args->trapnr)) 342 kprobe_fault_handler(args->regs, args->trapnr))
342 return NOTIFY_STOP; 343 ret = NOTIFY_STOP;
343 break; 344 break;
344 default: 345 default:
345 break; 346 break;
346 } 347 }
347 return NOTIFY_DONE; 348 preempt_enable();
349 return ret;
348} 350}
349 351
350asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, 352asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
@@ -396,7 +398,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
396 398
397void __kprobes jprobe_return(void) 399void __kprobes jprobe_return(void)
398{ 400{
399 preempt_enable_no_resched();
400 __asm__ __volatile__( 401 __asm__ __volatile__(
401 ".globl jprobe_return_trap_instruction\n" 402 ".globl jprobe_return_trap_instruction\n"
402"jprobe_return_trap_instruction:\n\t" 403"jprobe_return_trap_instruction:\n\t"