about | summary | refs | log | tree | commit | diff | stats
path: root/arch/ppc64/kernel
diff options
context:
space:
mode:
authorAnanth N Mavinakayanahalli <ananth@in.ibm.com>2005-11-07 04:00:07 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:45 -0500
commit66ff2d0691e00e1e7bfdf398a970310c9a0fe671 (patch)
treef53bcd846be8fbaee5a5ee65f9bacc5b34392090 /arch/ppc64/kernel
parentb385676b355549afc9a7507ce09c7df47f166521 (diff)
[PATCH] Kprobes: rearrange preempt_disable/enable() calls
The following set of patches are aimed at improving kprobes scalability. We currently serialize kprobe registration, unregistration and handler execution using a single spinlock - kprobe_lock. With these changes, kprobe handlers can run without any locks held. It also allows for simultaneous kprobe handler executions on different processors as we now track kprobe execution on a per processor basis. It is now necessary that the handlers be re-entrant since handlers can run concurrently on multiple processors. All changes have been tested on i386, ia64, ppc64 and x86_64, while sparc64 has been compile tested only. The patches can be viewed as 3 logical chunks: patch 1: Reorder preempt_(dis/en)able calls patches 2-7: Introduce per_cpu data areas to track kprobe execution patches 8-9: Use RCU to synchronize kprobe (un)registration and handler execution. Thanks to Maneesh Soni, James Keniston and Anil Keshavamurthy for their review and suggestions. Thanks again to Anil, Hien Nguyen and Kevin Stafford for testing the patches. This patch: Reorder preempt_disable/enable() calls in arch kprobes files in preparation to introduce locking changes. No functional changes introduced by this patch. Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--  arch/ppc64/kernel/kprobes.c  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index ed876a5178ae..6071ee99f5cb 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -209,6 +209,11 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
+	/*
+	 * This preempt_disable() matches the preempt_enable_no_resched()
+	 * in post_kprobe_handler().
+	 */
+	preempt_disable();
 	kprobe_status = KPROBE_HIT_ACTIVE;
 	current_kprobe = p;
 	kprobe_saved_msr = regs->msr;
@@ -219,11 +224,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
 ss_probe:
 	prepare_singlestep(p, regs);
 	kprobe_status = KPROBE_HIT_SS;
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler().
-	 */
-	preempt_disable();
 	return 1;
 
 no_kprobe:
@@ -293,6 +293,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->nip = orig_ret_address;
 
 	unlock_kprobes();
+	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling