about summary refs log tree commit diff stats
path: root/arch/ppc64/kernel/kprobes.c
diff options
context:
space:
mode:
author	Ananth N Mavinakayanahalli <ananth@in.ibm.com>	2005-11-07 04:00:14 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-07 10:53:46 -0500
commit	991a51d83a3d9bebfafdd1e692cf310899d60791 (patch)
tree	4cc6eaa2a868838e59c7737da9868f2358f2bb19 /arch/ppc64/kernel/kprobes.c
parent	3516a46042508a495fac13c2e73530d936ebe015 (diff)
[PATCH] Kprobes: Use RCU for (un)register synchronization - arch changes
Changes to the arch kprobes infrastructure to take advantage of the locking changes introduced by usage of RCU for synchronization. All handlers are now run without any locks held, so they have to be re-entrant or provide their own synchronization.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ppc64/kernel/kprobes.c')
-rw-r--r--	arch/ppc64/kernel/kprobes.c	24
1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 3f89f3e5584a..e0a25b35437f 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -30,7 +30,6 @@
30#include <linux/config.h> 30#include <linux/config.h>
31#include <linux/kprobes.h> 31#include <linux/kprobes.h>
32#include <linux/ptrace.h> 32#include <linux/ptrace.h>
33#include <linux/spinlock.h>
34#include <linux/preempt.h> 33#include <linux/preempt.h>
35#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
36#include <asm/kdebug.h> 35#include <asm/kdebug.h>
@@ -125,6 +124,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
125 kcb->kprobe_saved_msr = regs->msr; 124 kcb->kprobe_saved_msr = regs->msr;
126} 125}
127 126
127/* Called with kretprobe_lock held */
128void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, 128void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
129 struct pt_regs *regs) 129 struct pt_regs *regs)
130{ 130{
@@ -152,8 +152,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
152 152
153 /* Check we're not actually recursing */ 153 /* Check we're not actually recursing */
154 if (kprobe_running()) { 154 if (kprobe_running()) {
155 /* We *are* holding lock here, so this is safe.
156 Disarm the probe we just hit, and ignore it. */
157 p = get_kprobe(addr); 155 p = get_kprobe(addr);
158 if (p) { 156 if (p) {
159 kprobe_opcode_t insn = *p->ainsn.insn; 157 kprobe_opcode_t insn = *p->ainsn.insn;
@@ -161,7 +159,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
161 is_trap(insn)) { 159 is_trap(insn)) {
162 regs->msr &= ~MSR_SE; 160 regs->msr &= ~MSR_SE;
163 regs->msr |= kcb->kprobe_saved_msr; 161 regs->msr |= kcb->kprobe_saved_msr;
164 unlock_kprobes();
165 goto no_kprobe; 162 goto no_kprobe;
166 } 163 }
167 /* We have reentered the kprobe_handler(), since 164 /* We have reentered the kprobe_handler(), since
@@ -183,14 +180,11 @@ static inline int kprobe_handler(struct pt_regs *regs)
183 goto ss_probe; 180 goto ss_probe;
184 } 181 }
185 } 182 }
186 /* If it's not ours, can't be delete race, (we hold lock). */
187 goto no_kprobe; 183 goto no_kprobe;
188 } 184 }
189 185
190 lock_kprobes();
191 p = get_kprobe(addr); 186 p = get_kprobe(addr);
192 if (!p) { 187 if (!p) {
193 unlock_kprobes();
194 if (*addr != BREAKPOINT_INSTRUCTION) { 188 if (*addr != BREAKPOINT_INSTRUCTION) {
195 /* 189 /*
196 * PowerPC has multiple variants of the "trap" 190 * PowerPC has multiple variants of the "trap"
@@ -254,9 +248,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
254 struct kretprobe_instance *ri = NULL; 248 struct kretprobe_instance *ri = NULL;
255 struct hlist_head *head; 249 struct hlist_head *head;
256 struct hlist_node *node, *tmp; 250 struct hlist_node *node, *tmp;
257 unsigned long orig_ret_address = 0; 251 unsigned long flags, orig_ret_address = 0;
258 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 252 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
259 253
254 spin_lock_irqsave(&kretprobe_lock, flags);
260 head = kretprobe_inst_table_head(current); 255 head = kretprobe_inst_table_head(current);
261 256
262 /* 257 /*
@@ -296,7 +291,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
296 regs->nip = orig_ret_address; 291 regs->nip = orig_ret_address;
297 292
298 reset_current_kprobe(); 293 reset_current_kprobe();
299 unlock_kprobes(); 294 spin_unlock_irqrestore(&kretprobe_lock, flags);
300 preempt_enable_no_resched(); 295 preempt_enable_no_resched();
301 296
302 /* 297 /*
@@ -348,7 +343,6 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
348 goto out; 343 goto out;
349 } 344 }
350 reset_current_kprobe(); 345 reset_current_kprobe();
351 unlock_kprobes();
352out: 346out:
353 preempt_enable_no_resched(); 347 preempt_enable_no_resched();
354 348
@@ -363,7 +357,6 @@ out:
363 return 1; 357 return 1;
364} 358}
365 359
366/* Interrupts disabled, kprobe_lock held. */
367static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 360static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
368{ 361{
369 struct kprobe *cur = kprobe_running(); 362 struct kprobe *cur = kprobe_running();
@@ -378,7 +371,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
378 regs->msr |= kcb->kprobe_saved_msr; 371 regs->msr |= kcb->kprobe_saved_msr;
379 372
380 reset_current_kprobe(); 373 reset_current_kprobe();
381 unlock_kprobes();
382 preempt_enable_no_resched(); 374 preempt_enable_no_resched();
383 } 375 }
384 return 0; 376 return 0;
@@ -393,11 +385,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
393 struct die_args *args = (struct die_args *)data; 385 struct die_args *args = (struct die_args *)data;
394 int ret = NOTIFY_DONE; 386 int ret = NOTIFY_DONE;
395 387
396 /* 388 rcu_read_lock();
397 * Interrupts are not disabled here. We need to disable
398 * preemption, because kprobe_running() uses smp_processor_id().
399 */
400 preempt_disable();
401 switch (val) { 389 switch (val) {
402 case DIE_BPT: 390 case DIE_BPT:
403 if (kprobe_handler(args->regs)) 391 if (kprobe_handler(args->regs))
@@ -415,7 +403,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
415 default: 403 default:
416 break; 404 break;
417 } 405 }
418 preempt_enable_no_resched(); 406 rcu_read_unlock();
419 return ret; 407 return ret;
420} 408}
421 409