about · summary · refs · log · tree · commit · diff · stats
path: root/arch/i386
diff options
context:
space:
mode:
authorAnanth N Mavinakayanahalli <ananth@in.ibm.com>2005-11-07 04:00:14 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:46 -0500
commit991a51d83a3d9bebfafdd1e692cf310899d60791 (patch)
tree4cc6eaa2a868838e59c7737da9868f2358f2bb19 /arch/i386
parent3516a46042508a495fac13c2e73530d936ebe015 (diff)
[PATCH] Kprobes: Use RCU for (un)register synchronization - arch changes
Changes to the arch kprobes infrastructure to take advantage of the locking changes introduced by usage of RCU for synchronization. All handlers are now run without any locks held, so they have to be re-entrant or provide their own synchronization.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386')
-rw-r--r--arch/i386/kernel/kprobes.c22
1 file changed, 7 insertions, 15 deletions
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 99565a66915d..ad469299267a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -31,7 +31,6 @@
31#include <linux/config.h> 31#include <linux/config.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/ptrace.h> 33#include <linux/ptrace.h>
34#include <linux/spinlock.h>
35#include <linux/preempt.h> 34#include <linux/preempt.h>
36#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
37#include <asm/kdebug.h> 36#include <asm/kdebug.h>
@@ -123,6 +122,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
123 regs->eip = (unsigned long)&p->ainsn.insn; 122 regs->eip = (unsigned long)&p->ainsn.insn;
124} 123}
125 124
125/* Called with kretprobe_lock held */
126void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, 126void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
127 struct pt_regs *regs) 127 struct pt_regs *regs)
128{ 128{
@@ -168,15 +168,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
168 } 168 }
169 /* Check we're not actually recursing */ 169 /* Check we're not actually recursing */
170 if (kprobe_running()) { 170 if (kprobe_running()) {
171 /* We *are* holding lock here, so this is safe.
172 Disarm the probe we just hit, and ignore it. */
173 p = get_kprobe(addr); 171 p = get_kprobe(addr);
174 if (p) { 172 if (p) {
175 if (kcb->kprobe_status == KPROBE_HIT_SS && 173 if (kcb->kprobe_status == KPROBE_HIT_SS &&
176 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { 174 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
177 regs->eflags &= ~TF_MASK; 175 regs->eflags &= ~TF_MASK;
178 regs->eflags |= kcb->kprobe_saved_eflags; 176 regs->eflags |= kcb->kprobe_saved_eflags;
179 unlock_kprobes();
180 goto no_kprobe; 177 goto no_kprobe;
181 } 178 }
182 /* We have reentered the kprobe_handler(), since 179 /* We have reentered the kprobe_handler(), since
@@ -197,14 +194,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
197 goto ss_probe; 194 goto ss_probe;
198 } 195 }
199 } 196 }
200 /* If it's not ours, can't be delete race, (we hold lock). */
201 goto no_kprobe; 197 goto no_kprobe;
202 } 198 }
203 199
204 lock_kprobes();
205 p = get_kprobe(addr); 200 p = get_kprobe(addr);
206 if (!p) { 201 if (!p) {
207 unlock_kprobes();
208 if (regs->eflags & VM_MASK) { 202 if (regs->eflags & VM_MASK) {
209 /* We are in virtual-8086 mode. Return 0 */ 203 /* We are in virtual-8086 mode. Return 0 */
210 goto no_kprobe; 204 goto no_kprobe;
@@ -268,9 +262,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
268 struct kretprobe_instance *ri = NULL; 262 struct kretprobe_instance *ri = NULL;
269 struct hlist_head *head; 263 struct hlist_head *head;
270 struct hlist_node *node, *tmp; 264 struct hlist_node *node, *tmp;
271 unsigned long orig_ret_address = 0; 265 unsigned long flags, orig_ret_address = 0;
272 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 266 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
273 267
268 spin_lock_irqsave(&kretprobe_lock, flags);
274 head = kretprobe_inst_table_head(current); 269 head = kretprobe_inst_table_head(current);
275 270
276 /* 271 /*
@@ -310,7 +305,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
310 regs->eip = orig_ret_address; 305 regs->eip = orig_ret_address;
311 306
312 reset_current_kprobe(); 307 reset_current_kprobe();
313 unlock_kprobes(); 308 spin_unlock_irqrestore(&kretprobe_lock, flags);
314 preempt_enable_no_resched(); 309 preempt_enable_no_resched();
315 310
316 /* 311 /*
@@ -395,7 +390,7 @@ static void __kprobes resume_execution(struct kprobe *p,
395 390
396/* 391/*
397 * Interrupts are disabled on entry as trap1 is an interrupt gate and they 392 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
398 * remain disabled thoroughout this function. And we hold kprobe lock. 393 * remain disabled thoroughout this function.
399 */ 394 */
400static inline int post_kprobe_handler(struct pt_regs *regs) 395static inline int post_kprobe_handler(struct pt_regs *regs)
401{ 396{
@@ -419,7 +414,6 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
419 goto out; 414 goto out;
420 } 415 }
421 reset_current_kprobe(); 416 reset_current_kprobe();
422 unlock_kprobes();
423out: 417out:
424 preempt_enable_no_resched(); 418 preempt_enable_no_resched();
425 419
@@ -434,7 +428,6 @@ out:
434 return 1; 428 return 1;
435} 429}
436 430
437/* Interrupts disabled, kprobe_lock held. */
438static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 431static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
439{ 432{
440 struct kprobe *cur = kprobe_running(); 433 struct kprobe *cur = kprobe_running();
@@ -448,7 +441,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
448 regs->eflags |= kcb->kprobe_old_eflags; 441 regs->eflags |= kcb->kprobe_old_eflags;
449 442
450 reset_current_kprobe(); 443 reset_current_kprobe();
451 unlock_kprobes();
452 preempt_enable_no_resched(); 444 preempt_enable_no_resched();
453 } 445 }
454 return 0; 446 return 0;
@@ -463,7 +455,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
463 struct die_args *args = (struct die_args *)data; 455 struct die_args *args = (struct die_args *)data;
464 int ret = NOTIFY_DONE; 456 int ret = NOTIFY_DONE;
465 457
466 preempt_disable(); 458 rcu_read_lock();
467 switch (val) { 459 switch (val) {
468 case DIE_INT3: 460 case DIE_INT3:
469 if (kprobe_handler(args->regs)) 461 if (kprobe_handler(args->regs))
@@ -482,7 +474,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
482 default: 474 default:
483 break; 475 break;
484 } 476 }
485 preempt_enable(); 477 rcu_read_unlock();
486 return ret; 478 return ret;
487} 479}
488 480