author	Rusty Lynch <rusty.lynch@intel.com>	2005-06-23 03:09:25 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-23 12:45:21 -0400
commit	7e1048b11c5afe79aac46a42e3ccec86b8365c6d (patch)
tree	4f9caee0153e688f22d7e7b6fdc62e35be4fc3fe /arch/i386
parent	73649dab0fd524cb8545a8cb83c6eaf77b107105 (diff)
[PATCH] Move kprobe [dis]arming into arch specific code
The architecture independent code of the current kprobes implementation arms and disarms kprobes at registration time. The problem is that this code assumes arming and disarming is just a simple write of some magic value to an address. That is problematic for ia64, where instructions look more like structures and we cannot insert breakpoints by doing something like:

	*p->addr = BREAKPOINT_INSTRUCTION;

The following patch to 2.6.12-rc4-mm2 adds two new architecture dependent functions:

 * void arch_arm_kprobe(struct kprobe *p)
 * void arch_disarm_kprobe(struct kprobe *p)

and then adds the new functions for each of the architectures that already implement kprobes (sparc64/ppc64/i386/x86_64).

I thought arch_[dis]arm_kprobe was the most descriptive name for what is really happening, but each of the architectures already had a disarm_kprobe() function that was really a "disarm and do some other clean-up items as needed when you stumble across a recursive kprobe". So I took the liberty of changing the code that was calling disarm_kprobe() to call arch_disarm_kprobe() instead, and moved the cleanup into the block of code dealing with the recursive kprobe case.

So far this patch has been tested on i386, x86_64 and ppc64, but it still needs to be tested on sparc64.

Signed-off-by: Rusty Lynch <rusty.lynch@intel.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
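As a rough illustration of how the split is meant to be used, the architecture independent registration path can now delegate the breakpoint write to the arch hooks. The following is a simplified sketch, not the actual kernel/kprobes.c: locking, hash table insertion and error handling are omitted.

	int register_kprobe(struct kprobe *p)
	{
		int ret;

		ret = arch_prepare_kprobe(p);
		if (ret)
			return ret;
		arch_copy_kprobe(p);

		/*
		 * Previously the generic code armed the probe itself with a
		 * direct store of BREAKPOINT_INSTRUCTION, which only works
		 * when a breakpoint is a single writable opcode.  The
		 * architecture now decides how to plant it.
		 */
		arch_arm_kprobe(p);
		return 0;
	}

	void unregister_kprobe(struct kprobe *p)
	{
		arch_disarm_kprobe(p);	/* restore the original instruction */
		arch_remove_kprobe(p);
	}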
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/kprobes.c  |  19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 048f754bbe23..2314d8d306fd 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -33,6 +33,7 @@
 #include <linux/ptrace.h>
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/desc.h>
 
@@ -71,16 +72,25 @@ int arch_prepare_kprobe(struct kprobe *p)
 void arch_copy_kprobe(struct kprobe *p)
 {
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	p->opcode = *p->addr;
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
+	*p->addr = BREAKPOINT_INSTRUCTION;
+	flush_icache_range((unsigned long) p->addr,
+			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+void arch_disarm_kprobe(struct kprobe *p)
 {
 	*p->addr = p->opcode;
-	regs->eip = (unsigned long)p->addr;
+	flush_icache_range((unsigned long) p->addr,
+			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -177,7 +187,8 @@ static int kprobe_handler(struct pt_regs *regs)
 			unlock_kprobes();
 			goto no_kprobe;
 		}
-		disarm_kprobe(p, regs);
+		arch_disarm_kprobe(p);
+		regs->eip = (unsigned long)p->addr;
 		ret = 1;
 	} else {
 		p = current_kprobe;
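For readability, this is how the i386 arm/disarm pair reads once the patch is applied, assembled from the second hunk above. On i386, BREAKPOINT_INSTRUCTION is the one-byte int3 opcode (0xcc), so arming really is a single store followed by an icache flush; the point of the new hooks is that architectures such as ia64, whose instructions are bundles rather than single writable opcodes, are free to do something more involved here.

	void arch_arm_kprobe(struct kprobe *p)
	{
		/* plant the breakpoint: on i386 this is the 0xcc (int3) opcode */
		*p->addr = BREAKPOINT_INSTRUCTION;
		flush_icache_range((unsigned long) p->addr,
				   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
	}

	void arch_disarm_kprobe(struct kprobe *p)
	{
		/* restore the original opcode saved by arch_copy_kprobe() */
		*p->addr = p->opcode;
		flush_icache_range((unsigned long) p->addr,
				   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
	}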