aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAnil S Keshavamurthy <anil.s.keshavamurthy@intel.com>2006-01-09 23:52:43 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-10 11:01:40 -0500
commit49a2a1b83ba6fa40c41968d6a28ba16e7ed0c3f7 (patch)
treef257b535d0f09f9ac2531d40feb732349993665c /arch
parent41dead49ccb4d7f0a34d56478f487342a3c3ab2b (diff)
[PATCH] kprobes: changed from using spinlock to mutex
Since the Kprobes runtime exception handlers are now lock free — this code path now uses RCU to walk through the list — there is no need for register/unregister{_kprobe} to use spin_{lock/unlock}_irq{save/restore}. Serialization during registration/unregistration is now possible using just a mutex. In the process, this patch also fixes a minor memory leak for x86_64 and powerpc. Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/i386/kernel/kprobes.c6
-rw-r--r--arch/powerpc/kernel/kprobes.c14
-rw-r--r--arch/sparc64/kernel/kprobes.c6
-rw-r--r--arch/x86_64/kernel/kprobes.c7
4 files changed, 10 insertions, 23 deletions
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 19edcd526ba4..68fe10250486 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -58,13 +58,9 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
58 58
59int __kprobes arch_prepare_kprobe(struct kprobe *p) 59int __kprobes arch_prepare_kprobe(struct kprobe *p)
60{ 60{
61 return 0;
62}
63
64void __kprobes arch_copy_kprobe(struct kprobe *p)
65{
66 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 61 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
67 p->opcode = *p->addr; 62 p->opcode = *p->addr;
63 return 0;
68} 64}
69 65
70void __kprobes arch_arm_kprobe(struct kprobe *p) 66void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 5368f9c2e6bf..331e169e8629 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -60,13 +60,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
60 if (!p->ainsn.insn) 60 if (!p->ainsn.insn)
61 ret = -ENOMEM; 61 ret = -ENOMEM;
62 } 62 }
63 return ret;
64}
65 63
66void __kprobes arch_copy_kprobe(struct kprobe *p) 64 if (!ret) {
67{ 65 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
68 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 66 p->opcode = *p->addr;
69 p->opcode = *p->addr; 67 }
68
69 return ret;
70} 70}
71 71
72void __kprobes arch_arm_kprobe(struct kprobe *p) 72void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -85,9 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
85 85
86void __kprobes arch_remove_kprobe(struct kprobe *p) 86void __kprobes arch_remove_kprobe(struct kprobe *p)
87{ 87{
88 down(&kprobe_mutex);
89 free_insn_slot(p->ainsn.insn); 88 free_insn_slot(p->ainsn.insn);
90 up(&kprobe_mutex);
91} 89}
92 90
93static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 91static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index a97b0f0727ab..bbd5aa6818ea 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -43,14 +43,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
43 43
44int __kprobes arch_prepare_kprobe(struct kprobe *p) 44int __kprobes arch_prepare_kprobe(struct kprobe *p)
45{ 45{
46 return 0;
47}
48
49void __kprobes arch_copy_kprobe(struct kprobe *p)
50{
51 p->ainsn.insn[0] = *p->addr; 46 p->ainsn.insn[0] = *p->addr;
52 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2; 47 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
53 p->opcode = *p->addr; 48 p->opcode = *p->addr;
49 return 0;
54} 50}
55 51
56void __kprobes arch_arm_kprobe(struct kprobe *p) 52void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index afe11f4fbd1d..8b8943bfb89e 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -42,8 +42,8 @@
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
43#include <asm/kdebug.h> 43#include <asm/kdebug.h>
44 44
45static DECLARE_MUTEX(kprobe_mutex);
46void jprobe_return_end(void); 45void jprobe_return_end(void);
46void __kprobes arch_copy_kprobe(struct kprobe *p);
47 47
48DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 48DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
49DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 49DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -69,12 +69,11 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
69int __kprobes arch_prepare_kprobe(struct kprobe *p) 69int __kprobes arch_prepare_kprobe(struct kprobe *p)
70{ 70{
71 /* insn: must be on special executable page on x86_64. */ 71 /* insn: must be on special executable page on x86_64. */
72 down(&kprobe_mutex);
73 p->ainsn.insn = get_insn_slot(); 72 p->ainsn.insn = get_insn_slot();
74 up(&kprobe_mutex);
75 if (!p->ainsn.insn) { 73 if (!p->ainsn.insn) {
76 return -ENOMEM; 74 return -ENOMEM;
77 } 75 }
76 arch_copy_kprobe(p);
78 return 0; 77 return 0;
79} 78}
80 79
@@ -223,9 +222,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
223 222
224void __kprobes arch_remove_kprobe(struct kprobe *p) 223void __kprobes arch_remove_kprobe(struct kprobe *p)
225{ 224{
226 down(&kprobe_mutex);
227 free_insn_slot(p->ainsn.insn); 225 free_insn_slot(p->ainsn.insn);
228 up(&kprobe_mutex);
229} 226}
230 227
231static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 228static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)