author     Ananth N Mavinakayanahalli <ananth@in.ibm.com>    2005-11-07 04:00:07 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>             2005-11-07 10:53:45 -0500
commit     e65845235c8120be63001fc1a4ac00c819194bbe (patch)
tree       209a081cc07375290743ceb45f52dc474f45382a
parent     66ff2d0691e00e1e7bfdf398a970310c9a0fe671 (diff)
[PATCH] Kprobes: Track kprobe on a per_cpu basis - base changes
Changes to the base kprobe infrastructure to track kprobe execution on a
per-cpu basis.
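
For illustration only (none of the code below is part of this patch): an
architecture's breakpoint handler is now expected to record the active probe
in the per-cpu current_kprobe and clear it when done, instead of comparing
the old global kprobe_cpu against smp_processor_id().  The sketch below shows
that flow using only the interfaces introduced here; sketch_handle_breakpoint()
and sketch_singlestep() are made-up names, and the actual arch conversions are
presumably done in follow-on patches.

#include <linux/kprobes.h>
#include <linux/preempt.h>

/* hypothetical arch helper: single-step the copied instruction out of line */
static void sketch_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	/* arch-specific details omitted in this sketch */
}

static int sketch_handle_breakpoint(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;

	preempt_disable();		/* per-cpu state is only stable with preemption off */
	kcb = get_kprobe_ctlblk();	/* this CPU's arch scratch area */

	if (kprobe_running()) {
		/* a probe handler is already active on this CPU */
		preempt_enable_no_resched();
		return 0;		/* sketch: punt on re-entrancy */
	}

	__get_cpu_var(current_kprobe) = p;	/* mark p active on this CPU */
	if (p->pre_handler)
		p->pre_handler(p, regs);

	sketch_singlestep(p, regs, kcb);	/* run the copied instruction */

	if (p->post_handler)
		p->post_handler(p, regs, 0);

	reset_current_kprobe();			/* clear the per-cpu marker */
	preempt_enable_no_resched();
	return 1;
}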
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/kprobes.h | 31
-rw-r--r--  kernel/kprobes.c        | 43
2 files changed, 50 insertions(+), 24 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e30afdca7917..6720305a31e8 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/kprobes.h>
 
@@ -106,6 +107,9 @@ struct jprobe {
 	kprobe_opcode_t *entry;	/* probe handling code to jump to */
 };
 
+DECLARE_PER_CPU(struct kprobe *, current_kprobe);
+DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
 #ifdef ARCH_SUPPORTS_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
 #else /* ARCH_SUPPORTS_KRETPROBES */
@@ -146,13 +150,6 @@ struct kretprobe_instance {
 void lock_kprobes(void);
 void unlock_kprobes(void);
 
-/* kprobe running now on this CPU? */
-static inline int kprobe_running(void)
-{
-	extern unsigned int kprobe_cpu;
-	return kprobe_cpu == smp_processor_id();
-}
-
 extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_copy_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
@@ -167,6 +164,22 @@ extern void free_insn_slot(kprobe_opcode_t *slot);
 struct kprobe *get_kprobe(void *addr);
 struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 
+/* kprobe_running() will just return the current_kprobe on this CPU */
+static inline struct kprobe *kprobe_running(void)
+{
+	return (__get_cpu_var(current_kprobe));
+}
+
+static inline void reset_current_kprobe(void)
+{
+	__get_cpu_var(current_kprobe) = NULL;
+}
+
+static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
+{
+	return (&__get_cpu_var(kprobe_ctlblk));
+}
+
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
@@ -183,9 +196,9 @@ void add_rp_inst(struct kretprobe_instance *ri);
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri);
 #else /* CONFIG_KPROBES */
-static inline int kprobe_running(void)
+static inline struct kprobe *kprobe_running(void)
 {
-	return 0;
+	return NULL;
 }
 static inline int register_kprobe(struct kprobe *p)
 {
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ce4915dd683a..6da8f9b33d1e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -51,7 +51,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -188,6 +188,17 @@ void __kprobes unlock_kprobes(void)
 	local_irq_restore(flags);
 }
 
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
+{
+	__get_cpu_var(kprobe_instance) = kp;
+}
+
+static inline void reset_kprobe_instance(void)
+{
+	__get_cpu_var(kprobe_instance) = NULL;
+}
+
 /* You have to be holding the kprobe_lock */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
@@ -213,11 +224,11 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
 		}
-		curr_kprobe = NULL;
+		reset_kprobe_instance();
 	}
 	return 0;
 }
@@ -229,9 +240,9 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->post_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
-			curr_kprobe = NULL;
+			reset_kprobe_instance();
 		}
 	}
 	return;
@@ -240,12 +251,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
 	/*
 	 * if we faulted "during" the execution of a user specified
 	 * probe handler, invoke just that probe's fault handler
 	 */
-	if (curr_kprobe && curr_kprobe->fault_handler) {
-		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+	if (cur && cur->fault_handler) {
+		if (cur->fault_handler(cur, regs, trapnr))
 			return 1;
 	}
 	return 0;
@@ -253,15 +266,15 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *kp = curr_kprobe;
-	if (curr_kprobe && kp->break_handler) {
-		if (kp->break_handler(kp, regs)) {
-			curr_kprobe = NULL;
-			return 1;
-		}
-	}
-	curr_kprobe = NULL;
-	return 0;
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	int ret = 0;
+
+	if (cur && cur->break_handler) {
+		if (cur->break_handler(cur, regs))
+			ret = 1;
+	}
+	reset_kprobe_instance();
+	return ret;
 }
 
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)