author		Ananth N Mavinakayanahalli <ananth@in.ibm.com>	2005-11-07 04:00:07 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-07 10:53:45 -0500
commit		e65845235c8120be63001fc1a4ac00c819194bbe
tree		209a081cc07375290743ceb45f52dc474f45382a /kernel
parent		66ff2d0691e00e1e7bfdf398a970310c9a0fe671
[PATCH] Kprobes: Track kprobe on a per_cpu basis - base changes
Changes to the base kprobe infrastructure to track kprobe execution on a
per-cpu basis.
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
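
The core of the change is visible in the first hunk below: the single global
curr_kprobe slot, which remembered the kprobe being handled, is replaced by
one slot per CPU, so simultaneous probe hits on different processors no
longer clobber each other's state. A minimal sketch of the pattern (the two
declarations are taken from the diff; the comments are editorial):

	#include <linux/kprobes.h>
	#include <linux/percpu.h>

	/* Before: one global slot, shared by every CPU. Concurrent probe
	 * hits on two CPUs would overwrite each other's notion of the
	 * currently executing kprobe. */
	static struct kprobe *curr_kprobe;

	/* After: a private slot per CPU. */
	static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;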
Diffstat (limited to 'kernel')
 kernel/kprobes.c | 43 ++++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ce4915dd683a..6da8f9b33d1e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -51,7 +51,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -188,6 +188,17 @@ void __kprobes unlock_kprobes(void)
 	local_irq_restore(flags);
 }
 
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
+{
+	__get_cpu_var(kprobe_instance) = kp;
+}
+
+static inline void reset_kprobe_instance(void)
+{
+	__get_cpu_var(kprobe_instance) = NULL;
+}
+
 /* You have to be holding the kprobe_lock */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
@@ -213,11 +224,11 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
 		}
-		curr_kprobe = NULL;
+		reset_kprobe_instance();
 	}
 	return 0;
 }
@@ -229,9 +240,9 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->post_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
-			curr_kprobe = NULL;
+			reset_kprobe_instance();
 		}
 	}
 	return;
@@ -240,12 +251,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
 	/*
 	 * if we faulted "during" the execution of a user specified
 	 * probe handler, invoke just that probe's fault handler
 	 */
-	if (curr_kprobe && curr_kprobe->fault_handler) {
-		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+	if (cur && cur->fault_handler) {
+		if (cur->fault_handler(cur, regs, trapnr))
 			return 1;
 	}
 	return 0;
@@ -253,15 +266,15 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *kp = curr_kprobe;
-	if (curr_kprobe && kp->break_handler) {
-		if (kp->break_handler(kp, regs)) {
-			curr_kprobe = NULL;
-			return 1;
-		}
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	int ret = 0;
+
+	if (cur && cur->break_handler) {
+		if (cur->break_handler(cur, regs))
+			ret = 1;
 	}
-	curr_kprobe = NULL;
-	return 0;
+	reset_kprobe_instance();
+	return ret;
 }
 
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
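
A note on the "__ versions" comment in the second hunk: in the 2.6-era
per-CPU API, get_cpu_var()/put_cpu_var() bracket the access with
preempt_disable()/preempt_enable(), while __get_cpu_var() touches the
variable directly and is only safe where preemption is already disabled,
as it is while a kprobe handler runs. A hedged illustration of the
difference (the helper below is hypothetical, not part of the patch):

	#include <linux/kprobes.h>
	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

	/* Hypothetical helper, safe even from preemptible context:
	 * get_cpu_var() disables preemption before the access and
	 * put_cpu_var() re-enables it afterwards. */
	static struct kprobe *peek_kprobe_instance(void)
	{
		struct kprobe *cur = get_cpu_var(kprobe_instance);
		put_cpu_var(kprobe_instance);
		return cur;
	}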