author     Rusty Lynch <rusty.lynch@intel.com>        2005-06-23 03:09:23 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-23 12:45:21 -0400
commit     73649dab0fd524cb8545a8cb83c6eaf77b107105 (patch)
tree       70f43b37ba915de148c28008e275dacec200e33f /arch/x86_64/kernel
parent     b94cce926b2b902b79380ccba370d6f9f2980de0 (diff)
[PATCH] x86_64 specific function return probes
The following patch adds the x86_64 architecture specific implementation
for function return probes.

Function return probes are a mechanism built on top of kprobes that allows
a caller to register a handler to be called when a given function exits.
For example, to instrument the return path of sys_mkdir:

static int sys_mkdir_exit(struct kretprobe_instance *i, struct pt_regs *regs)
{
	printk("sys_mkdir exited\n");
	return 0;
}

static struct kretprobe return_probe = {
	.handler = sys_mkdir_exit,
};

<inside setup function>

return_probe.kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("sys_mkdir");
if (register_kretprobe(&return_probe)) {
	printk(KERN_DEBUG "Unable to register return probe!\n");
	/* do error path */
}

<inside cleanup function>

unregister_kretprobe(&return_probe);

The way this works is:

* At system initialization time, kernel/kprobes.c installs a kprobe on a
  function called kretprobe_trampoline() that is implemented in
  arch/x86_64/kernel/kprobes.c (more on this below).

* When a return probe is registered using register_kretprobe(),
  kernel/kprobes.c installs a kprobe on the first instruction of the
  targeted function, with its pre-handler set to arch_prepare_kretprobe(),
  which is implemented in arch/x86_64/kernel/kprobes.c.

* arch_prepare_kretprobe() prepares a kretprobe instance that stores:
  - nodes for hanging this instance on the used or free list
  - a pointer to the return probe
  - the original return address
  - a pointer to the stack address

  With all this stowed away, arch_prepare_kretprobe() then sets the return
  address for the targeted function to a special trampoline function,
  kretprobe_trampoline(), implemented in arch/x86_64/kernel/kprobes.c.

* The kprobe completes as normal, with control passing back to the target
  function, which executes as normal and eventually returns to our
  trampoline function.

* Since a kprobe was installed on kretprobe_trampoline() during system
  initialization, control passes back to kprobes via the architecture
  specific function trampoline_probe_handler(), which looks up the
  instance in an hlist maintained by kernel/kprobes.c and then calls the
  handler function.

* When trampoline_probe_handler() is done, the kprobes infrastructure
  single-steps the original instruction (in this case just a nop) and then
  calls trampoline_post_handler(). trampoline_post_handler() looks up the
  instance again, puts the instance back on the free list, and then jumps
  back to the original return address.

To recap, instrumenting the exit path of a function with this
implementation causes four interruptions:

  - a breakpoint at the very beginning of the function, allowing us to
    switch out the return address
  - a single step interruption to execute the original instruction that we
    replaced with the break instruction (normal kprobe flow)
  - a breakpoint in the trampoline function where our instrumented
    function returned to
  - a single step interruption to execute the original instruction that we
    replaced with the break instruction (normal kprobe flow)

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
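The per-hit bookkeeping described in the arch_prepare_kretprobe() step above
corresponds roughly to the structure sketched below. This is inferred from
that description and from the fields the diff below touches (ri->rp,
ri->ret_addr, ri->stack_addr, ri->hlist); the actual definition lives in the
generic kretprobes patch this one builds on, and the uflist field name is an
assumption.

/* Sketch only: approximate shape of a return-probe instance (not part of this diff). */
struct kretprobe_instance {
	struct hlist_node uflist;	/* node for the used/free lists (assumed name) */
	struct hlist_node hlist;	/* node for the per-task instance hash table */
	struct kretprobe *rp;		/* the return probe this instance belongs to */
	void *ret_addr;			/* original return address that was displaced */
	void *stack_addr;		/* stack slot that held the return address */
};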
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/kprobes.c  98
-rw-r--r--  arch/x86_64/kernel/process.c  16
2 files changed, 113 insertions(+), 1 deletion(-)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index f77f8a0ff187..203672ca7401 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -27,6 +27,8 @@
  * <prasanna@in.ibm.com> adapted for x86_64
  * 2005-Mar	Roland McGrath <roland@redhat.com>
  *		Fixed to handle %rip-relative addressing mode correctly.
+ * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
+ *		Added function return probes functionality
  */
 
 #include <linux/config.h>
@@ -240,6 +242,50 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = (unsigned long)p->ainsn.insn;
 }
 
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+	return ((struct thread_info *) (((unsigned long) ptr) &
+					(~(THREAD_SIZE - 1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	unsigned long *sara = (unsigned long *)regs->rsp;
+	struct kretprobe_instance *ri;
+	static void *orig_ret_addr;
+
+	/*
+	 * Save the return address when the return probe hits
+	 * the first time, and use it to populate the (kretprobe
+	 * instance)->ret_addr for subsequent return probes at
+	 * the same address, since the stack address would hold
+	 * the kretprobe_trampoline by then.
+	 */
+	if (((void *) *sara) != kretprobe_trampoline)
+		orig_ret_addr = (void *) *sara;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->stack_addr = sara;
+		ri->ret_addr = orig_ret_addr;
+		add_rp_inst(ri);
+		/* Replace the return addr with trampoline addr */
+		*sara = (unsigned long) &kretprobe_trampoline;
+	} else {
+		rp->nmissed++;
+	}
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+		*((unsigned long *)(ri->stack_addr)) =
+					(unsigned long) ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+}
+
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
@@ -317,6 +363,55 @@ no_kprobe:
 }
 
 /*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (	".global kretprobe_trampoline\n"
+			"kretprobe_trampoline: \n"
+			"nop\n");
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	unsigned long *sara = (unsigned long *)regs->rsp - 1;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = kretprobe_inst_table_head(tsk);
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara && ri->rp) {
+			if (ri->rp->handler)
+				ri->rp->handler(ri, regs);
+		}
+	}
+	return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+			     unsigned long flags)
+{
+	struct kretprobe_instance *ri;
+	/* RA already popped */
+	unsigned long *sara = ((unsigned long *)regs->rsp) - 1;
+
+	while ((ri = get_rp_inst(sara))) {
+		regs->rip = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+	regs->eflags &= ~TF_MASK;
+}
+
+/*
  * Called after single-stepping.  p->addr is the address of the
  * instruction whose first byte has been replaced by the "int 3"
  * instruction.  To avoid the SMP problems that can occur when we
@@ -404,7 +499,8 @@ int post_kprobe_handler(struct pt_regs *regs)
 	if (current_kprobe->post_handler)
 		current_kprobe->post_handler(current_kprobe, regs, 0);
 
-	resume_execution(current_kprobe, regs);
+	if (current_kprobe->post_handler != trampoline_post_handler)
+		resume_execution(current_kprobe, regs);
 	regs->eflags |= kprobe_saved_rflags;
 
 	unlock_kprobes();
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index dce8bab4306c..e59d1f9d6163 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -34,6 +34,7 @@
 #include <linux/ptrace.h>
 #include <linux/utsname.h>
 #include <linux/random.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -293,6 +294,14 @@ void exit_thread(void)
 {
 	struct task_struct *me = current;
 	struct thread_struct *t = &me->thread;
+
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function; it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(me);
+
 	if (me->thread.io_bitmap_ptr) {
 		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
 
@@ -312,6 +321,13 @@ void flush_thread(void)
 	struct task_struct *tsk = current;
 	struct thread_info *t = current_thread_info();
 
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function; it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(tsk);
+
 	if (t->flags & _TIF_ABI_PENDING)
 		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
 
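As a usage note, the sys_mkdir example from the commit message can be packaged
as a minimal module roughly as follows. This is only a sketch against the
2.6.12-era kretprobe API added by this series: the registration and handler
code is lifted from the commit message, while the module boilerplate and the
error handling around it are assumptions added for completeness.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

/* Return-probe handler: runs each time sys_mkdir() returns. */
static int sys_mkdir_exit(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk("sys_mkdir exited\n");
	return 0;
}

static struct kretprobe return_probe = {
	.handler = sys_mkdir_exit,
};

static int __init retprobe_example_init(void)
{
	/* Resolve the probed function at load time, as in the commit message. */
	return_probe.kp.addr =
		(kprobe_opcode_t *) kallsyms_lookup_name("sys_mkdir");
	if (!return_probe.kp.addr)
		return -EINVAL;

	if (register_kretprobe(&return_probe)) {
		printk(KERN_DEBUG "Unable to register return probe!\n");
		return -EINVAL;
	}
	return 0;
}

static void __exit retprobe_example_exit(void)
{
	unregister_kretprobe(&return_probe);
}

module_init(retprobe_example_init);
module_exit(retprobe_example_exit);
MODULE_LICENSE("GPL");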