author     Rusty Lynch <rusty.lynch@intel.com>        2005-06-27 18:17:10 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-27 18:23:53 -0400
commit     ba8af12f432c4f00ddb0bc1068b57b20aac93ecf
tree       98e51269052fb24e232d67fa2c55d6cec803c787   /arch/x86_64/kernel/kprobes.c
parent     4bdbd37f6d01abc4c002bb8de90ea2c3bc7abe7e
[PATCH] Return probe redesign: x86_64 specific changes
The following patch contains the x86_64-specific changes for the new
return probe design. Changes include:

 * Removing the architecture-specific functions for querying a return
   probe instance off a stack address
 * A complete rework of arch_prepare_kretprobe() and
   trampoline_probe_handler()
 * Removing trampoline_post_handler()
 * Adding arch_init(), so the architecture code now registers the
   return probe trampoline itself instead of kernel/kprobes.c doing it

NOTE: With this new design, the dependency on calculating a pointer to
the task off the stack pointer no longer exists (resolving the problem
with interrupt stacks pointed out in the original feedback to this
port).

Signed-off-by: Rusty Lynch <rusty.lynch@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
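For context, the consumer-facing side of this machinery is unchanged by the redesign: a return probe is still declared as a struct kretprobe and registered with register_kretprobe(). Below is a minimal sketch of a module using that API. Note that the .kp.symbol_name convenience field shown here postdates this tree (on 2.6.13-era kernels you would populate .kp.addr with the probed function's address instead), and the probed symbol "do_fork" is only an example.

    #include <linux/module.h>
    #include <linux/kprobes.h>

    /* Runs from the trampoline probe after the target returns;
     * on this tree's x86_64, regs->rax carries the return value. */
    static int my_ret_handler(struct kretprobe_instance *ri,
                              struct pt_regs *regs)
    {
            printk(KERN_INFO "probed function returned %ld\n",
                   (long)regs->rax);
            return 0;
    }

    static struct kretprobe my_rp = {
            .handler = my_ret_handler,
            .maxactive = 20,  /* kretprobe_instance slots to preallocate */
            .kp.symbol_name = "do_fork",  /* later kernels; here set .kp.addr */
    };

    static int __init my_init(void)
    {
            return register_kretprobe(&my_rp);
    }

    static void __exit my_exit(void)
    {
            unregister_kretprobe(&my_rp);
            printk(KERN_INFO "missed %d returns\n", my_rp.nmissed);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

maxactive bounds how many concurrently probed calls can be tracked; overflows are counted in nmissed rather than silently lost, which is exactly what the rp->nmissed++ path in the first hunk below implements.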
Diffstat (limited to 'arch/x86_64/kernel/kprobes.c')
-rw-r--r--   arch/x86_64/kernel/kprobes.c   133
1 file changed, 70 insertions(+), 63 deletions(-)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 6a1c88376bef..acd2a778ebe6 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -272,48 +272,23 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = (unsigned long)p->ainsn.insn;
 }
 
-struct task_struct *arch_get_kprobe_task(void *ptr)
-{
-	return ((struct thread_info *) (((unsigned long) ptr) &
-					(~(THREAD_SIZE -1))))->task;
-}
-
 void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)regs->rsp;
 	struct kretprobe_instance *ri;
-	static void *orig_ret_addr;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) *sara;
 
-	/*
-	 * Save the return address when the return probe hits
-	 * the first time, and use it to populate the (krprobe
-	 * instance)->ret_addr for subsequent return probes at
-	 * the same addrress since stack address would have
-	 * the kretprobe_trampoline by then.
-	 */
-	if (((void*) *sara) != kretprobe_trampoline)
-		orig_ret_addr = (void*) *sara;
-
-	if ((ri = get_free_rp_inst(rp)) != NULL) {
-		ri->rp = rp;
-		ri->stack_addr = sara;
-		ri->ret_addr = orig_ret_addr;
-		add_rp_inst(ri);
 		/* Replace the return addr with trampoline addr */
 		*sara = (unsigned long) &kretprobe_trampoline;
-	} else {
-		rp->nmissed++;
-	}
-}
 
-void arch_kprobe_flush_task(struct task_struct *tk)
-{
-	struct kretprobe_instance *ri;
-	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
-		*((unsigned long *)(ri->stack_addr)) =
-			(unsigned long) ri->ret_addr;
-		recycle_rp_inst(ri);
-	}
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
 }
 
 /*
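This rework is what lets the patch drop arch_get_kprobe_task() and arch_kprobe_flush_task(): an instance no longer remembers a raw stack slot (stack_addr) but records its owning task and the displaced return address directly. For reference, the generic bookkeeping structure populated above is defined by the companion architecture-independent patch roughly as follows (paraphrased from that series' include/linux/kprobes.h):

    struct kretprobe_instance {
            struct hlist_node uflist;  /* on the probe's free or used list */
            struct hlist_node hlist;   /* task-hashed bucket linkage */
            struct kretprobe *rp;      /* owning return probe */
            kprobe_opcode_t *ret_addr; /* displaced real return address */
            struct task_struct *task;  /* task the probed call belongs to */
    };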
@@ -426,36 +401,59 @@ no_kprobe:
  */
 int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct task_struct *tsk;
-	struct kretprobe_instance *ri;
-	struct hlist_head *head;
-	struct hlist_node *node;
-	unsigned long *sara = (unsigned long *)regs->rsp - 1;
-
-	tsk = arch_get_kprobe_task(sara);
-	head = kretprobe_inst_table_head(tsk);
-
-	hlist_for_each_entry(ri, node, head, hlist) {
-		if (ri->stack_addr == sara && ri->rp) {
-			if (ri->rp->handler)
-				ri->rp->handler(ri, regs);
-		}
-	}
-	return 0;
-}
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
-void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
-			      unsigned long flags)
-{
-	struct kretprobe_instance *ri;
-	/* RA already popped */
-	unsigned long *sara = ((unsigned long *)regs->rsp) - 1;
+	head = kretprobe_inst_table_head(current);
 
-	while ((ri = get_rp_inst(sara))) {
-		regs->rip = (unsigned long)ri->ret_addr;
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because an multiple functions in the call path
+	 * have a return probe installed on them, and/or more then one return
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
 	}
-	regs->eflags &= ~TF_MASK;
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->rip = orig_ret_address;
+
+	unlock_kprobes();
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
 }
 
 /*
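The kretprobe_trampoline address that arch_prepare_kretprobe() plants and that this handler matches against is not a real function: it is a bare labeled nop emitted from a holder routine declared earlier in this file, roughly as below (reproduced from the same patch series for reference). arch_init() at the end of the patch then registers an ordinary kprobe on that label, so every probed return traps into trampoline_probe_handler().

    void kretprobe_trampoline_holder(void)
    {
            /* Probed return addresses are rewritten to this label; the
             * kprobe registered by arch_init() breakpoints it. */
            asm volatile (".global kretprobe_trampoline\n"
                          "kretprobe_trampoline:\n"
                          " nop\n");
    }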
@@ -548,8 +546,7 @@ int post_kprobe_handler(struct pt_regs *regs)
 		current_kprobe->post_handler(current_kprobe, regs, 0);
 	}
 
-	if (current_kprobe->post_handler != trampoline_post_handler)
-		resume_execution(current_kprobe, regs);
+	resume_execution(current_kprobe, regs);
 	regs->eflags |= kprobe_saved_rflags;
 
 	/* Restore the original saved kprobes variables and continue. */
@@ -679,3 +676,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	return 0;
 }
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	return register_kprobe(&trampoline_p);
+}
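On the generic side of the series, kernel/kprobes.c calls this new hook once at boot, so the trampoline probe is in place before any kretprobe can fire. Sketched from the companion architecture-independent patch (details may differ slightly):

    static int __init init_kprobes(void)
    {
            int i, err;

            /* initialize the hash buckets walked by the handler above */
            for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                    INIT_HLIST_HEAD(&kprobe_table[i]);
                    INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
            }

            err = arch_init();      /* registers trampoline_p on x86_64 */
            if (!err)
                    err = register_die_notifier(&kprobe_exceptions_nb);
            return err;
    }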