author     Ananth N Mavinakayanahalli <ananth@in.ibm.com>   2005-11-07 04:00:12 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>            2005-11-07 10:53:46 -0500
commit     e7a510f92c1e482a7db05afd3cb84af1f4cfe0bc (patch)
tree       fc9eb17fc6788dc9319e134d17bd240dcf6f04a1 /arch/x86_64/kernel/kprobes.c
parent     f215d985e936cf493959b365a10593b6d5f80447 (diff)
[PATCH] Kprobes: Track kprobe on a per_cpu basis - x86_64 changes
x86_64 changes to track kprobe execution on a per-CPU basis. We now track the
kprobe state machine independently on each CPU, using an arch-specific kprobe
control block (a minimal sketch of the pattern follows the sign-offs).
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
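
To make the new pattern easy to follow before reading the diff, here is a
minimal sketch (not part of the patch) of how the per-CPU control block
replaces the old file-scope globals. It assumes the generic helpers introduced
elsewhere in this series (get_kprobe_ctlblk(), kprobe_running()) behave as the
handlers below use them; note_probe_hit() is a made-up name for illustration
only.

/* Sketch only: per-CPU kprobe state as adopted by this patch. */
#include <linux/percpu.h>
#include <linux/kprobes.h>

/* One "current probe" pointer and one control block per CPU. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void note_probe_hit(struct kprobe *p)
{
        /* get_kprobe_ctlblk() hands back this CPU's control block. */
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        __get_cpu_var(current_kprobe) = p;      /* was: current_kprobe = p;  */
        kcb->kprobe_status = KPROBE_HIT_ACTIVE; /* was: kprobe_status = ...; */
}

Because each CPU owns its own copy of this state, probes firing concurrently on
different CPUs no longer contend for the shared statics that the first hunk
below deletes.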
Diffstat (limited to 'arch/x86_64/kernel/kprobes.c')
-rw-r--r--  arch/x86_64/kernel/kprobes.c  129
1 file changed, 70 insertions, 59 deletions
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index ebfa2c9241ca..6cb40d133b7c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -44,17 +44,10 @@
 #include <asm/kdebug.h>
 
 static DECLARE_MUTEX(kprobe_mutex);
-
-static struct kprobe *current_kprobe;
-static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
-static struct pt_regs jprobe_saved_regs;
-static long *jprobe_saved_rsp;
 void jprobe_return_end(void);
 
-/* copy of the kernel stack at the probe fire time */
-static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 /*
  * returns non-zero if opcode modifies the interrupt flag.
@@ -236,29 +229,30 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
         up(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-        kprobe_prev = current_kprobe;
-        kprobe_status_prev = kprobe_status;
-        kprobe_old_rflags_prev = kprobe_old_rflags;
-        kprobe_saved_rflags_prev = kprobe_saved_rflags;
+        kcb->prev_kprobe.kp = kprobe_running();
+        kcb->prev_kprobe.status = kcb->kprobe_status;
+        kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
+        kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
 }
 
-static inline void restore_previous_kprobe(void)
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-        current_kprobe = kprobe_prev;
-        kprobe_status = kprobe_status_prev;
-        kprobe_old_rflags = kprobe_old_rflags_prev;
-        kprobe_saved_rflags = kprobe_saved_rflags_prev;
+        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+        kcb->kprobe_status = kcb->prev_kprobe.status;
+        kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
+        kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+                                      struct kprobe_ctlblk *kcb)
 {
-        current_kprobe = p;
-        kprobe_saved_rflags = kprobe_old_rflags
+        __get_cpu_var(current_kprobe) = p;
+        kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
                 = (regs->eflags & (TF_MASK | IF_MASK));
         if (is_IF_modifier(p->ainsn.insn))
-                kprobe_saved_rflags &= ~IF_MASK;
+                kcb->kprobe_saved_rflags &= ~IF_MASK;
 }
 
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -301,6 +295,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
         struct kprobe *p;
         int ret = 0;
         kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
         /* Check we're not actually recursing */
         if (kprobe_running()) {
@@ -308,13 +303,13 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
                    Disarm the probe we just hit, and ignore it. */
                 p = get_kprobe(addr);
                 if (p) {
-                        if (kprobe_status == KPROBE_HIT_SS &&
+                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                                 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
                                 regs->eflags &= ~TF_MASK;
-                                regs->eflags |= kprobe_saved_rflags;
+                                regs->eflags |= kcb->kprobe_saved_rflags;
                                 unlock_kprobes();
                                 goto no_kprobe;
-                        } else if (kprobe_status == KPROBE_HIT_SSDONE) {
+                        } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
                                 /* TODO: Provide re-entrancy from
                                  * post_kprobes_handler() and avoid exception
                                  * stack corruption while single-stepping on
@@ -322,6 +317,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
                                  */
                                 arch_disarm_kprobe(p);
                                 regs->rip = (unsigned long)p->addr;
+                                reset_current_kprobe();
                                 ret = 1;
                         } else {
                                 /* We have reentered the kprobe_handler(), since
@@ -331,15 +327,15 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
                                  * of the new probe without calling any user
                                  * handlers.
                                  */
-                                save_previous_kprobe();
-                                set_current_kprobe(p, regs);
+                                save_previous_kprobe(kcb);
+                                set_current_kprobe(p, regs, kcb);
                                 p->nmissed++;
                                 prepare_singlestep(p, regs);
-                                kprobe_status = KPROBE_REENTER;
+                                kcb->kprobe_status = KPROBE_REENTER;
                                 return 1;
                         }
                 } else {
-                        p = current_kprobe;
+                        p = __get_cpu_var(current_kprobe);
                         if (p->break_handler && p->break_handler(p, regs)) {
                                 goto ss_probe;
                         }
@@ -374,8 +370,8 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
          * in post_kprobe_handler()
          */
         preempt_disable();
-        kprobe_status = KPROBE_HIT_ACTIVE;
-        set_current_kprobe(p, regs);
+        set_current_kprobe(p, regs, kcb);
+        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
         if (p->pre_handler && p->pre_handler(p, regs))
                 /* handler has already set things up, so skip ss setup */
@@ -383,7 +379,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 
 ss_probe:
         prepare_singlestep(p, regs);
-        kprobe_status = KPROBE_HIT_SS;
+        kcb->kprobe_status = KPROBE_HIT_SS;
         return 1;
 
 no_kprobe:
@@ -451,6 +447,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
         BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
         regs->rip = orig_ret_address;
 
+        reset_current_kprobe();
         unlock_kprobes();
         preempt_enable_no_resched();
 
@@ -484,7 +481,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
  * that is atop the stack is the address following the copied instruction.
  * We need to make it the address following the original instruction.
  */
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p,
+                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
         unsigned long *tos = (unsigned long *)regs->rsp;
         unsigned long next_rip = 0;
@@ -499,7 +497,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
         switch (*insn) {
         case 0x9c: /* pushfl */
                 *tos &= ~(TF_MASK | IF_MASK);
-                *tos |= kprobe_old_rflags;
+                *tos |= kcb->kprobe_old_rflags;
                 break;
         case 0xc3: /* ret/lret */
         case 0xcb:
@@ -544,24 +542,28 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
  */
 int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
-        if (!kprobe_running())
+        struct kprobe *cur = kprobe_running();
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+        if (!cur)
                 return 0;
 
-        if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
-                kprobe_status = KPROBE_HIT_SSDONE;
-                current_kprobe->post_handler(current_kprobe, regs, 0);
+        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+                kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                cur->post_handler(cur, regs, 0);
         }
 
-        resume_execution(current_kprobe, regs);
-        regs->eflags |= kprobe_saved_rflags;
+        resume_execution(cur, regs, kcb);
+        regs->eflags |= kcb->kprobe_saved_rflags;
 
         /* Restore the original saved kprobes variables and continue. */
-        if (kprobe_status == KPROBE_REENTER) {
-                restore_previous_kprobe();
+        if (kcb->kprobe_status == KPROBE_REENTER) {
+                restore_previous_kprobe(kcb);
                 goto out;
         } else {
                 unlock_kprobes();
         }
+        reset_current_kprobe();
 out:
         preempt_enable_no_resched();
 
@@ -579,14 +581,17 @@ out:
 /* Interrupts disabled, kprobe_lock held. */
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-        if (current_kprobe->fault_handler
-            && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+        struct kprobe *cur = kprobe_running();
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+        if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                 return 1;
 
-        if (kprobe_status & KPROBE_HIT_SS) {
-                resume_execution(current_kprobe, regs);
-                regs->eflags |= kprobe_old_rflags;
+        if (kcb->kprobe_status & KPROBE_HIT_SS) {
+                resume_execution(cur, regs, kcb);
+                regs->eflags |= kcb->kprobe_old_rflags;
 
+                reset_current_kprobe();
                 unlock_kprobes();
                 preempt_enable_no_resched();
         }
@@ -629,10 +634,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct jprobe *jp = container_of(p, struct jprobe, kp);
         unsigned long addr;
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-        jprobe_saved_regs = *regs;
-        jprobe_saved_rsp = (long *) regs->rsp;
-        addr = (unsigned long)jprobe_saved_rsp;
+        kcb->jprobe_saved_regs = *regs;
+        kcb->jprobe_saved_rsp = (long *) regs->rsp;
+        addr = (unsigned long)(kcb->jprobe_saved_rsp);
         /*
          * As Linus pointed out, gcc assumes that the callee
          * owns the argument space and could overwrite it, e.g.
@@ -640,7 +646,8 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
          * we also save and restore enough stack bytes to cover
          * the argument area.
          */
-        memcpy(jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE(addr));
+        memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+                        MIN_STACK_SIZE(addr));
         regs->eflags &= ~IF_MASK;
         regs->rip = (unsigned long)(jp->entry);
         return 1;
@@ -648,34 +655,38 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
 void __kprobes jprobe_return(void)
 {
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
         asm volatile (" xchg %%rbx,%%rsp \n"
                       " int3 \n"
                       " .globl jprobe_return_end \n"
                       " jprobe_return_end: \n"
                       " nop \n"::"b"
-                      (jprobe_saved_rsp):"memory");
+                      (kcb->jprobe_saved_rsp):"memory");
 }
 
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
+        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
         u8 *addr = (u8 *) (regs->rip - 1);
-        unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
+        unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
         struct jprobe *jp = container_of(p, struct jprobe, kp);
 
         if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
-                if ((long *)regs->rsp != jprobe_saved_rsp) {
+                if ((long *)regs->rsp != kcb->jprobe_saved_rsp) {
                         struct pt_regs *saved_regs =
-                            container_of(jprobe_saved_rsp, struct pt_regs, rsp);
+                            container_of(kcb->jprobe_saved_rsp,
+                                            struct pt_regs, rsp);
                         printk("current rsp %p does not match saved rsp %p\n",
-                               (long *)regs->rsp, jprobe_saved_rsp);
+                               (long *)regs->rsp, kcb->jprobe_saved_rsp);
                         printk("Saved registers for jprobe %p\n", jp);
                         show_registers(saved_regs);
                         printk("Current registers\n");
                         show_registers(regs);
                         BUG();
                 }
-                *regs = jprobe_saved_regs;
-                memcpy((kprobe_opcode_t *) stack_addr, jprobes_stack,
+                *regs = kcb->jprobe_saved_regs;
+                memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
                         MIN_STACK_SIZE(stack_addr));
                 return 1;
         }
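
For reference, the fields this file now dereferences through kcb come from the
arch-specific control block added elsewhere in this patch series (in the x86_64
asm kprobes header). Reconstructed purely from the accesses in the hunks above,
it presumably looks roughly like the following; treat it as an illustration,
not the authoritative definition.

/* Hypothetical reconstruction from the fields used in this diff. */
struct prev_kprobe {
        struct kprobe *kp;
        unsigned long status;
        unsigned long old_rflags;
        unsigned long saved_rflags;
};

struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_old_rflags;
        unsigned long kprobe_saved_rflags;
        long *jprobe_saved_rsp;
        struct pt_regs jprobe_saved_regs;
        kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
};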