aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2014-03-31 15:01:31 -0400
committerOleg Nesterov <oleg@redhat.com>2014-04-17 15:58:19 -0400
commit8ad8e9d3fd64f101eed6652964670672d699e563 (patch)
tree746e9f28e45b400e3ca4afe2866616545be92de9
parent34e7317d6ae8f6111ac449444f22e14f4a14ebfd (diff)
uprobes/x86: Introduce uprobe_xol_ops and arch_uprobe->ops
Introduce arch_uprobe->ops pointing to the "struct uprobe_xol_ops", move the current UPROBE_FIX_{RIP*,IP,CALL} code into the default set of methods and change arch_uprobe_pre/post_xol() accordingly. This way we can add the new uprobe_xol_ops's to handle the insns which need the special processing (rip-relative jmp/call at least). Signed-off-by: Oleg Nesterov <oleg@redhat.com> Reviewed-by: Jim Keniston <jkenisto@us.ibm.com> Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
-rw-r--r--arch/x86/include/asm/uprobes.h7
-rw-r--r--arch/x86/kernel/uprobes.c107
2 files changed, 74 insertions(+), 40 deletions(-)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 3087ea9c5f2e..9f8210bcbb49 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -33,12 +33,17 @@ typedef u8 uprobe_opcode_t;
 #define UPROBE_SWBP_INSN		0xcc
 #define UPROBE_SWBP_INSN_SIZE		   1
 
+struct uprobe_xol_ops;
+
 struct arch_uprobe {
-	u16				fixups;
 	union {
 		u8			insn[MAX_UINSN_BYTES];
 		u8			ixol[MAX_UINSN_BYTES];
 	};
+
+	u16				fixups;
+	const struct uprobe_xol_ops	*ops;
+
 #ifdef CONFIG_X86_64
 	unsigned long			rip_rela_target_address;
 #endif
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 3bb4198aa588..13ad8a38c2d9 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -402,6 +402,64 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
 }
 #endif /* CONFIG_X86_64 */
 
+struct uprobe_xol_ops {
+	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
+	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
+	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
+};
+
+static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
+	return 0;
+}
+
+/*
+ * Adjust the return address pushed by a call insn executed out of line.
+ */
+static int adjust_ret_addr(unsigned long sp, long correction)
+{
+	int rasize, ncopied;
+	long ra = 0;
+
+	if (is_ia32_task())
+		rasize = 4;
+	else
+		rasize = 8;
+
+	ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
+	if (unlikely(ncopied))
+		return -EFAULT;
+
+	ra += correction;
+	ncopied = copy_to_user((void __user *)sp, &ra, rasize);
+	if (unlikely(ncopied))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+	long correction = (long)(utask->vaddr - utask->xol_vaddr);
+	int ret = 0;
+
+	handle_riprel_post_xol(auprobe, regs, &correction);
+	if (auprobe->fixups & UPROBE_FIX_IP)
+		regs->ip += correction;
+
+	if (auprobe->fixups & UPROBE_FIX_CALL)
+		ret = adjust_ret_addr(regs->sp, correction);
+
+	return ret;
+}
+
+static struct uprobe_xol_ops default_xol_ops = {
+	.pre_xol  = default_pre_xol_op,
+	.post_xol = default_post_xol_op,
+};
+
 /**
  * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
@@ -464,6 +522,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	if (fix_call)
 		auprobe->fixups |= UPROBE_FIX_CALL;
 
+	auprobe->ops = &default_xol_ops;
 	return 0;
 }
 
@@ -485,33 +544,8 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
 		set_task_blockstep(current, false);
 
-	pre_xol_rip_insn(auprobe, regs, &utask->autask);
-	return 0;
-}
-
-/*
- * This function is called by arch_uprobe_post_xol() to adjust the return
- * address pushed by a call instruction executed out of line.
- */
-static int adjust_ret_addr(unsigned long sp, long correction)
-{
-	int rasize, ncopied;
-	long ra = 0;
-
-	if (is_ia32_task())
-		rasize = 4;
-	else
-		rasize = 8;
-
-	ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
-	if (unlikely(ncopied))
-		return -EFAULT;
-
-	ra += correction;
-	ncopied = copy_to_user((void __user *)sp, &ra, rasize);
-	if (unlikely(ncopied))
-		return -EFAULT;
-
-	return 0;
-}
-
+	if (auprobe->ops->pre_xol)
+		return auprobe->ops->pre_xol(auprobe, regs);
 	return 0;
 }
 
@@ -560,11 +594,8 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t)
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
-	long correction;
-	int result = 0;
 
 	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
-
 	current->thread.trap_nr = utask->autask.saved_trap_nr;
 	/*
 	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
@@ -576,15 +607,9 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	else if (!(auprobe->fixups & UPROBE_FIX_SETF))
 		regs->flags &= ~X86_EFLAGS_TF;
 
-	correction = (long)(utask->vaddr - utask->xol_vaddr);
-	handle_riprel_post_xol(auprobe, regs, &correction);
-	if (auprobe->fixups & UPROBE_FIX_IP)
-		regs->ip += correction;
-
-	if (auprobe->fixups & UPROBE_FIX_CALL)
-		result = adjust_ret_addr(regs->sp, correction);
-
-	return result;
+	if (auprobe->ops->post_xol)
+		return auprobe->ops->post_xol(auprobe, regs);
+	return 0;
 }
 
 /* callback routine for handling exceptions. */
@@ -642,6 +667,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	int i;
 
+	if (auprobe->ops->emulate)
+		return auprobe->ops->emulate(auprobe, regs);
+
+	/* TODO: move this code into ->emulate() hook */
 	for (i = 0; i < MAX_UINSN_BYTES; i++) {
 		if (auprobe->insn[i] == 0x66)
 			continue;