aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2012-08-08 11:11:42 -0400
committerOleg Nesterov <oleg@redhat.com>2012-08-28 12:21:18 -0400
commitf8ac4ec9c064b330dcc49e03c450fe74298c4622 (patch)
tree62d53120d0289719438d27de5f868c36e136ed28 /kernel/events
parent78f7411668aa0b2006d331f6a288416dd91b8e5d (diff)
uprobes: Introduce MMF_HAS_UPROBES
Add the new MMF_HAS_UPROBES flag. It is set by install_breakpoint() and it is copied by dup_mmap(), uprobe_pre_sstep_notifier() checks it to avoid the slow path if the task was never probed. Perhaps it makes sense to check it in valid_vma(is_register => false) as well. This needs the new dup_mmap()->uprobe_dup_mmap() hook. We can't use uprobe_reset_state() or put MMF_HAS_UPROBES into MMF_INIT_MASK, we need oldmm->mmap_sem to avoid the race with uprobe_register() or mmap() from another thread. Currently we never clear this bit, it can be false-positive after uprobe_unregister() or uprobe_munmap() or if dup_mmap() hits the probed VM_DONTCOPY vma. But this is fine correctness-wise and has no effect unless the task hits the non-uprobe breakpoint. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/uprobes.c22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 3e2996b809be..33870b17e1dd 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -647,6 +647,7 @@ static int
 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long vaddr)
 {
+	bool first_uprobe;
 	int ret;
 
 	/*
@@ -678,7 +679,17 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 		uprobe->flags |= UPROBE_COPY_INSN;
 	}
 
+	/*
+	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
+	 * the task can hit this breakpoint right after __replace_page().
+	 */
+	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
+	if (first_uprobe)
+		set_bit(MMF_HAS_UPROBES, &mm->flags);
+
 	ret = set_swbp(&uprobe->arch, mm, vaddr);
+	if (ret && first_uprobe)
+		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 
 	return ret;
 }
@@ -1032,6 +1043,9 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
 		return;
 
+	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
+		return;
+
 	/* TODO: unmapping uprobe(s) will need more work */
 }
 
@@ -1142,6 +1156,12 @@ void uprobe_reset_state(struct mm_struct *mm)
 	mm->uprobes_state.xol_area = NULL;
 }
 
+void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags))
+		set_bit(MMF_HAS_UPROBES, &newmm->flags);
+}
+
 /*
  * - search for a free slot.
  */
@@ -1507,7 +1527,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
 {
 	struct uprobe_task *utask;
 
-	if (!current->mm)
+	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
 		return 0;
 
 	utask = current->utask;