aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2012-08-19 10:15:09 -0400
committerOleg Nesterov <oleg@redhat.com>2012-09-15 11:37:27 -0400
commit9f68f672c47b9bd4cfe0a667ecb0b1382c61e2de (patch)
treeeb389851ce75fa15ad2b5e34c88d4c6af1fb197a /kernel/events
parent6f47caa0e1e4887aa2ddca8388d058d35725d815 (diff)
uprobes: Introduce MMF_RECALC_UPROBES
Add the new MMF_RECALC_UPROBES flag, it means that MMF_HAS_UPROBES can be false positive after remove_breakpoint() or uprobe_munmap(). It is also set by uprobe_dup_mmap(), this is not optimal but simple. We could add the new hook, uprobe_dup_vma(), to set MMF_HAS_UPROBES only if the new mm actually has uprobes, but I don't think this makes sense. The next patch will use this flag to clear MMF_HAS_UPROBES. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/uprobes.c39
1 file changed, 35 insertions, 4 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ba9f1e7c6060..9a7f08bab91f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -684,7 +684,9 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 		set_bit(MMF_HAS_UPROBES, &mm->flags);
 
 	ret = set_swbp(&uprobe->arch, mm, vaddr);
-	if (ret && first_uprobe)
+	if (!ret)
+		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
+	else if (first_uprobe)
 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 
 	return ret;
@@ -693,6 +695,11 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 static void
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
+	/* can happen if uprobe_register() fails */
+	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
+		return;
+
+	set_bit(MMF_RECALC_UPROBES, &mm->flags);
 	set_orig_insn(&uprobe->arch, mm, vaddr);
 }
 
@@ -1026,6 +1033,25 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	return 0;
 }
 
+static bool
+vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	loff_t min, max;
+	struct inode *inode;
+	struct rb_node *n;
+
+	inode = vma->vm_file->f_mapping->host;
+
+	min = vaddr_to_offset(vma, start);
+	max = min + (end - start) - 1;
+
+	spin_lock(&uprobes_treelock);
+	n = find_node_in_range(inode, min, max);
+	spin_unlock(&uprobes_treelock);
+
+	return !!n;
+}
+
 /*
  * Called in context of a munmap of a vma.
  */
@@ -1037,10 +1063,12 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
 		return;
 
-	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
+	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
+	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
 		return;
 
-	/* TODO: unmapping uprobe(s) will need more work */
+	if (vma_has_uprobes(vma, start, end))
+		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
 }
 
 /* Slot allocation for XOL */
@@ -1146,8 +1174,11 @@ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 {
 	newmm->uprobes_state.xol_area = NULL;
 
-	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags))
+	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
+		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
+		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
+	}
 }
 
 /*