 include/linux/uprobes.h |  2 +-
 kernel/events/uprobes.c | 38 ++------------------------------------
 2 files changed, 3 insertions(+), 37 deletions(-)
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index efe4b3308c74..03ae547c1c31 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -99,8 +99,8 @@ struct xol_area {
 
 struct uprobes_state {
 	struct xol_area *xol_area;
-	atomic_t count;
 };
+
 extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr, bool verify);
 extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 0cefde276641..6f1664d217dc 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -678,18 +678,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 		uprobe->flags |= UPROBE_COPY_INSN;
 	}
 
-	/*
-	 * Ideally, should be updating the probe count after the breakpoint
-	 * has been successfully inserted. However a thread could hit the
-	 * breakpoint we just inserted even before the probe count is
-	 * incremented. If this is the first breakpoint placed, breakpoint
-	 * notifier might ignore uprobes and pass the trap to the thread.
-	 * Hence increment before and decrement on failure.
-	 */
-	atomic_inc(&mm->uprobes_state.count);
 	ret = set_swbp(&uprobe->arch, mm, vaddr);
-	if (ret)
-		atomic_dec(&mm->uprobes_state.count);
 
 	return ret;
 }
@@ -697,8 +686,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 static void
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
-		atomic_dec(&mm->uprobes_state.count);
+	set_orig_insn(&uprobe->arch, mm, vaddr, true);
 }
 
 /*
@@ -1051,13 +1039,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
 
 				if (!is_swbp_at_addr(vma->vm_mm, vaddr))
 					continue;
-
-				/*
-				 * Unable to insert a breakpoint, but
-				 * breakpoint lies underneath. Increment the
-				 * probe count.
-				 */
-				atomic_inc(&vma->vm_mm->uprobes_state.count);
 			}
 
 			if (!ret)
@@ -1068,9 +1049,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
 
 	mutex_unlock(uprobes_mmap_hash(inode));
 
-	if (ret)
-		atomic_sub(count, &vma->vm_mm->uprobes_state.count);
-
 	return ret;
 }
 
@@ -1089,9 +1067,6 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
 		return;
 
-	if (!atomic_read(&vma->vm_mm->uprobes_state.count))
-		return;
-
 	inode = vma->vm_file->f_mapping->host;
 	if (!inode)
 		return;
@@ -1100,13 +1075,6 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 	build_probe_list(inode, vma, start, end, &tmp_list);
 
 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-		unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
-		/*
-		 * An unregister could have removed the probe before
-		 * unmap. So check before we decrement the count.
-		 */
-		if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
-			atomic_dec(&vma->vm_mm->uprobes_state.count);
 		put_uprobe(uprobe);
 	}
 	mutex_unlock(uprobes_mmap_hash(inode));
@@ -1217,7 +1185,6 @@ void uprobe_clear_state(struct mm_struct *mm)
 void uprobe_reset_state(struct mm_struct *mm)
 {
 	mm->uprobes_state.xol_area = NULL;
-	atomic_set(&mm->uprobes_state.count, 0);
 }
 
 /*
@@ -1585,8 +1552,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
 {
 	struct uprobe_task *utask;
 
-	if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
-		/* task is currently not uprobed */
+	if (!current->mm)
 		return 0;
 
 	utask = current->utask;
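
The comment deleted from install_breakpoint() above explains the ordering the old per-mm counter required: it had to be incremented before the breakpoint was armed, because another thread could trap on the freshly written breakpoint while the count still read zero, and the notifier would then pass the trap straight to the task. A minimal user-space sketch of that increment-before-arm / decrement-on-failure pattern follows; probe_count, arm_breakpoint and notifier_would_handle are illustrative stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int probe_count;   /* stand-in for the removed uprobes_state.count */

/* Stand-in for set_swbp(); pretend the breakpoint insertion succeeded. */
static int arm_breakpoint(void)
{
	return 0;
}

/* Old fast path in uprobe_pre_sstep_notifier(): count == 0 meant "not uprobed",
 * so the trap would be passed straight to the task. */
static bool notifier_would_handle(void)
{
	return atomic_load(&probe_count) != 0;
}

int main(void)
{
	/*
	 * Increment before arming: once the breakpoint instruction is in
	 * place, another thread may hit it immediately, and the notifier
	 * must not see a zero count at that moment.
	 */
	atomic_fetch_add(&probe_count, 1);
	if (arm_breakpoint() != 0)
		atomic_fetch_sub(&probe_count, 1);   /* roll back on failure */

	printf("notifier would handle the trap: %s\n",
	       notifier_would_handle() ? "yes" : "no");
	return 0;
}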