author		Oleg Nesterov <oleg@redhat.com>	2012-08-06 08:13:23 -0400
committer	Oleg Nesterov <oleg@redhat.com>	2012-08-28 12:21:17 -0400
commit		f1a45d023193f7d8e55e384090b645d609325393 (patch)
tree		7f901f3dbd23a7602bf3d4bf774862a3ee5b7640 /kernel
parent		647c42dfd40fec032a4c8525a755160f0765921f (diff)
uprobes: Kill dup_mmap()->uprobe_mmap(), simplify uprobe_mmap/munmap
1. Kill dup_mmap()->uprobe_mmap(); it was only needed to calculate
   new_mm->uprobes_state.count, which was removed by the previous patch.
   If the forking process has a pending uprobe (int3) in a vma, it will
   be copied by copy_page_range(); note that copy_page_range() checks
   vma->anon_vma, so the "Don't copy ptes" shortcut is not possible after
   install_breakpoint(), which does anon_vma_prepare(). See the sketch
   after this list.
2. Remove the is_swbp_at_addr() call and "int count" in uprobe_mmap();
   again, these were only needed for uprobes_state.count.
   As a side effect this fixes the bug pointed out by Srikar: this code
   lacked the necessary put_uprobe() on the "continue" path (see the
   condensed excerpt of the old loop after the tags below).
3. uprobe_munmap() becomes a nop after the previous patch. Remove the
   now-meaningless code but do not remove the helper; we will need it.
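
For context, here is a condensed sketch of the "Don't copy ptes" shortcut
referenced in point 1. This is an approximation of the era's copy_page_range()
fast path in mm/memory.c, shown only to explain the reasoning; it is not part
of this patch:

	/*
	 * Sketch (assumed shape of copy_page_range()'s fast path at the
	 * time): fork skips copying ptes for mappings that can simply be
	 * re-faulted, but only while vma->anon_vma is NULL.
	 * install_breakpoint() calls anon_vma_prepare(), which sets
	 * vma->anon_vma, so the shortcut is not taken and the pte holding
	 * the int3 is copied into the child mm.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
			       VM_PFNMAP | VM_MIXEDMAP))) {
		if (!vma->anon_vma)
			return 0;	/* don't copy ptes, refault instead */
	}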
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
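
To illustrate the put_uprobe() bug from point 2, here is the pre-patch
uprobe_mmap() loop, condensed from the hunk removed below. The "continue" in
the -EEXIST branch jumps past the final put_uprobe(), leaking the reference
pinned while the uprobe sits on tmp_list:

	/* Pre-patch uprobe_mmap() loop, condensed for illustration. */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!ret) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);

			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
			if (ret == -EEXIST) {
				ret = 0;
				if (!is_swbp_at_addr(vma->vm_mm, vaddr))
					continue;	/* bug: skips put_uprobe() */
			}
			if (!ret)
				count++;
		}
		put_uprobe(uprobe);	/* not reached on the "continue" path */
	}

With the -EEXIST special case reduced to "ret = 0", every iteration now falls
through to put_uprobe(), so the leak goes away together with the count
bookkeeping.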
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/uprobes.c	30
-rw-r--r--	kernel/fork.c	3
2 files changed, 3 insertions, 30 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6f1664d217dc..ce59c100d65f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1010,7 +1010,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	struct list_head tmp_list;
 	struct uprobe *uprobe, *u;
 	struct inode *inode;
-	int ret, count;
+	int ret;
 
 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
 		return 0;
@@ -1023,8 +1023,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
 
 	ret = 0;
-	count = 0;
-
 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
 		if (!ret) {
 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
@@ -1034,19 +1032,11 @@ int uprobe_mmap(struct vm_area_struct *vma)
 			 * We can race against uprobe_register(), see the
 			 * comment near uprobe_hash().
 			 */
-			if (ret == -EEXIST) {
+			if (ret == -EEXIST)
 				ret = 0;
-
-				if (!is_swbp_at_addr(vma->vm_mm, vaddr))
-					continue;
-			}
-
-			if (!ret)
-				count++;
 		}
 		put_uprobe(uprobe);
 	}
-
 	mutex_unlock(uprobes_mmap_hash(inode));
 
 	return ret;
@@ -1057,27 +1047,13 @@ int uprobe_mmap(struct vm_area_struct *vma)
  */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	struct list_head tmp_list;
-	struct uprobe *uprobe, *u;
-	struct inode *inode;
-
 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
 		return;
 
 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
 		return;
 
-	inode = vma->vm_file->f_mapping->host;
-	if (!inode)
-		return;
-
-	mutex_lock(uprobes_mmap_hash(inode));
-	build_probe_list(inode, vma, start, end, &tmp_list);
-
-	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-		put_uprobe(uprobe);
-	}
-	mutex_unlock(uprobes_mmap_hash(inode));
+	/* TODO: unmapping uprobe(s) will need more work */
 }
 
 /* Slot allocation for XOL */
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c8857e12855..912b6f6fe5b8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -454,9 +454,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
 		if (retval)
 			goto out;
-
-		if (file)
-			uprobe_mmap(tmp);
 	}
 	/* a new mm has just been created */
 	arch_dup_mmap(oldmm, mm);