author		Oleg Nesterov <oleg@redhat.com>	2012-07-29 14:22:40 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-07-30 05:27:23 -0400
commit		891c39708144bbe401b4aa151bffd0fe41b1dafd (patch)
tree		4d49f2169a6f1e7051fd1b88a8a0fbe612a9224e /kernel
parent		89133786f9408d53361874a8c784fff150fc7f7c (diff)
uprobes: Teach build_probe_list() to consider the range
Currently, build_probe_list() builds the list of all uprobes
attached to the given inode, and the caller must filter out
those that do not fall into the [start,end) range. This is
suboptimal.
This patch turns find_least_offset_node() into
find_node_in_range(), which returns the first node it finds
inside the [min,max] range, and changes build_probe_list() to
use this node as a starting point for rb_prev() and rb_next()
to collect all the other nodes the caller needs. The resulting
list is no longer sorted, but we do not care.
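The pattern is easy to see outside the kernel. Below is a minimal,
self-contained userspace C sketch (all names hypothetical; a sorted
array of offsets stands in for the rbtree): the binary search stops at
whatever in-range element it happens to hit first, and the caller then
scans left and right from that hit, just as build_probe_list() does
with rb_prev() and rb_next().

#include <stdio.h>

/* Return the index of *some* element of a[] inside [min,max], or -1.
 * This mirrors find_node_in_range(): the descent stops at the first
 * in-range element it hits, not necessarily the leftmost one. */
static int find_in_range(const long *a, int n, long min, long max)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (max < a[mid])
			hi = mid - 1;	/* whole range is to the left */
		else if (min > a[mid])
			lo = mid + 1;	/* whole range is to the right */
		else
			return mid;	/* a[mid] is inside [min,max] */
	}
	return -1;
}

int main(void)
{
	/* sorted offsets -- like the uprobes of one inode in the rbtree */
	long offs[] = { 0x100, 0x3000, 0x3800, 0x4f00, 0x9000 };
	int n = sizeof(offs) / sizeof(offs[0]);
	long min = 0x3000, max = 0x4fff;
	int i = find_in_range(offs, n, min, max);

	if (i >= 0) {
		int t;
		/* walk left from the hit (rb_prev() in the patch) */
		for (t = i; t >= 0 && offs[t] >= min; t--)
			printf("collect %#lx\n", offs[t]);
		/* walk right from the hit (rb_next() in the patch) */
		for (t = i + 1; t < n && offs[t] <= max; t++)
			printf("collect %#lx\n", offs[t]);
	}
	return 0;
}

Note the collected order here is 0x3800, 0x3000, 0x4f00: walking
outward from an interior hit produces an unsorted list, which is
exactly why the commit message says the result is no longer sorted.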
This can speed up both build_probe_list() and its callers, but
there is another reason to introduce find_node_in_range(): it
can be used to figure out whether the given vma has uprobes or
not, which will be needed soon.
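To make that second use concrete, here is a hedged sketch of what such
a check could look like. This helper is not part of this patch and its
name is invented; it only assumes the uprobes_treelock, the
find_node_in_range() helper, and the min/max computation that the
patch itself introduces below.

/* Hypothetical sketch, not in this patch: decide whether a range of
 * the vma has any uprobes by probing the tree for a single node. */
static bool vma_has_uprobes(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	struct inode *inode = vma->vm_file->f_mapping->host;
	struct rb_node *n;
	unsigned long flags;
	loff_t min, max;

	/* translate the virtual range into a file-offset range */
	min = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + start - vma->vm_start;
	max = min + (end - start) - 1;

	spin_lock_irqsave(&uprobes_treelock, flags);
	n = find_node_in_range(inode, min, max);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return n != NULL;
}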
While at it, move INIT_LIST_HEAD(&tmp_list) into
build_probe_list().
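For illustration, a worked example of the [min,max] file-offset range
that the new build_probe_list() computes in the hunk below (all
numbers invented): with vma->vm_pgoff = 2, PAGE_SHIFT = 12,
vma->vm_start = 0x400000, and a munmap of [0x401000, 0x403000):

	min = ((loff_t)2 << 12) + 0x401000 - 0x400000    = 0x3000
	max = 0x3000 + (0x403000 - 0x401000) - 1         = 0x4fff

so only uprobes whose offset falls in [0x3000, 0x4fff] are put on the
list.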
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Anton Arapov <anton@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20120729182240.GA20352@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/uprobes.c	103
1 file changed, 50 insertions(+), 53 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index fb961d5e205c..2ef123306183 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -939,59 +939,66 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 	put_uprobe(uprobe);
 }
 
-/*
- * Of all the nodes that correspond to the given inode, return the node
- * with the least offset.
- */
-static struct rb_node *find_least_offset_node(struct inode *inode)
+static struct rb_node *
+find_node_in_range(struct inode *inode, loff_t min, loff_t max)
 {
-	struct uprobe u = { .inode = inode, .offset = 0};
 	struct rb_node *n = uprobes_tree.rb_node;
-	struct rb_node *close_node = NULL;
-	struct uprobe *uprobe;
-	int match;
 
 	while (n) {
-		uprobe = rb_entry(n, struct uprobe, rb_node);
-		match = match_uprobe(&u, uprobe);
-
-		if (uprobe->inode == inode)
-			close_node = n;
+		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
 
-		if (!match)
-			return close_node;
-
-		if (match < 0)
+		if (inode < u->inode) {
 			n = n->rb_left;
-		else
+		} else if (inode > u->inode) {
 			n = n->rb_right;
+		} else {
+			if (max < u->offset)
+				n = n->rb_left;
+			else if (min > u->offset)
+				n = n->rb_right;
+			else
+				break;
+		}
 	}
 
-	return close_node;
+	return n;
 }
 
 /*
- * For a given inode, build a list of probes that need to be inserted.
+ * For a given range in vma, build a list of probes that need to be inserted.
  */
-static void build_probe_list(struct inode *inode, struct list_head *head)
+static void build_probe_list(struct inode *inode,
+				struct vm_area_struct *vma,
+				unsigned long start, unsigned long end,
+				struct list_head *head)
 {
-	struct uprobe *uprobe;
+	loff_t min, max;
 	unsigned long flags;
-	struct rb_node *n;
-
-	spin_lock_irqsave(&uprobes_treelock, flags);
-
-	n = find_least_offset_node(inode);
+	struct rb_node *n, *t;
+	struct uprobe *u;
 
-	for (; n; n = rb_next(n)) {
-		uprobe = rb_entry(n, struct uprobe, rb_node);
-		if (uprobe->inode != inode)
-			break;
+	INIT_LIST_HEAD(head);
+	min = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + start - vma->vm_start;
+	max = min + (end - start) - 1;
 
-		list_add(&uprobe->pending_list, head);
-		atomic_inc(&uprobe->ref);
+	spin_lock_irqsave(&uprobes_treelock, flags);
+	n = find_node_in_range(inode, min, max);
+	if (n) {
+		for (t = n; t; t = rb_prev(t)) {
+			u = rb_entry(t, struct uprobe, rb_node);
+			if (u->inode != inode || u->offset < min)
+				break;
+			list_add(&u->pending_list, head);
+			atomic_inc(&u->ref);
+		}
+		for (t = n; (t = rb_next(t)); ) {
+			u = rb_entry(t, struct uprobe, rb_node);
+			if (u->inode != inode || u->offset > max)
+				break;
+			list_add(&u->pending_list, head);
+			atomic_inc(&u->ref);
+		}
 	}
-
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
 }
 
@@ -1021,9 +1028,8 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	if (!inode)
 		return 0;
 
-	INIT_LIST_HEAD(&tmp_list);
 	mutex_lock(uprobes_mmap_hash(inode));
-	build_probe_list(inode, &tmp_list);
+	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
 
 	ret = 0;
 	count = 0;
@@ -1032,11 +1038,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
 		if (!ret) {
 			loff_t vaddr = vma_address(vma, uprobe->offset);
 
-			if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
-				put_uprobe(uprobe);
-				continue;
-			}
-
 			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
 			/*
 			 * We can race against uprobe_register(), see the
@@ -1092,21 +1093,17 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 	if (!inode)
 		return;
 
-	INIT_LIST_HEAD(&tmp_list);
 	mutex_lock(uprobes_mmap_hash(inode));
-	build_probe_list(inode, &tmp_list);
+	build_probe_list(inode, vma, start, end, &tmp_list);
 
 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
 		loff_t vaddr = vma_address(vma, uprobe->offset);
-
-		if (vaddr >= start && vaddr < end) {
-			/*
-			 * An unregister could have removed the probe before
-			 * unmap. So check before we decrement the count.
-			 */
-			if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
-				atomic_dec(&vma->vm_mm->uprobes_state.count);
-		}
+		/*
+		 * An unregister could have removed the probe before
+		 * unmap. So check before we decrement the count.
+		 */
+		if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
+			atomic_dec(&vma->vm_mm->uprobes_state.count);
 		put_uprobe(uprobe);
 	}
 	mutex_unlock(uprobes_mmap_hash(inode));