author		Oleg Nesterov <oleg@redhat.com>	2013-10-13 15:18:35 -0400
committer	Oleg Nesterov <oleg@redhat.com>	2013-10-29 13:02:50 -0400
commit		6441ec8b7c108b72789d120562b9f1d976e4aaaf (patch)
tree		3b8e813084698de0470713487e16941a08e5cace /kernel/events
parent		b68e0749100e1b901bf11330f149b321c082178e (diff)
uprobes: Introduce __create_xol_area()
No functional changes, preparation.
Extract the code which actually allocates/installs the new area
into the new helper, __create_xol_area().
While at it, remove the unnecessary "ret = -ENOMEM" and "ret = 0"
assignments in xol_add_vma(); neither has any effect.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
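
For reference, a condensed sketch of xol_add_vma() after this change (not the
verbatim kernel code: the mapping step between the two hunks below is
abbreviated and its exact calls are assumed). It shows why the removed
assignments were dead stores: every path past the removed "ret = -ENOMEM"
overwrites ret, either with the error from get_unmapped_area() or with the
mapping call's return value, and by the time the area pointer is published
ret is already 0.

    /*
     * Condensed sketch, not verbatim: the "..." step is assumed to set ret
     * from the mapping call (0 on success, negative on error) and to jump
     * to fail on error.  That is what makes the removed "ret = -ENOMEM"
     * and "ret = 0" dead stores.
     */
    static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
    {
            int ret = -EALREADY;

            down_write(&mm->mmap_sem);
            if (mm->uprobes_state.xol_area)
                    goto fail;              /* ret stays -EALREADY */

            /* Try to map as high as possible, this is only a hint. */
            area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
            if (area->vaddr & ~PAGE_MASK) {
                    ret = area->vaddr;      /* ret overwritten with the error */
                    goto fail;
            }

            /* ... map area->page at area->vaddr; ret takes the mapping call's
             * return value, and failure jumps to fail ... */

            smp_wmb();      /* pairs with get_xol_area() */
            mm->uprobes_state.xol_area = area;      /* ret is already 0 here */
     fail:
            up_write(&mm->mmap_sem);

            return ret;
    }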
Diffstat (limited to 'kernel/events')
 -rw-r--r--	kernel/events/uprobes.c	47
 1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index db7a1dcb3dd6..ad17d813e73e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1096,16 +1096,14 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 }
 
 /* Slot allocation for XOL */
-static int xol_add_vma(struct xol_area *area)
+static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 {
-	struct mm_struct *mm = current->mm;
 	int ret = -EALREADY;
 
 	down_write(&mm->mmap_sem);
 	if (mm->uprobes_state.xol_area)
 		goto fail;
 
-	ret = -ENOMEM;
 	/* Try to map as high as possible, this is only a hint. */
 	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
 	if (area->vaddr & ~PAGE_MASK) {
@@ -1120,28 +1118,17 @@ static int xol_add_vma(struct xol_area *area)
 
 	smp_wmb();	/* pairs with get_xol_area() */
 	mm->uprobes_state.xol_area = area;
-	ret = 0;
  fail:
 	up_write(&mm->mmap_sem);
 
 	return ret;
 }
 
-/*
- * get_xol_area - Allocate process's xol_area if necessary.
- * This area will be used for storing instructions for execution out of line.
- *
- * Returns the allocated area or NULL.
- */
-static struct xol_area *get_xol_area(void)
+static struct xol_area *__create_xol_area(void)
 {
 	struct mm_struct *mm = current->mm;
-	struct xol_area *area;
 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
-
-	area = mm->uprobes_state.xol_area;
-	if (area)
-		goto ret;
+	struct xol_area *area;
 
 	area = kzalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
@@ -1155,13 +1142,13 @@ static struct xol_area *get_xol_area(void)
 	if (!area->page)
 		goto free_bitmap;
 
-	/* allocate first slot of task's xol_area for the return probes */
+	init_waitqueue_head(&area->wq);
+	/* Reserve the 1st slot for get_trampoline_vaddr() */
 	set_bit(0, area->bitmap);
-	copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
 	atomic_set(&area->slot_count, 1);
-	init_waitqueue_head(&area->wq);
+	copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
 
-	if (!xol_add_vma(area))
+	if (!xol_add_vma(mm, area))
 		return area;
 
 	__free_page(area->page);
@@ -1170,9 +1157,25 @@ static struct xol_area *get_xol_area(void)
  free_area:
 	kfree(area);
  out:
+	return NULL;
+}
+
+/*
+ * get_xol_area - Allocate process's xol_area if necessary.
+ * This area will be used for storing instructions for execution out of line.
+ *
+ * Returns the allocated area or NULL.
+ */
+static struct xol_area *get_xol_area(void)
+{
+	struct mm_struct *mm = current->mm;
+	struct xol_area *area;
+
+	if (!mm->uprobes_state.xol_area)
+		__create_xol_area();
+
 	area = mm->uprobes_state.xol_area;
- ret:
 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
 	return area;
 }
 
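
Reassembled from the "+" side of the diff above, the resulting split looks as
follows. Code that falls between the hunks is elided as comments (its exact
shape is an assumption), and the race comment in get_xol_area() is an
interpretation of the -EALREADY / re-read pattern, not text from the patch.

    static struct xol_area *__create_xol_area(void)
    {
            struct mm_struct *mm = current->mm;
            uprobe_opcode_t insn = UPROBE_SWBP_INSN;
            struct xol_area *area;

            area = kzalloc(sizeof(*area), GFP_KERNEL);
            if (unlikely(!area))
                    goto out;       /* branch body sits between the hunks; assumed */
            /* ... allocate area->bitmap and area->page (elided between hunks) ... */

            init_waitqueue_head(&area->wq);
            /* Reserve the 1st slot for get_trampoline_vaddr() */
            set_bit(0, area->bitmap);
            atomic_set(&area->slot_count, 1);
            copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);

            if (!xol_add_vma(mm, area))
                    return area;

            /* Losing the race, or any mapping failure, unwinds everything. */
            __free_page(area->page);
            /* ... free_bitmap: / free_area: unwinding labels elided ... */
     out:
            return NULL;
    }

    /*
     * get_xol_area - Allocate process's xol_area if necessary.
     * This area will be used for storing instructions for execution out of line.
     *
     * Returns the allocated area or NULL.
     */
    static struct xol_area *get_xol_area(void)
    {
            struct mm_struct *mm = current->mm;
            struct xol_area *area;

            /*
             * If two threads race here, the loser's xol_add_vma() returns
             * -EALREADY and its area is freed; the re-read below then picks
             * up the winner's area.
             */
            if (!mm->uprobes_state.xol_area)
                    __create_xol_area();

            area = mm->uprobes_state.xol_area;
            smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
            return area;
    }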