about · summary · refs · log · tree · commit · diff · stats
path: root/kernel/events
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2012-12-30 11:40:39 -0500
committerOleg Nesterov <oleg@redhat.com>2013-02-08 11:47:11 -0500
commitc8a82538001e1a68f4a319d5a75de90d1f284731 (patch)
tree8f6ad569a44b3fb9179442fd6b9d07d42ec28229 /kernel/events
parent74e59dfc6b19e3472a7c16ad57bc831e6e647895 (diff)
uprobes: Move alloc_page() from xol_add_vma() to xol_alloc_area()
Move alloc_page() from xol_add_vma() to xol_alloc_area() to cleanup the code. This separates the memory allocations and consolidates the -EALREADY cleanups and the error handling. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Anton Arapov <anton@redhat.com> Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/uprobes.c32
1 file changed, 13 insertions(+), 19 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f1b807831fc2..ea2e2a85479a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1041,22 +1041,14 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
1041/* Slot allocation for XOL */ 1041/* Slot allocation for XOL */
1042static int xol_add_vma(struct xol_area *area) 1042static int xol_add_vma(struct xol_area *area)
1043{ 1043{
1044 struct mm_struct *mm; 1044 struct mm_struct *mm = current->mm;
1045 int ret; 1045 int ret = -EALREADY;
1046
1047 area->page = alloc_page(GFP_HIGHUSER);
1048 if (!area->page)
1049 return -ENOMEM;
1050
1051 ret = -EALREADY;
1052 mm = current->mm;
1053 1046
1054 down_write(&mm->mmap_sem); 1047 down_write(&mm->mmap_sem);
1055 if (mm->uprobes_state.xol_area) 1048 if (mm->uprobes_state.xol_area)
1056 goto fail; 1049 goto fail;
1057 1050
1058 ret = -ENOMEM; 1051 ret = -ENOMEM;
1059
1060 /* Try to map as high as possible, this is only a hint. */ 1052 /* Try to map as high as possible, this is only a hint. */
1061 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0); 1053 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1062 if (area->vaddr & ~PAGE_MASK) { 1054 if (area->vaddr & ~PAGE_MASK) {
@@ -1072,11 +1064,8 @@ static int xol_add_vma(struct xol_area *area)
1072 smp_wmb(); /* pairs with get_xol_area() */ 1064 smp_wmb(); /* pairs with get_xol_area() */
1073 mm->uprobes_state.xol_area = area; 1065 mm->uprobes_state.xol_area = area;
1074 ret = 0; 1066 ret = 0;
1075 1067 fail:
1076fail:
1077 up_write(&mm->mmap_sem); 1068 up_write(&mm->mmap_sem);
1078 if (ret)
1079 __free_page(area->page);
1080 1069
1081 return ret; 1070 return ret;
1082} 1071}
@@ -1104,21 +1093,26 @@ static struct xol_area *xol_alloc_area(void)
1104 1093
1105 area = kzalloc(sizeof(*area), GFP_KERNEL); 1094 area = kzalloc(sizeof(*area), GFP_KERNEL);
1106 if (unlikely(!area)) 1095 if (unlikely(!area))
1107 return NULL; 1096 goto out;
1108 1097
1109 area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL); 1098 area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1110
1111 if (!area->bitmap) 1099 if (!area->bitmap)
1112 goto fail; 1100 goto free_area;
1101
1102 area->page = alloc_page(GFP_HIGHUSER);
1103 if (!area->page)
1104 goto free_bitmap;
1113 1105
1114 init_waitqueue_head(&area->wq); 1106 init_waitqueue_head(&area->wq);
1115 if (!xol_add_vma(area)) 1107 if (!xol_add_vma(area))
1116 return area; 1108 return area;
1117 1109
1118fail: 1110 __free_page(area->page);
1111 free_bitmap:
1119 kfree(area->bitmap); 1112 kfree(area->bitmap);
1113 free_area:
1120 kfree(area); 1114 kfree(area);
1121 1115 out:
1122 return get_xol_area(current->mm); 1116 return get_xol_area(current->mm);
1123} 1117}
1124 1118