aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/vdso/vma.c
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@amacapital.net>2014-07-10 21:13:15 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2014-07-11 19:57:51 -0400
commite6577a7ce99a506b587bcd1d2cd803cb45119557 (patch)
tree71e985803dce2a087d98f98537efd37d1ba630e3 /arch/x86/vdso/vma.c
parentd093601be5e97d2729614419d0d256ed3b6a56b0 (diff)
x86, vdso: Move the vvar area before the vdso text
Putting the vvar area after the vdso text is rather complicated: it only works if the total length of the vdso text mapping is known at vdso link time, and the linker doesn't allow symbol addresses to depend on the sizes of non-allocatable data after the PT_LOAD segment. Moving the vvar area before the vdso text will allow us to safely map non-allocatable data after the vdso text, which is a nice simplification. Signed-off-by: Andy Lutomirski <luto@amacapital.net> Link: http://lkml.kernel.org/r/156c78c0d93144ff1055a66493783b9e56813983.1405040914.git.luto@amacapital.net Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/vdso/vma.c')
-rw-r--r--arch/x86/vdso/vma.c20
1 files changed, 11 insertions, 9 deletions
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 5a5176de8d0a..dbef622bb5af 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -93,7 +93,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
93{ 93{
94 struct mm_struct *mm = current->mm; 94 struct mm_struct *mm = current->mm;
95 struct vm_area_struct *vma; 95 struct vm_area_struct *vma;
96 unsigned long addr; 96 unsigned long addr, text_start;
97 int ret = 0; 97 int ret = 0;
98 static struct page *no_pages[] = {NULL}; 98 static struct page *no_pages[] = {NULL};
99 static struct vm_special_mapping vvar_mapping = { 99 static struct vm_special_mapping vvar_mapping = {
@@ -103,26 +103,28 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
103 103
104 if (calculate_addr) { 104 if (calculate_addr) {
105 addr = vdso_addr(current->mm->start_stack, 105 addr = vdso_addr(current->mm->start_stack,
106 image->sym_end_mapping); 106 image->size - image->sym_vvar_start);
107 } else { 107 } else {
108 addr = 0; 108 addr = 0;
109 } 109 }
110 110
111 down_write(&mm->mmap_sem); 111 down_write(&mm->mmap_sem);
112 112
113 addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0); 113 addr = get_unmapped_area(NULL, addr,
114 image->size - image->sym_vvar_start, 0, 0);
114 if (IS_ERR_VALUE(addr)) { 115 if (IS_ERR_VALUE(addr)) {
115 ret = addr; 116 ret = addr;
116 goto up_fail; 117 goto up_fail;
117 } 118 }
118 119
119 current->mm->context.vdso = (void __user *)addr; 120 text_start = addr - image->sym_vvar_start;
121 current->mm->context.vdso = (void __user *)text_start;
120 122
121 /* 123 /*
122 * MAYWRITE to allow gdb to COW and set breakpoints 124 * MAYWRITE to allow gdb to COW and set breakpoints
123 */ 125 */
124 vma = _install_special_mapping(mm, 126 vma = _install_special_mapping(mm,
125 addr, 127 text_start,
126 image->size, 128 image->size,
127 VM_READ|VM_EXEC| 129 VM_READ|VM_EXEC|
128 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 130 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
@@ -134,8 +136,8 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
134 } 136 }
135 137
136 vma = _install_special_mapping(mm, 138 vma = _install_special_mapping(mm,
137 addr + image->size, 139 addr,
138 image->sym_end_mapping - image->size, 140 -image->sym_vvar_start,
139 VM_READ, 141 VM_READ,
140 &vvar_mapping); 142 &vvar_mapping);
141 143
@@ -146,7 +148,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
146 148
147 if (image->sym_vvar_page) 149 if (image->sym_vvar_page)
148 ret = remap_pfn_range(vma, 150 ret = remap_pfn_range(vma,
149 addr + image->sym_vvar_page, 151 text_start + image->sym_vvar_page,
150 __pa_symbol(&__vvar_page) >> PAGE_SHIFT, 152 __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
151 PAGE_SIZE, 153 PAGE_SIZE,
152 PAGE_READONLY); 154 PAGE_READONLY);
@@ -157,7 +159,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
157#ifdef CONFIG_HPET_TIMER 159#ifdef CONFIG_HPET_TIMER
158 if (hpet_address && image->sym_hpet_page) { 160 if (hpet_address && image->sym_hpet_page) {
159 ret = io_remap_pfn_range(vma, 161 ret = io_remap_pfn_range(vma,
160 addr + image->sym_hpet_page, 162 text_start + image->sym_hpet_page,
161 hpet_address >> PAGE_SHIFT, 163 hpet_address >> PAGE_SHIFT,
162 PAGE_SIZE, 164 PAGE_SIZE,
163 pgprot_noncached(PAGE_READONLY)); 165 pgprot_noncached(PAGE_READONLY));