author		Michel Lespinasse <walken@google.com>	2012-12-11 19:01:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:25 -0500
commit		f99024729e689f5de4534fde5400e3b035f068de (patch)
tree		a0a568f6cc6df064b67c9594715a0d505726df3a /arch/x86/kernel/sys_x86_64.c
parent		db4fbfb9523c93583c339e66023506f651c1d54b (diff)
mm: use vm_unmapped_area() on x86_64 architecture
Update the x86_64 arch_get_unmapped_area[_topdown] functions to make use
of vm_unmapped_area() instead of implementing a brute force search.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
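The API this patch adopts comes from the parent commit (db4fbfb9523c): callers describe the search in a struct vm_unmapped_area_info, and vm_unmapped_area() returns a fitting, page-aligned address or a negative errno by searching the mm's VMA tree. Below is a minimal sketch of that calling convention as the conversions in this patch use it; the wrapper name and window arguments are invented for illustration.

/*
 * Sketch of the vm_unmapped_area() calling convention adopted by this
 * patch. The struct and function exist as of the parent commit; the
 * wrapper here is hypothetical.
 */
static unsigned long sketch_find_area(unsigned long len,
                                      unsigned long low, unsigned long high,
                                      unsigned long align_mask)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;                 /* bottom-up; or VM_UNMAPPED_AREA_TOPDOWN */
        info.length = len;              /* size of the gap we need */
        info.low_limit = low;           /* lowest acceptable address */
        info.high_limit = high;         /* end of the search window */
        info.align_mask = align_mask;   /* with align_offset 0, these bits of the result are clear */
        info.align_offset = 0;

        /* page-aligned address on success, -ENOMEM if nothing fits */
        return vm_unmapped_area(&info);
}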
Diffstat (limited to 'arch/x86/kernel/sys_x86_64.c')
-rw-r--r--	arch/x86/kernel/sys_x86_64.c	151
1 files changed, 30 insertions, 121 deletions
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index b4d3c3927dd8..f00d006d60fd 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -21,37 +21,23 @@
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
- *
- * @flags denotes the allocation direction - bottomup or topdown -
- * or vDSO; see call sites below.
  */
-unsigned long align_addr(unsigned long addr, struct file *filp,
-			 enum align_flags flags)
+static unsigned long get_align_mask(void)
 {
-	unsigned long tmp_addr;
-
 	/* handle 32- and 64-bit case with a single conditional */
 	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
-		return addr;
+		return 0;
 
 	if (!(current->flags & PF_RANDOMIZE))
-		return addr;
-
-	if (!((flags & ALIGN_VDSO) || filp))
-		return addr;
-
-	tmp_addr = addr;
-
-	/*
-	 * We need an address which is <= than the original
-	 * one only when in topdown direction.
-	 */
-	if (!(flags & ALIGN_TOPDOWN))
-		tmp_addr += va_align.mask;
+		return 0;
 
-	tmp_addr &= ~va_align.mask;
+	return va_align.mask;
+}
 
-	return tmp_addr;
+unsigned long align_vdso_addr(unsigned long addr)
+{
+	unsigned long align_mask = get_align_mask();
+	return (addr + align_mask) & ~align_mask;
 }
 
 static int __init control_va_addr_alignment(char *str)
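The hunk above splits the old align_addr() into two smaller helpers: get_align_mask() reports the required alignment mask (zero unless the AMD F15h workaround and PF_RANDOMIZE are both in effect), and align_vdso_addr() rounds an address up with the usual (addr + mask) & ~mask idiom. A self-contained userspace demo of that idiom, with made-up values:

#include <stdio.h>

int main(void)
{
        /* mask = alignment - 1; 32 KiB here, purely illustrative */
        unsigned long mask = 0x8000UL - 1;
        unsigned long addr = 0x7f1234567123UL;

        /* smallest address >= addr with the masked bits clear */
        unsigned long up = (addr + mask) & ~mask;

        printf("%#lx rounds up to %#lx\n", addr, up);
        return 0;
}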
@@ -126,7 +112,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
 	if (flags & MAP_FIXED)
@@ -144,50 +130,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
-	    && len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = begin;
-	}
-	addr = mm->free_area_cache;
-	if (addr < begin)
-		addr = begin;
-	start_addr = addr;
-
-full_search:
-
-	addr = align_addr(addr, filp, 0);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (end - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != begin) {
-				start_addr = addr = begin;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
 
-		addr = vma->vm_end;
-		addr = align_addr(addr, filp, 0);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = begin;
+	info.high_limit = end;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
-
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
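The deleted bottom-up loop was a linear first-fit scan with free_area_cache/cached_hole_size caching bolted on; all of that state disappears because vm_unmapped_area() searches gap sizes precomputed in the VMA rbtree. The info fields carry exactly what the loop used to encode: flags = 0 selects bottom-up, [begin, end) is the window from find_start_end(), and align_mask is applied only for file-backed mappings, preserving the old align_addr() behaviour. A toy userspace model of the bottom-up search semantics (hypothetical types; the kernel walks an rbtree, not an array):

#include <errno.h>

/* Toy model of the search the deleted loop performed: lowest addr in
 * [low, high) with [addr, addr + len) free. vmas[] is assumed sorted
 * by start and non-overlapping. */
struct range { unsigned long start, end; };

static unsigned long toy_area_bottomup(const struct range *vmas, int n,
                                       unsigned long low, unsigned long high,
                                       unsigned long len)
{
        unsigned long addr = low;
        int i;

        for (i = 0; i < n && addr + len <= high; i++) {
                if (addr + len <= vmas[i].start)
                        return addr;            /* the gap before vmas[i] fits */
                if (addr < vmas[i].end)
                        addr = vmas[i].end;     /* skip past this mapping */
        }
        return addr + len <= high ? addr : (unsigned long)-ENOMEM;
}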
@@ -195,7 +147,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0, start_addr;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -217,51 +170,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-try_again:
-	/* either no address requested or can't fit in requested address hole */
-	start_addr = addr = mm->free_area_cache;
-
-	if (addr < len)
-		goto fail;
-
-	addr -= len;
-	do {
-		addr = align_addr(addr, filp, ALIGN_TOPDOWN);
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr;
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != mm->mmap_base) {
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = 0;
-		goto try_again;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return addr;
+	VM_BUG_ON(addr != -ENOMEM);
 
 bottomup:
 	/*
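The success test in the hunk above relies on vm_unmapped_area()'s return convention: on success the address is page aligned, while a negative errno, viewed as an unsigned long, has non-zero page-offset bits. So addr & ~PAGE_MASK distinguishes the two without a separate flag, and the VM_BUG_ON asserts that -ENOMEM is the only error this search can produce. A quick userspace illustration of the arithmetic:

#include <stdio.h>

#define PAGE_MASK (~0xfffUL)    /* 4 KiB pages, as on x86-64 */
#define ENOMEM 12

int main(void)
{
        unsigned long ok = 0x7f0000000000UL;    /* page-aligned success */
        unsigned long err = (unsigned long)-ENOMEM;

        /* the low 12 bits are zero only for the aligned success value */
        printf("ok:  %#lx\n", ok & ~PAGE_MASK);         /* 0 */
        printf("err: %#lx\n", err & ~PAGE_MASK);        /* 0xff4 -> error */
        return 0;
}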
@@ -270,14 +188,5 @@ bottomup:
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 }
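The final hunk shows the payoff at the bottomup: label, reached when the top-down search below mm->mmap_base cannot satisfy the request: with no free_area_cache state to save and restore, the retry collapses to a plain tail call into arch_get_unmapped_area(). For completeness, the top-down counterpart of the earlier toy model, again with hypothetical types:

#include <errno.h>

/* Top-down counterpart of the bottom-up sketch above: highest addr
 * with [addr, addr + len) free inside [low, high). vmas[] is again
 * assumed sorted by start and non-overlapping. */
struct range { unsigned long start, end; };

static unsigned long toy_area_topdown(const struct range *vmas, int n,
                                      unsigned long low, unsigned long high,
                                      unsigned long len)
{
        unsigned long gap_end = high;   /* exclusive top of the current gap */
        int i;

        for (i = n - 1; i >= 0; i--) {
                /* does the gap above vmas[i] fit, without dropping below low? */
                if (gap_end >= len && gap_end - len >= vmas[i].end &&
                    gap_end - len >= low)
                        return gap_end - len;
                if (vmas[i].start < gap_end)
                        gap_end = vmas[i].start;        /* continue below this vma */
        }
        /* last candidate: the gap between low and the lowest vma */
        if (gap_end >= len && gap_end - len >= low)
                return gap_end - len;
        return (unsigned long)-ENOMEM;
}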