author:    Michel Lespinasse <walken@google.com>  2012-12-11 19:02:21 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 20:22:26 -0500
commit:    bb64f55019c7b0971cdcfbf3af9fb6376dd01ead
tree:      76384613ef13f4f859e9bd2a0a9449aa44cfbbe1
parent:    dd5295965b77e5da6705953ef8a7fc528ea328a1
mm: use vm_unmapped_area() on sparc64 architecture
Update the sparc64 arch_get_unmapped_area[_topdown] functions to make
use of vm_unmapped_area() instead of implementing a brute force search.

[akpm@linux-foundation.org: remove now-unused COLOUR_ALIGN_DOWN()]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
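Reader's note, not part of the commit: vm_unmapped_area() searches the
address space for a free range described by a struct vm_unmapped_area_info.
The sketch below shows that descriptor as introduced earlier in this series
("mm: vm_unmapped_area() lookup function", include/linux/mm.h); the field
comments are added here as a reading aid and are not from the kernel source.

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
        unsigned long flags;            /* 0 = bottom-up, or VM_UNMAPPED_AREA_TOPDOWN */
        unsigned long length;           /* size of the mapping being placed */
        unsigned long low_limit;        /* lowest address the range may start at */
        unsigned long high_limit;       /* upper bound for the end of the range */
        unsigned long align_mask;       /* address bits that must match align_offset */
        unsigned long align_offset;     /* required value of the masked bits */
};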
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c  144
1 file changed, 30 insertions(+), 114 deletions(-)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 878ef3d5fec5..59b66b739bf0 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -84,24 +84,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					       unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -134,50 +123,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	return addr;
 }
 
 unsigned long
@@ -190,6 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long task_size = STACK_TOP32;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes. */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -224,73 +186,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_color_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
-
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_color_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
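Reader's note, not part of the commit: the align_mask/align_offset pair is
what replaces the open-coded COLOUR_ALIGN[_DOWN]() arithmetic above.
vm_unmapped_area() only returns addresses whose masked bits equal
align_offset, so a shared mapping keeps the same cache colour as its file
offset. Below is a minimal standalone sketch of that constraint; the
PAGE_SHIFT and SHMLBA values are illustrative, not sparc64's actual
configuration.

/*
 * Standalone sketch (not kernel code): the colour-alignment constraint
 * expressed by align_mask/align_offset in this patch.
 */
#include <stdio.h>

#define PAGE_SHIFT 13UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define SHMLBA     (4UL * PAGE_SIZE)    /* illustrative: 4 page colours */

/* Lowest page-aligned address >= addr whose colour bits match pgoff. */
static unsigned long colour_fit(unsigned long addr, unsigned long pgoff)
{
        unsigned long align_mask   = PAGE_MASK & (SHMLBA - 1);
        unsigned long align_offset = (pgoff << PAGE_SHIFT) & align_mask;

        addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;      /* page-align up */
        while ((addr & align_mask) != align_offset)
                addr += PAGE_SIZE;                      /* next colour slot */
        return addr;
}

int main(void)
{
        /* A shared mapping of file page 7 placed near 0x12340000 must land
         * on an address whose colour bits equal those of 7 << PAGE_SHIFT. */
        printf("%#lx\n", colour_fit(0x12340000UL, 7));  /* -> 0x12346000 */
        return 0;
}

One behavioural nuance of the conversion: the generic search steps only as
far as the nearest matching colour slot, whereas the removed COLOUR_ALIGN()
first rounded up to a full SHMLBA boundary and could overshoot by one colour
span; both placements satisfy the colouring requirement.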