author		Rob Herring <rob.herring@calxeda.com>	2011-11-21 22:01:07 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-12-06 06:15:25 -0500
commit		7dbaa466780a754154531b44c2086f6618cee3a8
tree		1a1bcb26085949cee19bb5bbcbe05b66c0811d31
parent		d22759ed5680055fdecbdcf6644dbc8a467ab801

ARM: 7169/1: topdown mmap support
Similar to other architectures, this adds topdown mmap support in user
process address space allocation policy. This allows mmap sizes greater
than 2GB. This support is largely copied from MIPS and the generic
implementations.

The address space randomization is moved into arch_pick_mmap_layout.

Tested on V-Express with ubuntu and a mmap test from here:
https://bugs.launchpad.net/bugs/861296

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
 arch/arm/include/asm/pgtable.h   |   1 +
 arch/arm/include/asm/processor.h |   2 ++
 arch/arm/mm/mmap.c               | 173 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 171 insertions(+), 5 deletions(-)
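
The commit message's claim (mmap sizes greater than 2GB) can be checked
from userspace. The following is a minimal sketch of such a check, not
the Launchpad test referenced above; the 256MB chunk size and the use of
MAP_NORESERVE are choices made for this illustration only:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t chunk = 256UL << 20;	/* map 256MB at a time */
	size_t total = 0;

	/* Keep mapping anonymous memory until the address space runs out;
	 * on 32-bit ARM a bottom-up layout tends to stall below 2GB. */
	while (mmap(NULL, chunk, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0)
	       != MAP_FAILED)
		total += chunk;

	printf("total mapped: %zu MB\n", total >> 20);
	return 0;
}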
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9451dce3a553..2f659e239727 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -336,6 +336,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * We provide our own arch_get_unmapped_area to cope with VIPT caches.
  */
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
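
Defining HAVE_ARCH_UNMAPPED_AREA_TOPDOWN tells the generic mm code not
to build its default top-down allocator, so the ARM-specific version
added to mmap.c below is used instead. A self-contained toy of that
gating pattern (the names and strings here are illustrative, not kernel
code):

#include <stdio.h>

/* The arch header defines the macro; generic code compiles its
 * default implementation only when the macro is absent. */
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
static const char *impl(void) { return "generic topdown"; }
#else
static const char *impl(void) { return "arch-specific topdown"; }
#endif

int main(void)
{
	printf("using: %s\n", impl());
	return 0;
}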
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b2d9df5667af..ce280b8d613c 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -123,6 +123,8 @@ static inline void prefetch(const void *ptr)
 
 #endif
 
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
 #endif
 
 #endif /* __ASM_ARM_PROCESSOR_H */
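
HAVE_ARCH_PICK_MMAP_LAYOUT likewise suppresses the generic
arch_pick_mmap_layout(). For kernels of this era the generic fallback in
mm/util.c looks roughly like the excerpt below (quoted from memory, so
treat it as a sketch rather than an exact copy); it always picks the
bottom-up layout, which is what the ARM override replaces:

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif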
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 44b628e4d6ea..ce8cb1970d7a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -11,10 +11,49 @@
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~(SHMLBA-1);
+	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
+
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+	unsigned long gap = rlimit(RLIMIT_STACK);
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches. We need to ensure that
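
For intuition about the gap clamping just added, here is a hedged,
userspace-runnable rework of mmap_base(). The TASK_SIZE value models a
3GB/1GB split and is an assumption of this example, not taken from the
patch; likewise the stack limits passed in main():

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE 0xbf000000UL		/* illustrative 3GB/1GB split */
#define MIN_GAP   (128 * 1024 * 1024UL)
#define MAX_GAP   ((TASK_SIZE) / 6 * 5)

static unsigned long mmap_base(unsigned long stack_rlimit, unsigned long rnd)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;		/* always keep >= 128MB free for the stack */
	else if (gap > MAX_GAP)
		gap = MAX_GAP;		/* never give the stack > 5/6 of the space */

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
	/* An 8MB stack limit clamps up to MIN_GAP, so the topdown base sits
	 * 128MB (plus the random offset) below TASK_SIZE. */
	printf("base (8MB stack): %#lx\n", mmap_base(8UL << 20, 0));
	printf("base (1GB stack): %#lx\n", mmap_base(1UL << 30, 0));
	return 0;
}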
@@ -68,13 +107,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (len > mm->cached_hole_size) {
 		start_addr = addr = mm->free_area_cache;
 	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
+		start_addr = addr = mm->mmap_base;
 		mm->cached_hole_size = 0;
 	}
-	/* 8 bits of randomness in 20 address space bits */
-	if ((current->flags & PF_RANDOMIZE) &&
-	    !(current->personality & ADDR_NO_RANDOMIZE))
-		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
 	if (do_align)
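
The randomization removed here (and reinstated in arch_pick_mmap_layout()
in the next hunk) is "8 bits of randomness in 20 address space bits":
with 4KB pages, (get_random_int() % 256) << PAGE_SHIFT produces offsets
within a window just under 1MB (2^20 bytes). A trivial standalone check
of that arithmetic, assuming the usual PAGE_SHIFT of 12:

#include <stdio.h>

int main(void)
{
	/* 8 random bits, each worth one 4KB page:
	 * offsets range over 0 .. 255 * 4096 bytes. */
	unsigned long page_shift = 12;
	unsigned long max_off = 255UL << page_shift;

	printf("max random offset: %lu bytes (~%lu KB)\n",
	       max_off, max_off >> 10);
	return 0;
}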
@@ -111,6 +146,134 @@ full_search:
 	}
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			const unsigned long len, const unsigned long pgoff,
+			const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_align = 0;
+	int aliasing = cache_is_vipt_aliasing();
+
+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+				(!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (addr > len) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+	}
+
+	if (mm->mmap_base < len)
+		goto bottomup;
+
+	addr = mm->mmap_base - len;
+	if (do_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (!vma || addr+len <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start - len;
+		if (do_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (len < vma->vm_start);
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	unsigned long random_factor = 0UL;
+
+	/* 8 bits of randomness in 20 address space bits */
+	if ((current->flags & PF_RANDOMIZE) &&
+	    !(current->personality & ADDR_NO_RANDOMIZE))
+		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base(random_factor);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
+}
+
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This
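
The new policy is easy to observe from userspace. The sketch below
assumes a kernel with this patch applied and a non-legacy layout:
successive anonymous mappings should come back at descending addresses
just below mmap_base, rather than ascending from TASK_UNMAPPED_BASE:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Print a few anonymous mappings; under the topdown policy the
	 * addresses decrease, under the legacy policy they increase. */
	for (int i = 0; i < 4; i++) {
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		printf("mapping %d: %p\n", i, p);
	}
	return 0;
}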