author    Russell King <rmk+kernel@arm.linux.org.uk>  2012-01-05 08:24:16 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2012-01-05 08:24:16 -0500
commit    a32737e1ca650504f172292dd344eb64c02311f3 (patch)
tree      7dd2004ece26081507af877d9dd40b1bd4eecc1a /arch/arm/mm
parent    27edacac7d97d37ec77779c7da08345298a5d283 (diff)
parent    a3c2b511a844641f6d0b60bd84cd6076143b3f2d (diff)
Merge branches 'fixes' and 'misc' into for-linus
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/fault.c	 58
-rw-r--r--	arch/arm/mm/mmap.c	173
-rw-r--r--	arch/arm/mm/proc-v7.S	 11
3 files changed, 218 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index aa33949fef60..4aabeaec25df 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -231,7 +231,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -253,18 +253,7 @@ good_area:
 		goto out;
 	}
 
-	/*
-	 * If for any reason at all we couldn't handle the fault, make
-	 * sure we exit gracefully rather than endlessly redo the fault.
-	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
-	return fault;
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -279,6 +268,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	int write = fsr & FSR_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				(write ? FAULT_FLAG_WRITE : 0);
 
 	if (notify_page_fault(regs, fsr))
 		return 0;
@@ -305,6 +297,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -320,14 +313,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, tsk);
-	up_read(&mm->mmap_sem);
+	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (fault & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
-	else if (fault & VM_FAULT_MINOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					regs, addr);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					regs, addr);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
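
Note on the retry flow above: with FAULT_FLAG_ALLOW_RETRY set, handle_mm_fault() may drop mmap_sem and return VM_FAULT_RETRY instead of blocking on page I/O; do_page_fault() then clears the flag and jumps back to retry, so a second VM_FAULT_RETRY can never be requested and the fault cannot livelock. A minimal userspace sketch of that control flow (the flag values and fake_handle_mm_fault() are stand-ins invented for illustration, not the kernel's definitions):

	/*
	 * Userspace sketch of the retry protocol, illustration only.
	 */
	#include <stdio.h>

	#define FAULT_FLAG_ALLOW_RETRY	0x01
	#define VM_FAULT_RETRY		0x02

	static int fake_handle_mm_fault(unsigned int flags)
	{
		static int io_pending = 1;

		/* Pretend the first attempt hits a page that needs I/O. */
		if ((flags & FAULT_FLAG_ALLOW_RETRY) && io_pending) {
			io_pending = 0;
			return VM_FAULT_RETRY;	/* handler dropped the lock */
		}
		return 0;			/* fault handled */
	}

	int main(void)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
		int fault;

	retry:
		fault = fake_handle_mm_fault(flags);
		if (fault & VM_FAULT_RETRY) {
			/* As in the patch: forbid a second retry. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
		printf("fault resolved, flags=%#x\n", flags);
		return 0;
	}

Run once, this prints the second, non-retryable attempt resolving the fault.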
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 44b628e4d6ea..ce8cb1970d7a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -11,10 +11,49 @@
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~(SHMLBA-1);
+	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
+
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+	unsigned long gap = rlimit(RLIMIT_STACK);
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches. We need to ensure that
@@ -68,13 +107,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (len > mm->cached_hole_size) {
 		start_addr = addr = mm->free_area_cache;
 	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
+		start_addr = addr = mm->mmap_base;
 		mm->cached_hole_size = 0;
 	}
-	/* 8 bits of randomness in 20 address space bits */
-	if ((current->flags & PF_RANDOMIZE) &&
-	    !(current->personality & ADDR_NO_RANDOMIZE))
-		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
 	if (do_align)
@@ -111,6 +146,134 @@ full_search:
 	}
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			const unsigned long len, const unsigned long pgoff,
+			const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_align = 0;
+	int aliasing = cache_is_vipt_aliasing();
+
+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (addr > len) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+	}
+
+	if (mm->mmap_base < len)
+		goto bottomup;
+
+	addr = mm->mmap_base - len;
+	if (do_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (!vma || addr+len <= vma->vm_start)
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start - len;
+		if (do_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (len < vma->vm_start);
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	unsigned long random_factor = 0UL;
+
+	/* 8 bits of randomness in 20 address space bits */
+	if ((current->flags & PF_RANDOMIZE) &&
+	    !(current->personality & ADDR_NO_RANDOMIZE))
+		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base(random_factor);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
+}
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This
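
Note on the colour arithmetic above: COLOUR_ALIGN rounds an address up to a slot with the cache colour implied by pgoff, for the bottom-up walk; COLOUR_ALIGN_DOWN picks such a slot at or below the address, for the top-down walk. A standalone sketch with concrete numbers, assuming 4 KiB pages and SHMLBA == 4 * PAGE_SIZE (16 KiB), the usual ARM VIPT values; the sample addr and pgoff are arbitrary:

	/*
	 * Standalone sketch of the colour-alignment arithmetic,
	 * under the assumptions stated above.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define SHMLBA		(4UL << PAGE_SHIFT)	/* 16 KiB */

	#define COLOUR_ALIGN(addr, pgoff)			\
		((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
		 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

	static unsigned long colour_align_down(unsigned long addr,
					       unsigned long pgoff)
	{
		unsigned long base = addr & ~(SHMLBA - 1);
		unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

		return (base + off <= addr) ? base + off : base - off;
	}

	int main(void)
	{
		unsigned long addr = 0x40005000UL, pgoff = 1;
		unsigned long up = COLOUR_ALIGN(addr, pgoff);
		unsigned long down = colour_align_down(addr, pgoff);

		/* For these inputs both results have colour 0x1000,
		 * i.e. (pgoff << PAGE_SHIFT) & (SHMLBA - 1). */
		printf("up   = %#lx, colour %#lx\n", up, up & (SHMLBA - 1));
		printf("down = %#lx, colour %#lx\n", down, down & (SHMLBA - 1));
		return 0;
	}

For these inputs up is 0x40009000 and down is 0x40005000, both with colour 0x1000, so a shared mapping placed at either address cannot alias in a VIPT cache.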
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index e70a73731eaa..69a98a4204a5 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -284,6 +284,7 @@ __v7_ca5mp_setup:
 __v7_ca9mp_setup:
 	mov	r10, #(1 << 0)			@ TLB ops broadcasting
 	b	1f
+__v7_ca7mp_setup:
 __v7_ca15mp_setup:
 	mov	r10, #0
 1:
@@ -465,6 +466,16 @@ __v7_ca5mp_proc_info:
 	.size	__v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
 	/*
+	 * ARM Ltd. Cortex A7 processor.
+	 */
+	.type	__v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+	.long	0x410fc070
+	.long	0xff0ffff0
+	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
+	/*
 	 * ARM Ltd. Cortex A9 processor.
 	 */
 	.type	__v7_ca9mp_proc_info, #object
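
Note on the new proc_info entry: the two .long words are a CPU id value and a mask. At boot, __lookup_processor_type selects the entry for which (MIDR & mask) == value, and mask 0xff0ffff0 zeroes the variant [23:20] and revision [3:0] fields of the Main ID Register, so any Cortex-A7 stepping matches. A small C sketch of that match rule; the val/mask pair is copied from the entry above, while the sample MIDR is a made-up Cortex-A7 r2p2 id:

	/* Sketch of the proc_info match rule, illustration only. */
	#include <stdio.h>

	struct proc_info {
		unsigned long val;	/* expected CPU id bits */
		unsigned long mask;	/* MIDR bits that must match */
		const char *name;
	};

	static const struct proc_info table[] = {
		{ 0x410fc070UL, 0xff0ffff0UL, "__v7_ca7mp_proc_info" },
	};

	int main(void)
	{
		unsigned long midr = 0x412fc072UL;	/* hypothetical A7 r2p2 */
		unsigned int i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if ((midr & table[i].mask) == table[i].val)
				printf("matched %s\n", table[i].name);
		return 0;
	}

Here 0x412fc072 & 0xff0ffff0 yields 0x410fc070, so the made-up r2p2 part selects the Cortex-A7 entry despite differing variant and revision bits.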