author    Russell King <rmk+kernel@arm.linux.org.uk>  2009-09-22 15:54:53 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2009-09-22 16:01:40 -0400
commit    ae19ffbadc1b2100285a5b5b3d0a4e0a11390904 (patch)
tree      3c2086ab67398a019089a47ca3f362a4bc6db74f /arch/arm/mm
parent    34e84f39a27d059a3e6ec6e8b94aafa702e6f220 (diff)
parent    9173a8ef24a6b1b8031507b35b8ffe5f85a87692 (diff)
Merge branch 'master' into for-linus
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        |   5
-rw-r--r--  arch/arm/mm/fault.c        | 110
-rw-r--r--  arch/arm/mm/proc-xscale.S  |   2
3 files changed, 68 insertions(+), 49 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5fe595aeba69..8d43e58f9244 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -771,3 +771,8 @@ config CACHE_XSC3L2
 	select OUTER_CACHE
 	help
 	  This option enables the L2 cache on XScale3.
+
+config ARM_L1_CACHE_SHIFT
+	int
+	default 6 if ARCH_OMAP3
+	default 5
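
The ARM_L1_CACHE_SHIFT symbol added above sizes the kernel's assumed L1
cache line: a shift of 6 (64-byte lines) on OMAP3 and the 32-byte default
elsewhere. As a minimal sketch of how such a Kconfig int is typically
consumed (the L1_CACHE_* macro names below follow the usual arch
cache-header convention and are an assumption here, not quoted from this
tree):

    /* Kconfig emits CONFIG_ARM_L1_CACHE_SHIFT into the build; an arch
     * cache header can then derive the line size from it (sketch). */
    #define L1_CACHE_SHIFT	CONFIG_ARM_L1_CACHE_SHIFT
    #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 64 on OMAP3, else 32 */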
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cc8829d7e116..379f78556055 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,19 @@
 
 #include "fault.h"
 
+/*
+ * Fault status register encodings.  We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF	(1 << 31)
+#define FSR_WRITE	(1 << 11)
+#define FSR_FS4		(1 << 10)
+#define FSR_FS3_0	(15)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+
 #ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
@@ -182,18 +195,35 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-static int
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+	if (fsr & FSR_WRITE)
+		mask = VM_WRITE;
+	if (fsr & FSR_LNX_PF)
+		mask = VM_EXEC;
+
+	return vma->vm_flags & mask ? false : true;
+}
+
+static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
-	int fault, mask;
+	int fault;
 
 	vma = find_vma(mm, addr);
 	fault = VM_FAULT_BADMAP;
-	if (!vma)
+	if (unlikely(!vma))
 		goto out;
-	if (vma->vm_start > addr)
+	if (unlikely(vma->vm_start > addr))
 		goto check_stack;
 
 	/*
@@ -201,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * memory access, so we can handle it.
 	 */
 good_area:
-	if (fsr & (1 << 11)) /* write? */
-		mask = VM_WRITE;
-	else
-		mask = VM_READ|VM_EXEC|VM_WRITE;
-
-	fault = VM_FAULT_BADACCESS;
-	if (!(vma->vm_flags & mask))
+	if (access_error(fsr, vma)) {
+		fault = VM_FAULT_BADACCESS;
 		goto out;
+	}
 
 	/*
-	 * If for any reason at all we couldn't handle
-	 * the fault, make sure we exit gracefully rather
-	 * than endlessly redo the fault.
+	 * If for any reason at all we couldn't handle the fault, make
+	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			return fault;
-		BUG();
-	}
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR))
+		return fault;
 	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
 	else
 		tsk->min_flt++;
 	return fault;
 
-out_of_memory:
-	if (!is_global_init(tsk))
-		goto out;
-
-	/*
-	 * If we are out of memory for pid1, sleep for a while and retry
-	 */
-	up_read(&mm->mmap_sem);
-	yield();
-	down_read(&mm->mmap_sem);
-	goto survive;
-
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
 		goto good_area;
@@ -278,6 +285,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
 		down_read(&mm->mmap_sem);
+	} else {
+		/*
+		 * The above down_read_trylock() might have succeeded in
+		 * which case, we'll have missed the might_sleep() from
+		 * down_read()
+		 */
+		might_sleep();
 	}
 
 	fault = __do_page_fault(mm, addr, fsr, tsk);
@@ -289,6 +303,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+	if (fault & VM_FAULT_OOM) {
+		/*
+		 * We ran out of memory, call the OOM killer, and return to
+		 * userspace (which will retry the fault, or kill us if we
+		 * got oom-killed)
+		 */
+		pagefault_out_of_memory();
+		return 0;
+	}
+
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
@@ -296,16 +320,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!user_mode(regs))
 		goto no_context;
 
-	if (fault & VM_FAULT_OOM) {
-		/*
-		 * We ran out of memory, or some other thing
-		 * happened to us that made us unable to handle
-		 * the page fault gracefully.
-		 */
-		printk("VM: killing process %s\n", tsk->comm);
-		do_group_exit(SIGKILL);
-		return 0;
-	}
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
@@ -489,10 +503,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
 asmlinkage void __exception
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-	const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
 	struct siginfo info;
 
-	if (!inf->fn(addr, fsr, regs))
+	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
 	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
@@ -508,6 +522,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 asmlinkage void __exception
 do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
 {
-	do_translation_fault(addr, 0, regs);
+	do_translation_fault(addr, FSR_LNX_PF, regs);
 }
 
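
A note on the fault.c changes above: the fault-status field of the ARM FSR
is split in hardware, with FS[3:0] in bits 3:0 and FS[4] in bit 10, and the
new fsr_fs() helper folds them into a single 5-bit index into the
fsr_info[] table; bit 31 (FSR_LNX_PF) is a Linux-only tag for prefetch
aborts, masked off before fault handlers see the register value. A
standalone userspace sketch of the index computation, with sample FSR
values chosen purely for illustration:

    /* fsr_demo.c: sketch of the fsr_fs() index computation above.
     * Build: gcc -Wall -o fsr_demo fsr_demo.c */
    #include <stdio.h>

    #define FSR_FS4		(1 << 10)	/* fault status bit 4 */
    #define FSR_FS3_0	(15)		/* fault status bits 3:0 */

    /* FS[3:0] stays in place; FS[4] moves from bit 10 down to bit 4. */
    static inline int fsr_fs(unsigned int fsr)
    {
    	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
    }

    int main(void)
    {
    	printf("fsr=0x005 -> index %d\n", fsr_fs(0x005));	/* 5  (0b00101) */
    	printf("fsr=0x405 -> index %d\n", fsr_fs(0x405));	/* 21 (0b10101) */
    	return 0;
    }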
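
Similarly, access_error() centralises the old open-coded mask check: a
write fault (FSR_WRITE) demands VM_WRITE, a prefetch abort (FSR_LNX_PF)
demands VM_EXEC, and any other fault is satisfied by read, write, or
execute permission on the VMA. A self-contained sketch follows; the VM_*
values mirror the kernel's, but the vm_area_struct here is a stand-in:

    /* access_demo.c: userspace sketch of the access_error() check.
     * Build: gcc -Wall -o access_demo access_demo.c */
    #include <stdbool.h>
    #include <stdio.h>

    #define VM_READ		0x0001
    #define VM_WRITE	0x0002
    #define VM_EXEC		0x0004

    #define FSR_LNX_PF	(1U << 31)	/* software tag: prefetch abort */
    #define FSR_WRITE	(1 << 11)	/* hardware: access was a write */

    struct vm_area_struct { unsigned int vm_flags; };	/* stand-in */

    static bool access_error(unsigned int fsr, struct vm_area_struct *vma)
    {
    	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

    	if (fsr & FSR_WRITE)
    		mask = VM_WRITE;
    	if (fsr & FSR_LNX_PF)
    		mask = VM_EXEC;

    	return (vma->vm_flags & mask) ? false : true;
    }

    int main(void)
    {
    	struct vm_area_struct ro = { .vm_flags = VM_READ };

    	printf("read  on r--: %s\n", access_error(0, &ro) ? "BADACCESS" : "ok");
    	printf("write on r--: %s\n", access_error(FSR_WRITE, &ro) ? "BADACCESS" : "ok");
    	return 0;
    }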
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 0cce37b93937..423394260bcb 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -17,7 +17,7 @@
  *
  * 2001 Sep 08:
  *	Completely revisited, many important fixes
- *	Nicolas Pitre <nico@cam.org>
+ *	Nicolas Pitre <nico@fluxnic.net>
  */
 
 #include <linux/linkage.h>