aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/mm/fault_32.c
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2012-05-13 21:27:34 -0400
committerPaul Mundt <lethal@linux-sh.org>2012-05-13 21:27:34 -0400
commitdbdb4e9f3fd4914caba6f102e62eef23920ab1f4 (patch)
treebf5d09cc2d744348c71a33e9b447cbae7d0297c2 /arch/sh/mm/fault_32.c
parentc4f10e5cd79ce09ef94be0924395c62350bf262d (diff)
sh: Tidy up and generalize page fault error paths.
This follows the x86 changes for tidying up the page fault error paths. We'll build on top of this for _32/_64 unification. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/fault_32.c')
-rw-r--r--arch/sh/mm/fault_32.c325
1 file changed, 220 insertions, 105 deletions
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 8ce856d1572d..889e83b5ff22 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
2 * Page fault handler for SH with an MMU. 2 * Page fault handler for SH with an MMU.
3 * 3 *
4 * Copyright (C) 1999 Niibe Yutaka 4 * Copyright (C) 1999 Niibe Yutaka
5 * Copyright (C) 2003 - 2009 Paul Mundt 5 * Copyright (C) 2003 - 2012 Paul Mundt
6 * 6 *
7 * Based on linux/arch/i386/mm/fault.c: 7 * Based on linux/arch/i386/mm/fault.c:
8 * Copyright (C) 1995 Linus Torvalds 8 * Copyright (C) 1995 Linus Torvalds
@@ -16,6 +16,7 @@
16#include <linux/hardirq.h> 16#include <linux/hardirq.h>
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <linux/perf_event.h> 18#include <linux/perf_event.h>
19#include <linux/kdebug.h>
19#include <asm/io_trapped.h> 20#include <asm/io_trapped.h>
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
@@ -35,6 +36,20 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
35 return ret; 36 return ret;
36} 37}
37 38
39static void
40force_sig_info_fault(int si_signo, int si_code, unsigned long address,
41 struct task_struct *tsk)
42{
43 siginfo_t info;
44
45 info.si_signo = si_signo;
46 info.si_errno = 0;
47 info.si_code = si_code;
48 info.si_addr = (void __user *)address;
49
50 force_sig_info(si_signo, &info, tsk);
51}
52
38/* 53/*
39 * This is useful to dump out the page tables associated with 54 * This is useful to dump out the page tables associated with
40 * 'addr' in mm 'mm'. 55 * 'addr' in mm 'mm'.
@@ -176,6 +191,185 @@ static noinline int vmalloc_fault(unsigned long address)
176 return 0; 191 return 0;
177} 192}
178 193
194static void
195show_fault_oops(struct pt_regs *regs, unsigned long address)
196{
197 if (!oops_may_print())
198 return;
199
200 printk(KERN_ALERT "BUG: unable to handle kernel ");
201 if (address < PAGE_SIZE)
202 printk(KERN_CONT "NULL pointer dereference");
203 else
204 printk(KERN_CONT "paging request");
205
206 printk(KERN_CONT " at %08lx\n", address);
207 printk(KERN_ALERT "PC:");
208 printk_address(regs->pc, 1);
209
210 show_pte(NULL, address);
211}
212
213static noinline void
214no_context(struct pt_regs *regs, unsigned long writeaccess,
215 unsigned long address)
216{
217 /* Are we prepared to handle this kernel fault? */
218 if (fixup_exception(regs))
219 return;
220
221 if (handle_trapped_io(regs, address))
222 return;
223
224 /*
225 * Oops. The kernel tried to access some bad page. We'll have to
226 * terminate things with extreme prejudice.
227 */
228 bust_spinlocks(1);
229
230 show_fault_oops(regs, address);
231
232 die("Oops", regs, writeaccess);
233 bust_spinlocks(0);
234 do_exit(SIGKILL);
235}
236
237static void
238__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
239 unsigned long address, int si_code)
240{
241 struct task_struct *tsk = current;
242
243 /* User mode accesses just cause a SIGSEGV */
244 if (user_mode(regs)) {
245 /*
246 * It's possible to have interrupts off here:
247 */
248 local_irq_enable();
249
250 force_sig_info_fault(SIGSEGV, si_code, address, tsk);
251
252 return;
253 }
254
255 no_context(regs, writeaccess, address);
256}
257
258static noinline void
259bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
260 unsigned long address)
261{
262 __bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
263}
264
265static void
266__bad_area(struct pt_regs *regs, unsigned long writeaccess,
267 unsigned long address, int si_code)
268{
269 struct mm_struct *mm = current->mm;
270
271 /*
272 * Something tried to access memory that isn't in our memory map..
273 * Fix it, but check if it's kernel or user first..
274 */
275 up_read(&mm->mmap_sem);
276
277 __bad_area_nosemaphore(regs, writeaccess, address, si_code);
278}
279
280static noinline void
281bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
282{
283 __bad_area(regs, writeaccess, address, SEGV_MAPERR);
284}
285
286static noinline void
287bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
288 unsigned long address)
289{
290 __bad_area(regs, writeaccess, address, SEGV_ACCERR);
291}
292
293static void out_of_memory(void)
294{
295 /*
296 * We ran out of memory, call the OOM killer, and return the userspace
297 * (which will retry the fault, or kill us if we got oom-killed):
298 */
299 up_read(&current->mm->mmap_sem);
300
301 pagefault_out_of_memory();
302}
303
304static void
305do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
306{
307 struct task_struct *tsk = current;
308 struct mm_struct *mm = tsk->mm;
309
310 up_read(&mm->mmap_sem);
311
312 /* Kernel mode? Handle exceptions or die: */
313 if (!user_mode(regs))
314 no_context(regs, writeaccess, address);
315
316 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
317}
318
319static noinline int
320mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
321 unsigned long address, unsigned int fault)
322{
323 /*
324 * Pagefault was interrupted by SIGKILL. We have no reason to
325 * continue pagefault.
326 */
327 if (fatal_signal_pending(current)) {
328 if (!(fault & VM_FAULT_RETRY))
329 up_read(&current->mm->mmap_sem);
330 if (!user_mode(regs))
331 no_context(regs, writeaccess, address);
332 return 1;
333 }
334
335 if (!(fault & VM_FAULT_ERROR))
336 return 0;
337
338 if (fault & VM_FAULT_OOM) {
339 /* Kernel mode? Handle exceptions or die: */
340 if (!user_mode(regs)) {
341 up_read(&current->mm->mmap_sem);
342 no_context(regs, writeaccess, address);
343 return 1;
344 }
345
346 out_of_memory();
347 } else {
348 if (fault & VM_FAULT_SIGBUS)
349 do_sigbus(regs, writeaccess, address);
350 else
351 BUG();
352 }
353
354 return 1;
355}
356
357static inline int access_error(int write, struct vm_area_struct *vma)
358{
359 if (write) {
360 /* write, present and write, not present: */
361 if (unlikely(!(vma->vm_flags & VM_WRITE)))
362 return 1;
363 return 0;
364 }
365
366 /* read, not present: */
367 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
368 return 1;
369
370 return 0;
371}
372
179static int fault_in_kernel_space(unsigned long address) 373static int fault_in_kernel_space(unsigned long address)
180{ 374{
181 return address >= TASK_SIZE; 375 return address >= TASK_SIZE;
@@ -194,15 +388,12 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
194 struct task_struct *tsk; 388 struct task_struct *tsk;
195 struct mm_struct *mm; 389 struct mm_struct *mm;
196 struct vm_area_struct * vma; 390 struct vm_area_struct * vma;
197 int si_code;
198 int fault; 391 int fault;
199 siginfo_t info;
200 unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 392 unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
201 (writeaccess ? FAULT_FLAG_WRITE : 0)); 393 (writeaccess ? FAULT_FLAG_WRITE : 0));
202 394
203 tsk = current; 395 tsk = current;
204 mm = tsk->mm; 396 mm = tsk->mm;
205 si_code = SEGV_MAPERR;
206 vec = lookup_exception_vector(); 397 vec = lookup_exception_vector();
207 398
208 /* 399 /*
@@ -220,7 +411,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
220 if (notify_page_fault(regs, vec)) 411 if (notify_page_fault(regs, vec))
221 return; 412 return;
222 413
223 goto bad_area_nosemaphore; 414 bad_area_nosemaphore(regs, writeaccess, address);
415 return;
224 } 416 }
225 417
226 if (unlikely(notify_page_fault(regs, vec))) 418 if (unlikely(notify_page_fault(regs, vec)))
@@ -236,34 +428,38 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
236 * If we're in an interrupt, have no user context or are running 428 * If we're in an interrupt, have no user context or are running
237 * in an atomic region then we must not take the fault: 429 * in an atomic region then we must not take the fault:
238 */ 430 */
239 if (in_atomic() || !mm) 431 if (unlikely(in_atomic() || !mm)) {
240 goto no_context; 432 bad_area_nosemaphore(regs, writeaccess, address);
433 return;
434 }
241 435
242retry: 436retry:
243 down_read(&mm->mmap_sem); 437 down_read(&mm->mmap_sem);
244 438
245 vma = find_vma(mm, address); 439 vma = find_vma(mm, address);
246 if (!vma) 440 if (unlikely(!vma)) {
247 goto bad_area; 441 bad_area(regs, writeaccess, address);
248 if (vma->vm_start <= address) 442 return;
443 }
444 if (likely(vma->vm_start <= address))
249 goto good_area; 445 goto good_area;
250 if (!(vma->vm_flags & VM_GROWSDOWN)) 446 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
251 goto bad_area; 447 bad_area(regs, writeaccess, address);
252 if (expand_stack(vma, address)) 448 return;
253 goto bad_area; 449 }
450 if (unlikely(expand_stack(vma, address))) {
451 bad_area(regs, writeaccess, address);
452 return;
453 }
254 454
255 /* 455 /*
256 * Ok, we have a good vm_area for this memory access, so 456 * Ok, we have a good vm_area for this memory access, so
257 * we can handle it.. 457 * we can handle it..
258 */ 458 */
259good_area: 459good_area:
260 si_code = SEGV_ACCERR; 460 if (unlikely(access_error(writeaccess, vma))) {
261 if (writeaccess) { 461 bad_area_access_error(regs, writeaccess, address);
262 if (!(vma->vm_flags & VM_WRITE)) 462 return;
263 goto bad_area;
264 } else {
265 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
266 goto bad_area;
267 } 463 }
268 464
269 /* 465 /*
@@ -273,16 +469,9 @@ good_area:
273 */ 469 */
274 fault = handle_mm_fault(mm, vma, address, flags); 470 fault = handle_mm_fault(mm, vma, address, flags);
275 471
276 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 472 if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
277 return; 473 if (mm_fault_error(regs, writeaccess, address, fault))
278 474 return;
279 if (unlikely(fault & VM_FAULT_ERROR)) {
280 if (fault & VM_FAULT_OOM)
281 goto out_of_memory;
282 else if (fault & VM_FAULT_SIGBUS)
283 goto do_sigbus;
284 BUG();
285 }
286 475
287 if (flags & FAULT_FLAG_ALLOW_RETRY) { 476 if (flags & FAULT_FLAG_ALLOW_RETRY) {
288 if (fault & VM_FAULT_MAJOR) { 477 if (fault & VM_FAULT_MAJOR) {
@@ -307,80 +496,6 @@ good_area:
307 } 496 }
308 497
309 up_read(&mm->mmap_sem); 498 up_read(&mm->mmap_sem);
310 return;
311
312 /*
313 * Something tried to access memory that isn't in our memory map..
314 * Fix it, but check if it's kernel or user first..
315 */
316bad_area:
317 up_read(&mm->mmap_sem);
318
319bad_area_nosemaphore:
320 if (user_mode(regs)) {
321 info.si_signo = SIGSEGV;
322 info.si_errno = 0;
323 info.si_code = si_code;
324 info.si_addr = (void *) address;
325 force_sig_info(SIGSEGV, &info, tsk);
326 return;
327 }
328
329no_context:
330 /* Are we prepared to handle this kernel fault? */
331 if (fixup_exception(regs))
332 return;
333
334 if (handle_trapped_io(regs, address))
335 return;
336/*
337 * Oops. The kernel tried to access some bad page. We'll have to
338 * terminate things with extreme prejudice.
339 *
340 */
341
342 bust_spinlocks(1);
343
344 if (oops_may_print()) {
345 printk(KERN_ALERT
346 "Unable to handle kernel %s at virtual address %08lx\n",
347 (address < PAGE_SIZE) ? "NULL pointer dereference" :
348 "paging request", address);
349
350 show_pte(mm, address);
351 }
352
353 die("Oops", regs, writeaccess);
354 bust_spinlocks(0);
355 do_exit(SIGKILL);
356
357/*
358 * We ran out of memory, or some other thing happened to us that made
359 * us unable to handle the page fault gracefully.
360 */
361out_of_memory:
362 up_read(&mm->mmap_sem);
363 if (!user_mode(regs))
364 goto no_context;
365 pagefault_out_of_memory();
366 return;
367
368do_sigbus:
369 up_read(&mm->mmap_sem);
370
371 /*
372 * Send a sigbus, regardless of whether we were in kernel
373 * or user mode.
374 */
375 info.si_signo = SIGBUS;
376 info.si_errno = 0;
377 info.si_code = BUS_ADRERR;
378 info.si_addr = (void *)address;
379 force_sig_info(SIGBUS, &info, tsk);
380
381 /* Kernel mode? Handle exceptions or die */
382 if (!user_mode(regs))
383 goto no_context;
384} 499}
385 500
386/* 501/*