author | Paul Mundt <lethal@linux-sh.org> | 2012-05-14 01:57:28 -0400
committer | Paul Mundt <lethal@linux-sh.org> | 2012-05-14 01:57:28 -0400
commit | 5a1dc78a38bfb04159a08cd493e5b3d844939e6c
tree | 860420d3d52e2666449d1e688c399876a5c16bd3 /arch/sh/mm/fault_32.c
parent | f007688a50cf5724049a4a5f17023fcdb0966b54
sh: Support thread fault code encoding.
This provides a simple interface modelled after sparc64/m32r to encode
the error code in the upper byte of thread_info for finer-grained
handling in the page fault path.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
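
The helpers that actually stash the code in thread_info live in a header that is not part of this diff (the diffstat below is limited to fault_32.c). As a rough sketch of what a sparc64/m32r-style interface of this kind can look like, the accessors below store the fault code in the top byte of thread_info->flags; the shift value and the FAULT_CODE_* definitions shown here are illustrative assumptions, not the exact code from this commit.

/*
 * Illustrative sketch only: keep the low 24 bits of thread_info->flags
 * for the usual TIF_* flags and use the upper byte for the fault code.
 */
#define TI_FLAG_FAULT_CODE_SHIFT	24

/* Example encodings; the real FAULT_CODE_* values may differ. */
#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 2)	/* initial page write */

static inline void set_thread_fault_code(unsigned int val)
{
	struct thread_info *ti = current_thread_info();

	/* Clear the old fault code, then OR in the new one. */
	ti->flags = (ti->flags & ((1UL << TI_FLAG_FAULT_CODE_SHIFT) - 1)) |
		    ((unsigned long)val << TI_FLAG_FAULT_CODE_SHIFT);
}

static inline unsigned int get_thread_fault_code(void)
{
	return current_thread_info()->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}

With something along these lines in place, do_page_fault() and handle_tlbmiss() can call set_thread_fault_code(error_code) as in the hunks below, and later fault handling can recover the code via get_thread_fault_code() instead of threading an extra argument around.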
Diffstat (limited to 'arch/sh/mm/fault_32.c')
-rw-r--r-- | arch/sh/mm/fault_32.c | 68
1 file changed, 36 insertions(+), 32 deletions(-)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 889e83b5ff22..a469b95e88fb 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -211,7 +211,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
 }
 
 static noinline void
-no_context(struct pt_regs *regs, unsigned long writeaccess,
+no_context(struct pt_regs *regs, unsigned long error_code,
 	   unsigned long address)
 {
 	/* Are we prepared to handle this kernel fault? */
@@ -229,13 +229,13 @@ no_context(struct pt_regs *regs, unsigned long writeaccess,
 
 	show_fault_oops(regs, address);
 
-	die("Oops", regs, writeaccess);
+	die("Oops", regs, error_code);
 	bust_spinlocks(0);
 	do_exit(SIGKILL);
 }
 
 static void
-__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		       unsigned long address, int si_code)
 {
 	struct task_struct *tsk = current;
@@ -252,18 +252,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
 		return;
 	}
 
-	no_context(regs, writeaccess, address);
+	no_context(regs, error_code, address);
 }
 
 static noinline void
-bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
+bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		     unsigned long address)
 {
-	__bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
+	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
 }
 
 static void
-__bad_area(struct pt_regs *regs, unsigned long writeaccess,
+__bad_area(struct pt_regs *regs, unsigned long error_code,
 	   unsigned long address, int si_code)
 {
 	struct mm_struct *mm = current->mm;
@@ -274,20 +274,20 @@ __bad_area(struct pt_regs *regs, unsigned long writeaccess,
 	 */
 	up_read(&mm->mmap_sem);
 
-	__bad_area_nosemaphore(regs, writeaccess, address, si_code);
+	__bad_area_nosemaphore(regs, error_code, address, si_code);
 }
 
 static noinline void
-bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
+bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
-	__bad_area(regs, writeaccess, address, SEGV_MAPERR);
+	__bad_area(regs, error_code, address, SEGV_MAPERR);
 }
 
 static noinline void
-bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
+bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 		      unsigned long address)
 {
-	__bad_area(regs, writeaccess, address, SEGV_ACCERR);
+	__bad_area(regs, error_code, address, SEGV_ACCERR);
 }
 
 static void out_of_memory(void)
@@ -302,7 +302,7 @@ static void out_of_memory(void)
 }
 
 static void
-do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
@@ -311,13 +311,13 @@ do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs))
-		no_context(regs, writeaccess, address);
+		no_context(regs, error_code, address);
 
 	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
 static noinline int
-mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
+mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, unsigned int fault)
 {
 	/*
@@ -328,7 +328,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
 		if (!(fault & VM_FAULT_RETRY))
 			up_read(&current->mm->mmap_sem);
 		if (!user_mode(regs))
-			no_context(regs, writeaccess, address);
+			no_context(regs, error_code, address);
 		return 1;
 	}
 
@@ -339,14 +339,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
 		/* Kernel mode? Handle exceptions or die: */
 		if (!user_mode(regs)) {
 			up_read(&current->mm->mmap_sem);
-			no_context(regs, writeaccess, address);
+			no_context(regs, error_code, address);
 			return 1;
 		}
 
 		out_of_memory();
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(regs, writeaccess, address);
+			do_sigbus(regs, error_code, address);
 		else
 			BUG();
 	}
@@ -381,7 +381,7 @@ static int fault_in_kernel_space(unsigned long address)
  * routines.
  */
 asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-					unsigned long writeaccess,
+					unsigned long error_code,
 					unsigned long address)
 {
 	unsigned long vec;
@@ -389,8 +389,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	int fault;
+	int write = error_code & FAULT_CODE_WRITE;
 	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (writeaccess ? FAULT_FLAG_WRITE : 0));
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	tsk = current;
 	mm = tsk->mm;
@@ -411,7 +412,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		if (notify_page_fault(regs, vec))
 			return;
 
-		bad_area_nosemaphore(regs, writeaccess, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -429,7 +430,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * in an atomic region then we must not take the fault:
 	 */
 	if (unlikely(in_atomic() || !mm)) {
-		bad_area_nosemaphore(regs, writeaccess, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -438,17 +439,17 @@ retry:
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
-		bad_area(regs, writeaccess, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, writeaccess, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, writeaccess, address);
+		bad_area(regs, error_code, address);
 		return;
 	}
 
@@ -457,11 +458,13 @@ retry:
 	 * we can handle it..
 	 */
 good_area:
-	if (unlikely(access_error(writeaccess, vma))) {
-		bad_area_access_error(regs, writeaccess, address);
+	if (unlikely(access_error(error_code, vma))) {
+		bad_area_access_error(regs, error_code, address);
 		return;
 	}
 
+	set_thread_fault_code(error_code);
+
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -470,7 +473,7 @@ good_area:
 	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
-		if (mm_fault_error(regs, writeaccess, address, fault))
+		if (mm_fault_error(regs, error_code, address, fault))
 			return;
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -502,7 +505,7 @@ good_area:
  * Called with interrupts disabled.
  */
 asmlinkage int __kprobes
-handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address)
 {
 	pgd_t *pgd;
@@ -535,10 +538,10 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
 		return 1;
-	if (unlikely(writeaccess && !pte_write(entry)))
+	if (unlikely(error_code && !pte_write(entry)))
 		return 1;
 
-	if (writeaccess)
+	if (error_code)
 		entry = pte_mkdirty(entry);
 	entry = pte_mkyoung(entry);
 
@@ -550,10 +553,11 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	 * the case of an initial page write exception, so we need to
 	 * flush it in order to avoid potential TLB entry duplication.
 	 */
-	if (writeaccess == 2)
+	if (error_code == FAULT_CODE_INITIAL)
 		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
+	set_thread_fault_code(error_code);
 	update_mmu_cache(NULL, address, pte);
 
 	return 0;