author		Kautuk Consul <consul.kautuk@gmail.com>	2012-03-26 02:40:49 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-04 18:42:24 -0400
commit		7358e51082b68e2026628220510a29197e2b9933
tree		7ddfd1d23bf4559c0b8b3f29f41276ab7a9f08ae	/arch/sparc/mm/fault_64.c
parent		64ebe987311853ea857a244439de5b947a4b1b07
sparc/mm/fault_64.c: Port OOM changes to do_sparc64_fault
Commit d065bd810b6deb67d4897a14bfe21f8eb526ba99 ("mm: retry page fault when blocking on disk transfer") and commit 37b23e0525d393d48a7d59f870b3bc061a30ccdb ("x86,mm: make pagefault killable") introduced changes into the x86 page fault handler to make it retryable as well as killable. These changes reduce the mmap_sem hold time, which is crucial during OOM killer invocation.

Port these changes to 64-bit sparc.

Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/mm/fault_64.c')
-rw-r--r--	arch/sparc/mm/fault_64.c	37
1 file changed, 30 insertions(+), 7 deletions(-)
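For context, the hunks below follow the same retry/killable pattern the x86 handler already uses: take mmap_sem, call handle_mm_fault() with FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, bail out early if a fatal signal arrived while waiting, and retry exactly once (with the retry flag cleared) when the fault code dropped mmap_sem for us. The sketch below is a minimal user-space simulation of that control flow, not kernel code: the flag values, simulate_handle_mm_fault(), simulated_fault() and the pthread rwlock standing in for mmap_sem are hypothetical stand-ins for illustration only.

/*
 * Hypothetical user-space sketch of the retry/killable fault pattern
 * this patch ports to sparc64.  None of these names or flag values are
 * kernel APIs; they only mimic the control flow of the real handler.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x01	/* stand-in value */
#define FAULT_FLAG_KILLABLE	0x02	/* stand-in value */
#define FAULT_FLAG_WRITE	0x04	/* stand-in value */
#define VM_FAULT_RETRY		0x10	/* stand-in value */

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in: pretend the first attempt had to wait for page I/O and asked
 * the caller to retry after the lock was dropped on its behalf (as
 * __lock_page_or_retry does with mmap_sem in the kernel). */
static unsigned int simulate_handle_mm_fault(unsigned int flags)
{
	static bool first_try = true;

	if (first_try && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		first_try = false;
		pthread_rwlock_unlock(&mmap_sem);	/* lock released for us */
		return VM_FAULT_RETRY;
	}
	return 0;	/* fault serviced */
}

static void simulated_fault(bool is_write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	unsigned int fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;
retry:
	pthread_rwlock_rdlock(&mmap_sem);
	fault = simulate_handle_mm_fault(flags);

	if (fault & VM_FAULT_RETRY) {
		/* The lock was already dropped for us; retry exactly once. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	pthread_rwlock_unlock(&mmap_sem);
	printf("fault serviced, final flags=0x%x\n", flags);
}

int main(void)
{
	simulated_fault(true);
	return 0;
}

Compile with cc -pthread. The single forced retry mirrors how __lock_page_or_retry releases mmap_sem before sleeping on page I/O, which is why the real handler does not call up_read() on the retry path.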
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 504c0622f729..1fe0429b6314 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			insn = get_fault_insn(regs, insn);
 			goto handle_kernel_fault;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 	up_read(&mm->mmap_sem);
 