author     Paul Mundt <lethal@linux-sh.org>    2012-04-10 23:44:50 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2012-04-10 23:44:50 -0400
commit     a1e2030122d4c2605089e60dce28d2fcf9c3ef98 (patch)
tree       a5617a89aca5b81501920822cbc3cdaedbbe665a /arch/sh/mm
parent     11fd982400a8779cb4b5f7cdc806008569ff545c (diff)
sh64: Port OOM changes to do_page_fault
Reflect the sh32 OOM changes for the sh64 page fault handler, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/tlbflush_64.c | 40
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 11c5a18f10e..70b3c271aa9 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001 Paolo Alberelli
  * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2009 Paul Mundt
+ * Copyright (C) 2003 - 2012 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -95,6 +95,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	const struct exception_table_entry *fixup;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (writeaccess ? FAULT_FLAG_WRITE : 0));
 	pte_t *pte;
 	int fault;
 
@@ -124,6 +126,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+retry:
 	/* TLB misses upon some cache flushes get done under cli() */
 	down_read(&mm->mmap_sem);
 
@@ -188,7 +191,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -197,14 +204,27 @@ good_area:
 		BUG();
 	}
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-			      regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-			      regs, address);
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+				      regs, address);
+		}
+
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+			goto retry;
+		}
 	}
 
 	/* If we get here, the page fault has been handled.  Do the TLB refill
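
The patch brings the sh64 handler in line with the retry flow sh32 already uses: call handle_mm_fault() with FAULT_FLAG_ALLOW_RETRY set, bail out early if a fatal signal is pending when the handler asked for a retry, and otherwise clear the retry flag and jump back to the retry: label so the fault is attempted once more after mmap_sem was dropped. A minimal user-space sketch of that control flow is below; the FLAG_ and FAULT_ constants and fake_handle_fault() are hypothetical stand-ins for the kernel's FAULT_FLAG_*, VM_FAULT_* and handle_mm_fault(), not the real API.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's fault flags and result bits. */
#define FLAG_ALLOW_RETRY  0x1
#define FLAG_WRITE        0x2
#define FAULT_RETRY       0x1
#define FAULT_ERROR       0x2

/* Pretend fault handler: asks for one retry, then reports success. */
static int fake_handle_fault(unsigned int flags)
{
	static int attempts;

	if ((flags & FLAG_ALLOW_RETRY) && attempts++ == 0)
		return FAULT_RETRY;
	return 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY | FLAG_WRITE;
	int fault;

retry:
	fault = fake_handle_fault(flags);
	if (fault & FAULT_ERROR)
		return 1;

	if ((flags & FLAG_ALLOW_RETRY) && (fault & FAULT_RETRY)) {
		/* Retry at most once: drop the flag, as the patch does. */
		flags &= ~FLAG_ALLOW_RETRY;
		goto retry;
	}

	printf("fault handled\n");
	return 0;
}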