author    Kautuk Consul <consul.kautuk@gmail.com>  2012-03-20 09:21:40 -0400
committer Michal Simek <monstr@monstr.eu>          2012-05-25 00:34:34 -0400
commit    f397c077e114df07bd2b94a16681a04be8d59dff (patch)
tree      2bf5882aa6cc8a9f1e03b792849a1dc6e2216500 /arch/microblaze
parent    59516b07b4ffa7e607a5787674ea3c405f1b390c (diff)
microblaze: Port OOM changes to do_page_fault
Commit d065bd810b6deb67d4897a14bfe21f8eb526ba99 (mm: retry page fault when blocking on disk transfer) and commit 37b23e0525d393d48a7d59f870b3bc061a30ccdb (x86,mm: make pagefault killable) introduced changes into the x86 pagefault handler that make the page fault retryable as well as killable.

These changes reduce mmap_sem hold times, which is crucial during OOM killer invocation.

Port these changes to microblaze.

Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Diffstat (limited to 'arch/microblaze')
-rw-r--r--  arch/microblaze/mm/fault.c | 33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c38a265846de..eb365d6795fa 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,6 +92,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	int code = SEGV_MAPERR;
 	int is_write = error_code & ESR_S;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				(is_write ? FAULT_FLAG_WRITE : 0);
 
 	regs->ear = address;
 	regs->esr = error_code;
@@ -138,6 +140,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 		goto bad_area_nosemaphore;
 
+retry:
 	down_read(&mm->mmap_sem);
 	}
 
@@ -210,7 +213,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -218,11 +225,27 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (unlikely(fault & VM_FAULT_MAJOR))
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (unlikely(fault & VM_FAULT_MAJOR))
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
+
 	/*
 	 * keep track of tlb+htab misses that are good addrs but
 	 * just need pte's created via handle_mm_fault()
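
For reference, below is a condensed, standalone sketch of the retry/killable pattern this patch ports into do_page_fault(). It is not the actual microblaze handler: the wrapper name fault_sketch(), its parameter list, and the elided vma lookup and error paths are illustrative assumptions; only the flag handling mirrors the diff above, using the 2012-era handle_mm_fault(mm, vma, address, flags) signature.

	/*
	 * Condensed sketch of the retry/killable fault pattern above.
	 * Hypothetical wrapper for illustration only -- the real handler
	 * also does the vma lookup, access checks and signal delivery.
	 */
	static void fault_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long address, int is_write)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				     (is_write ? FAULT_FLAG_WRITE : 0);
		int fault;

	retry:
		down_read(&mm->mmap_sem);

		/* ... find_vma() and access checks elided ... */

		fault = handle_mm_fault(mm, vma, address, flags);

		/*
		 * Killable: if a fatal signal (e.g. from the OOM killer)
		 * arrived while the fault slept, bail out.  mmap_sem was
		 * already dropped by __lock_page_or_retry() on the
		 * VM_FAULT_RETRY path, so there is nothing to release.
		 */
		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
			return;

		if (unlikely(fault & VM_FAULT_ERROR)) {
			up_read(&mm->mmap_sem);
			return;		/* real handler branches to OOM/SIGBUS here */
		}

		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (unlikely(fault & VM_FAULT_MAJOR))
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				/*
				 * Retry exactly once without ALLOW_RETRY so
				 * the handler cannot loop forever; mmap_sem
				 * was released for us before VM_FAULT_RETRY
				 * was returned.
				 */
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				goto retry;
			}
		}

		up_read(&mm->mmap_sem);
	}

The two key points are that mmap_sem is no longer held across the wait for I/O (the mm core drops it before returning VM_FAULT_RETRY), and that a task targeted by the OOM killer can exit promptly instead of re-entering the fault.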