author	Ley Foon Tan <lftan@altera.com>	2015-02-09 05:11:29 -0500
committer	Ley Foon Tan <lftan@altera.com>	2015-02-09 05:11:29 -0500
commit	96f3a5cc33baede169e0d330119090789e97e86b (patch)
tree	a3b974676527451ef7542ca1029b92d136ceaee7
parent	ad7ef26d433bbf21a915652d96ad07048a0a4e26 (diff)
nios2: Port OOM changes to do_page_fault()
Commit d065bd810b6d ("mm: retry page fault when blocking on disk transfer") and commit 37b23e0525d3 ("x86,mm: make pagefault killable") introduced changes into the x86 page fault handler to make it retryable as well as killable. These changes reduce the mmap_sem hold time, which is crucial during OOM killer invocation.

Port these changes to the nios2 page fault handler.

Signed-off-by: Ley Foon Tan <lftan@altera.com>
-rw-r--r--	arch/nios2/mm/fault.c	37
1 file changed, 32 insertions(+), 5 deletions(-)
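For reference, the retry/killable pattern that this port (and the corresponding ports for other architectures) follows can be summarized by the sketch below. It is a schematic outline only, written against the pre-4.x API used in this patch (mmap_sem, handle_mm_fault() taking mm and vma); the function name fault_retry_sketch is illustrative, and the VMA lookup, access checks and error paths are deliberately omitted.

/*
 * Schematic sketch of the retryable/killable fault pattern, using the
 * pre-4.x names that appear in this patch.  Not a complete handler:
 * VMA lookup, access checks and error paths are left out.
 */
static void fault_retry_sketch(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address)
{
	/* Allow one retry and let a fatal signal interrupt the wait. */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

retry:
	down_read(&mm->mmap_sem);
	fault = handle_mm_fault(mm, vma, address, flags);

	/*
	 * On VM_FAULT_RETRY mmap_sem has already been dropped by the core
	 * mm code; if a fatal signal is pending, return right away so the
	 * OOM killer (or kill -9) does not wait on this task.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (fault & VM_FAULT_RETRY) {
		/* Retry only once, to avoid any risk of starvation. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	up_read(&mm->mmap_sem);
}

The point of FAULT_FLAG_KILLABLE is that a task selected by the OOM killer can die promptly instead of holding mmap_sem while it waits for page I/O.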
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index d194c0427b26..0d231adfe576 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -47,7 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	struct mm_struct *mm = tsk->mm;
 	int code = SEGV_MAPERR;
 	int fault;
-	unsigned int flags = 0;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	cause >>= 2;
 
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ea))
 			goto bad_area_nosemaphore;
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -132,6 +133,10 @@ survive:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -141,10 +146,32 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
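The "No need to up_read()" comment above refers to the behaviour of __lock_page_or_retry() in mm/filemap.c: in kernels of this vintage it releases mmap_sem itself, before sleeping on the page lock, whenever retry is allowed. A paraphrased sketch of that retry branch is shown below; the name lock_page_or_retry_sketch is illustrative and this is not a verbatim copy of the kernel source.

/*
 * Paraphrased sketch of why the fault handler must not up_read() again
 * on the retry path: when FAULT_FLAG_ALLOW_RETRY is set, the page-lock
 * helper drops mmap_sem itself before waiting (3.x-era behaviour).
 */
static int lock_page_or_retry_sketch(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		up_read(&mm->mmap_sem);		/* semaphore released here... */
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;			/* ...so the caller sees VM_FAULT_RETRY */
	}

	lock_page(page);			/* no-retry path: take the lock and continue */
	return 1;
}

Because the 0 return propagates up as VM_FAULT_RETRY with the semaphore already released, the handler's retry branch can jump straight back to the retry: label and re-take mmap_sem itself.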