author     Kautuk Consul <consul.kautuk@gmail.com>   2012-03-31 08:05:17 -0400
committer  Guan Xuetao <gxt@mprc.pku.edu.cn>         2012-11-09 04:30:08 -0500
commit     f3f09d5a4462929609342460d756fab2e8151421 (patch)
tree       43ce4d51ebca8aaca25514ca57e9b61e764ff51b /arch
parent     0e4a43ed08e2f44aa7b96aa95d0a540d675483e1 (diff)
unicore32/mm/fault.c: Port OOM changes to do_pf
Commit d065bd810b6deb67d4897a14bfe21f8eb526ba99
(mm: retry page fault when blocking on disk transfer) and
commit 37b23e0525d393d48a7d59f870b3bc061a30ccdb
(x86,mm: make pagefault killable) introduced changes into the
x86 page fault handler to make it retryable as well as killable.
These changes reduce the mmap_sem hold time, which is crucial
during OOM killer invocation.
Port these changes to unicore32.
Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Acked-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Diffstat (limited to 'arch')
-rw-r--r--  arch/unicore32/mm/fault.c | 37
1 file changed, 27 insertions(+), 10 deletions(-)
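Before the diff itself, a minimal, compilable sketch of the retry pattern being ported may help: the first attempt runs with FAULT_FLAG_ALLOW_RETRY (and FAULT_FLAG_KILLABLE) set, a pending fatal signal short-circuits the handler, and at most one retry is made with FAULT_FLAG_ALLOW_RETRY cleared. This is a userspace mock, not kernel code: mock_handle_fault(), mock_fatal_signal_pending(), and the numeric flag values are invented stand-ins for handle_mm_fault(), fatal_signal_pending(), and the real kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the kernel's fault flags and result bits;
 * the names mirror the diff below, but the numeric values are arbitrary.
 */
#define FAULT_FLAG_ALLOW_RETRY	0x01
#define FAULT_FLAG_KILLABLE	0x02
#define FAULT_FLAG_WRITE	0x04

#define VM_FAULT_ERROR		0x10
#define VM_FAULT_MAJOR		0x20
#define VM_FAULT_RETRY		0x40

/* Mock fault handler: asks for a retry on the first attempt only. */
static unsigned int mock_handle_fault(unsigned int flags)
{
	static int calls;

	if (calls++ == 0 && (flags & FAULT_FLAG_ALLOW_RETRY))
		return VM_FAULT_RETRY;
	return 0;
}

/* Mock: in the kernel this would check the current task's pending signals. */
static bool mock_fatal_signal_pending(void)
{
	return false;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	unsigned long maj_flt = 0, min_flt = 0;
	unsigned int fault;

retry:
	fault = mock_handle_fault(flags);

	/* A pending fatal signal takes priority over retrying the fault. */
	if ((fault & VM_FAULT_RETRY) && mock_fatal_signal_pending())
		return 0;

	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* Account the fault; the retry pass skips this block
		 * because ALLOW_RETRY has been cleared below. */
		if (fault & VM_FAULT_MAJOR)
			maj_flt++;
		else
			min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Retry exactly once: clearing ALLOW_RETRY avoids
			 * any risk of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	printf("major faults: %lu, minor faults: %lu\n", maj_flt, min_flt);
	return 0;
}

Run standalone, the sketch reports one minor fault: the first attempt asks for a retry and is counted, the second attempt succeeds, and no further retry is allowed because the flag was cleared.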
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 2eeb9c04cab0..f9b5c10bccee 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 }
 
 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -194,14 +194,7 @@ good_area:
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
-			(!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
 
 	tsk = current;
 	mm = tsk->mm;
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		if (!user_mode(regs)
 		    && !search_exception_tables(regs->UCreg_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_pf(mm, addr, fsr, tsk);
+	fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 
 	/*