Diffstat (limited to 'arch/arm/mm/fault.c')
-rw-r--r--  arch/arm/mm/fault.c | 58 +++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index eb5520fc755f..bb7eac381a8e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -220,7 +220,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-                struct task_struct *tsk)
+                unsigned int flags, struct task_struct *tsk)
 {
         struct vm_area_struct *vma;
         int fault;
@@ -242,18 +242,7 @@ good_area:
                 goto out;
         }
 
-        /*
-         * If for any reason at all we couldn't handle the fault, make
-         * sure we exit gracefully rather than endlessly redo the fault.
-         */
-        fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
-        if (unlikely(fault & VM_FAULT_ERROR))
-                return fault;
-        if (fault & VM_FAULT_MAJOR)
-                tsk->maj_flt++;
-        else
-                tsk->min_flt++;
-        return fault;
+        return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
         if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
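
With its result no longer consumed locally, __do_page_fault() becomes a thin wrapper: the VM_FAULT_ERROR check and the maj_flt/min_flt accounting move out to do_page_fault(), the only place that can know whether a given pass is the first attempt or a retry. A sketch of the helper's resulting shape (abridged; the vma lookup and access_error()/check_stack handling from the surrounding code are elided):

        static int __kprobes
        __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                        unsigned int flags, struct task_struct *tsk)
        {
                struct vm_area_struct *vma;

                vma = find_vma(mm, addr);
                /* ... vma validity and access_error() checks elided ... */

                /* Forward the caller-supplied flags unmodified; the caller
                 * decides whether retry is allowed and does the accounting. */
                return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
        }

Keeping the accounting here would count a retried fault twice, once per pass through the helper.
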
@@ -268,6 +257,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         struct task_struct *tsk;
         struct mm_struct *mm;
         int fault, sig, code;
+        int write = fsr & FSR_WRITE;
+        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                (write ? FAULT_FLAG_WRITE : 0);
 
         if (notify_page_fault(regs, fsr))
                 return 0;
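
The flag word assembled here is what opts ARM into the mm core's fault-retry protocol. Roughly, as a summary rather than code from this patch:

        /*
         * FAULT_FLAG_ALLOW_RETRY - handle_mm_fault() may drop mmap_sem while
         *        waiting on a locked page and return VM_FAULT_RETRY instead
         *        of sleeping through the page I/O with the semaphore held.
         * FAULT_FLAG_KILLABLE   - that wait may be cut short by a fatal
         *        signal (see __lock_page_or_retry() in mm/filemap.c).
         * FAULT_FLAG_WRITE      - the faulting access was a write, taken
         *        from the FSR_WRITE bit of the ARM fault status register.
         */
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                ((fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
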
@@ -294,6 +286,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         if (!down_read_trylock(&mm->mmap_sem)) {
                 if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                         goto no_context;
+retry:
                 down_read(&mm->mmap_sem);
         } else {
                 /*
@@ -309,14 +302,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
         }
 
-        fault = __do_page_fault(mm, addr, fsr, tsk);
-        up_read(&mm->mmap_sem);
+        fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+        /* If we need to retry but a fatal signal is pending, handle the
+         * signal first. We do not need to release the mmap_sem because
+         * it would already be released in __lock_page_or_retry in
+         * mm/filemap.c. */
+        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+                return 0;
+
+        /*
+         * Major/minor page fault accounting is only done on the
+         * initial attempt. If we go through a retry, it is extremely
+         * likely that the page will be found in page cache at that point.
+         */
 
         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-        if (fault & VM_FAULT_MAJOR)
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
-        else if (fault & VM_FAULT_MINOR)
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+        if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                if (fault & VM_FAULT_MAJOR) {
+                        tsk->maj_flt++;
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                        regs, addr);
+                } else {
+                        tsk->min_flt++;
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                        regs, addr);
+                }
+                if (fault & VM_FAULT_RETRY) {
+                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                         * of starvation. */
+                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                        goto retry;
+                }
+        }
+
+        up_read(&mm->mmap_sem);
 
         /*
          * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
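
Taken together with the retry: label added earlier, the fault path after this patch behaves roughly as below (a condensed sketch; the trylock fast path, error paths and no_context handling are elided):

        retry:
                down_read(&mm->mmap_sem);
                fault = __do_page_fault(mm, addr, fsr, flags, tsk);

                /* On VM_FAULT_RETRY the core mm has already dropped
                 * mmap_sem, so a fatal signal can be handled directly. */
                if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                        return 0;

                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        /* first attempt only: maj_flt/min_flt accounting */
                        if (fault & VM_FAULT_RETRY) {
                                /* retry at most once; clearing the flag
                                 * avoids any risk of starvation */
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                                goto retry;
                        }
                }

                up_read(&mm->mmap_sem);

Note where the label sits: since __lock_page_or_retry() releases mmap_sem before VM_FAULT_RETRY is returned, the retry path must take the semaphore again, which is why retry: lands just before down_read() rather than before the fault call.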