diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-01-05 08:24:16 -0500 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-01-05 08:24:16 -0500 |
commit | a32737e1ca650504f172292dd344eb64c02311f3 (patch) | |
tree | 7dd2004ece26081507af877d9dd40b1bd4eecc1a /arch/arm/mm/fault.c | |
parent | 27edacac7d97d37ec77779c7da08345298a5d283 (diff) | |
parent | a3c2b511a844641f6d0b60bd84cd6076143b3f2d (diff) |
Merge branches 'fixes' and 'misc' into for-linus
Diffstat (limited to 'arch/arm/mm/fault.c')
-rw-r--r-- | arch/arm/mm/fault.c | 58 |
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index aa33949fef60..4aabeaec25df 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -231,7 +231,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) | |||
231 | 231 | ||
232 | static int __kprobes | 232 | static int __kprobes |
233 | __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, | 233 | __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, |
234 | struct task_struct *tsk) | 234 | unsigned int flags, struct task_struct *tsk) |
235 | { | 235 | { |
236 | struct vm_area_struct *vma; | 236 | struct vm_area_struct *vma; |
237 | int fault; | 237 | int fault; |
@@ -253,18 +253,7 @@ good_area: | |||
253 | goto out; | 253 | goto out; |
254 | } | 254 | } |
255 | 255 | ||
256 | /* | 256 | return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); |
257 | * If for any reason at all we couldn't handle the fault, make | ||
258 | * sure we exit gracefully rather than endlessly redo the fault. | ||
259 | */ | ||
260 | fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0); | ||
261 | if (unlikely(fault & VM_FAULT_ERROR)) | ||
262 | return fault; | ||
263 | if (fault & VM_FAULT_MAJOR) | ||
264 | tsk->maj_flt++; | ||
265 | else | ||
266 | tsk->min_flt++; | ||
267 | return fault; | ||
268 | 257 | ||
269 | check_stack: | 258 | check_stack: |
270 | if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) | 259 | if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) |
@@ -279,6 +268,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
279 | struct task_struct *tsk; | 268 | struct task_struct *tsk; |
280 | struct mm_struct *mm; | 269 | struct mm_struct *mm; |
281 | int fault, sig, code; | 270 | int fault, sig, code; |
271 | int write = fsr & FSR_WRITE; | ||
272 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
273 | (write ? FAULT_FLAG_WRITE : 0); | ||
282 | 274 | ||
283 | if (notify_page_fault(regs, fsr)) | 275 | if (notify_page_fault(regs, fsr)) |
284 | return 0; | 276 | return 0; |
@@ -305,6 +297,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
305 | if (!down_read_trylock(&mm->mmap_sem)) { | 297 | if (!down_read_trylock(&mm->mmap_sem)) { |
306 | if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) | 298 | if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) |
307 | goto no_context; | 299 | goto no_context; |
300 | retry: | ||
308 | down_read(&mm->mmap_sem); | 301 | down_read(&mm->mmap_sem); |
309 | } else { | 302 | } else { |
310 | /* | 303 | /* |
@@ -320,14 +313,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
320 | #endif | 313 | #endif |
321 | } | 314 | } |
322 | 315 | ||
323 | fault = __do_page_fault(mm, addr, fsr, tsk); | 316 | fault = __do_page_fault(mm, addr, fsr, flags, tsk); |
324 | up_read(&mm->mmap_sem); | 317 | |
318 | /* If we need to retry but a fatal signal is pending, handle the | ||
319 | * signal first. We do not need to release the mmap_sem because | ||
320 | * it would already be released in __lock_page_or_retry in | ||
321 | * mm/filemap.c. */ | ||
322 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
323 | return 0; | ||
324 | |||
325 | /* | ||
326 | * Major/minor page fault accounting is only done on the | ||
327 | * initial attempt. If we go through a retry, it is extremely | ||
328 | * likely that the page will be found in page cache at that point. | ||
329 | */ | ||
325 | 330 | ||
326 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); | 331 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); |
327 | if (fault & VM_FAULT_MAJOR) | 332 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
328 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr); | 333 | if (fault & VM_FAULT_MAJOR) { |
329 | else if (fault & VM_FAULT_MINOR) | 334 | tsk->maj_flt++; |
330 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr); | 335 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
336 | regs, addr); | ||
337 | } else { | ||
338 | tsk->min_flt++; | ||
339 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
340 | regs, addr); | ||
341 | } | ||
342 | if (fault & VM_FAULT_RETRY) { | ||
343 | /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk | ||
344 | * of starvation. */ | ||
345 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
346 | goto retry; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | up_read(&mm->mmap_sem); | ||
331 | 351 | ||
332 | /* | 352 | /* |
333 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR | 353 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |