author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2011-05-26 03:48:30 -0400
committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2011-05-26 03:48:25 -0400
commit | 33ce614029576b8585e271fd7d90746a37114a15
tree | 04f6a764b0ed82f0d6d0647b64547ad24d19830b
parent | 99583181cbf2252dd0554eef6f419a6b22cd33ea
[S390] mm: add page fault retry handling
s390 arch backend for d065bd81 "mm: retry page fault when blocking on
disk transfer".
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
-rw-r--r-- | arch/s390/mm/fault.c | 41
1 file changed, 28 insertions(+), 13 deletions(-)
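For readers unfamiliar with the retry protocol this patch adopts, below is a minimal user-space sketch of the control flow it introduces: try the fault with retry allowed, account the fault only on the first attempt, and on a retry request clear the allow-retry flag before looping so the handler can go around at most once. The names here (FLAG_ALLOW_RETRY, FAULT_RETRY, fake_fault) are simplified stand-ins, not the kernel's; the real code uses FAULT_FLAG_ALLOW_RETRY, VM_FAULT_RETRY, and handle_mm_fault() as shown in the diff below.

#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x01u
#define FLAG_WRITE       0x02u

#define FAULT_RETRY 0x01

/* Hypothetical stand-in for handle_mm_fault(): requests one retry,
 * then succeeds, mimicking a fault that had to wait for disk I/O. */
static int fake_fault(unsigned int flags)
{
	static int first_attempt = 1;

	if ((flags & FLAG_ALLOW_RETRY) && first_attempt) {
		first_attempt = 0;
		return FAULT_RETRY;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY | FLAG_WRITE;
	int fault;
	int attempts = 0;

retry:
	attempts++;
	/* down_read(&mm->mmap_sem) would happen here in the kernel */
	fault = fake_fault(flags);
	if (flags & FLAG_ALLOW_RETRY) {
		/* maj_flt/min_flt accounting happens only on this
		 * first attempt, exactly as in the patch */
		if (fault & FAULT_RETRY) {
			/* clear the flag so a second FAULT_RETRY
			 * cannot send us around the loop forever */
			flags &= ~FLAG_ALLOW_RETRY;
			goto retry;
		}
	}
	printf("fault resolved after %d attempt(s)\n", attempts);
	return 0;
}

Note how both the accounting and the retry decision are nested under the allow-retry check: a fault that already went through one retry is neither double-counted nor retried again.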
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b57723aee848..fe103e891e7a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -280,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int fault, write;
+	unsigned int flags;
+	int fault;
 
 	if (notify_page_fault(regs))
 		return 0;
@@ -299,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	flags = FAULT_FLAG_ALLOW_RETRY;
+	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+		flags |= FAULT_FLAG_WRITE;
+retry:
 	down_read(&mm->mmap_sem);
 
 	fault = VM_FAULT_BADMAP;
@@ -328,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	write = (access == VM_WRITE ||
-		 (trans_exc_code & store_indication) == 0x400) ?
-		FAULT_FLAG_WRITE : 0;
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-			      regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-			      regs, address);
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				      regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
 	}
 	/*
 	 * The instruction that caused the program check will
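One detail worth noting about the placement of the retry: label. Under the protocol introduced by d065bd81, when handle_mm_fault() returns VM_FAULT_RETRY it has already released mmap_sem, which is why the retry path jumps back to before down_read() rather than looping inside the locked region, and why the out_up unlock path is not taken on a retry.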