author	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-10 12:01:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-21 16:08:22 -0400
commit	d06063cc221fdefcab86589e79ddfdb7c0e14b63 (patch)
tree	00ccaf8c1992b57a4445d78b9eae25fde0b3ab31 /arch/s390
parent	30c9f3a9fae79517bca595826a19c6855fbb6d32 (diff)
Move FAULT_FLAG_xyz into handle_mm_fault() callers
This allows the callers to now pass down the full set of FAULT_FLAG_xyz flags
to handle_mm_fault(). All callers have been (mechanically) converted to the new
calling convention; there's almost certainly room for architectures to clean up
their code and then add FAULT_FLAG_RETRY when that support is added.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
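The conversion follows a single mechanical pattern: each caller that used to pass its write_access/write boolean straight through now turns it into a FAULT_FLAG_xyz mask first. Below is a minimal sketch of that pattern, assuming the 2009-era four-argument handle_mm_fault() signature shown in the diff; the caller name and the simplified error returns are illustrative only, not code from this patch (the real s390 handlers branch to labels such as out_of_memory instead).

/*
 * Illustrative sketch only: shows the calling convention this commit
 * introduces.  fault_caller_sketch() is a hypothetical caller, and the
 * error returns are simplified relative to the real fault handlers.
 */
#include <linux/mm.h>

static int fault_caller_sketch(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address, int write_access)
{
	int fault;

	/*
	 * Old convention: the write_access boolean was passed directly:
	 *	fault = handle_mm_fault(mm, vma, address, write_access);
	 * New convention: the caller builds a FAULT_FLAG_xyz mask, which
	 * leaves room to OR in further flags (e.g. FAULT_FLAG_RETRY) once
	 * an architecture supports them.
	 */
	fault = handle_mm_fault(mm, vma, address,
				write_access ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			return -ENOMEM;	/* simplified */
		if (fault & VM_FAULT_SIGBUS)
			return -EFAULT;	/* simplified */
	}
	return 0;
}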
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/lib/uaccess_pt.c	2
-rw-r--r--	arch/s390/mm/fault.c	2
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b0b84c35b0ad..cb5d59eab0ee 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 220a152c836c..74eb26bf1970 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,7 +352,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			up_read(&mm->mmap_sem);