summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2016-07-26 18:25:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-26 19:19:19 -0400
commitdcddffd41d3f1d3bdcc1dce3f1cd142779b6d4c1 (patch)
treeb7b545d38466ff7e0260573cba32c9cabd3fd1a2 /arch
parent6fb8ddfc455ca82a3ce674f54298cd20f27ca518 (diff)
mm: do not pass mm_struct into handle_mm_fault
We always have vma->vm_mm around. Link: http://lkml.kernel.org/r/1466021202-61880-8-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/mm/fault.c2
-rw-r--r--arch/arm/mm/fault.c2
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/avr32/mm/fault.c2
-rw-r--r--arch/cris/mm/fault.c2
-rw-r--r--arch/frv/mm/fault.c2
-rw-r--r--arch/hexagon/mm/vm_fault.c2
-rw-r--r--arch/ia64/mm/fault.c2
-rw-r--r--arch/m32r/mm/fault.c2
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/metag/mm/fault.c2
-rw-r--r--arch/microblaze/mm/fault.c2
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mn10300/mm/fault.c2
-rw-r--r--arch/nios2/mm/fault.c2
-rw-r--r--arch/openrisc/mm/fault.c2
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/mm/copro_fault.c2
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/score/mm/fault.c2
-rw-r--r--arch/sh/mm/fault.c2
-rw-r--r--arch/sparc/mm/fault_32.c4
-rw-r--r--arch/sparc/mm/fault_64.c2
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/unicore32/mm/fault.c2
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/xtensa/mm/fault.c2
30 files changed, 31 insertions, 31 deletions
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4a905bd667e2..83e9eee57a55 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -147,7 +147,7 @@ retry:
147 /* If for any reason at all we couldn't handle the fault, 147 /* If for any reason at all we couldn't handle the fault,
148 make sure we exit gracefully rather than endlessly redo 148 make sure we exit gracefully rather than endlessly redo
149 the fault. */ 149 the fault. */
150 fault = handle_mm_fault(mm, vma, address, flags); 150 fault = handle_mm_fault(vma, address, flags);
151 151
152 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 152 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
153 return; 153 return;
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13e60..e94e5aa33985 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ good_area:
137 * make sure we exit gracefully rather than endlessly redo 137 * make sure we exit gracefully rather than endlessly redo
138 * the fault. 138 * the fault.
139 */ 139 */
140 fault = handle_mm_fault(mm, vma, address, flags); 140 fault = handle_mm_fault(vma, address, flags);
141 141
142 /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */ 142 /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
143 if (unlikely(fatal_signal_pending(current))) { 143 if (unlikely(fatal_signal_pending(current))) {
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ad5841856007..3a2e678b8d30 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -243,7 +243,7 @@ good_area:
243 goto out; 243 goto out;
244 } 244 }
245 245
246 return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); 246 return handle_mm_fault(vma, addr & PAGE_MASK, flags);
247 247
248check_stack: 248check_stack:
249 /* Don't allow expansion below FIRST_USER_ADDRESS */ 249 /* Don't allow expansion below FIRST_USER_ADDRESS */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b1166d1e5955..031820d989a8 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -233,7 +233,7 @@ good_area:
233 goto out; 233 goto out;
234 } 234 }
235 235
236 return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags); 236 return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
237 237
238check_stack: 238check_stack:
239 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) 239 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index c03533937a9f..a4b7edac8f10 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -134,7 +134,7 @@ good_area:
134 * sure we exit gracefully rather than endlessly redo the 134 * sure we exit gracefully rather than endlessly redo the
135 * fault. 135 * fault.
136 */ 136 */
137 fault = handle_mm_fault(mm, vma, address, flags); 137 fault = handle_mm_fault(vma, address, flags);
138 138
139 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 139 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
140 return; 140 return;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 3066d40a6db1..112ef26c7f2e 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -168,7 +168,7 @@ retry:
168 * the fault. 168 * the fault.
169 */ 169 */
170 170
171 fault = handle_mm_fault(mm, vma, address, flags); 171 fault = handle_mm_fault(vma, address, flags);
172 172
173 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 173 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
174 return; 174 return;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 61d99767fe16..614a46c413d2 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
164 * make sure we exit gracefully rather than endlessly redo 164 * make sure we exit gracefully rather than endlessly redo
165 * the fault. 165 * the fault.
166 */ 166 */
167 fault = handle_mm_fault(mm, vma, ear0, flags); 167 fault = handle_mm_fault(vma, ear0, flags);
168 if (unlikely(fault & VM_FAULT_ERROR)) { 168 if (unlikely(fault & VM_FAULT_ERROR)) {
169 if (fault & VM_FAULT_OOM) 169 if (fault & VM_FAULT_OOM)
170 goto out_of_memory; 170 goto out_of_memory;
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 8704c9320032..bd7c251e2bce 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -101,7 +101,7 @@ good_area:
101 break; 101 break;
102 } 102 }
103 103
104 fault = handle_mm_fault(mm, vma, address, flags); 104 fault = handle_mm_fault(vma, address, flags);
105 105
106 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 106 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
107 return; 107 return;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 70b40d1205a6..fa6ad95e992e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -159,7 +159,7 @@ retry:
159 * sure we exit gracefully rather than endlessly redo the 159 * sure we exit gracefully rather than endlessly redo the
160 * fault. 160 * fault.
161 */ 161 */
162 fault = handle_mm_fault(mm, vma, address, flags); 162 fault = handle_mm_fault(vma, address, flags);
163 163
164 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 164 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
165 return; 165 return;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 8f9875b7933d..a3785d3644c2 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ good_area:
196 */ 196 */
197 addr = (address & PAGE_MASK); 197 addr = (address & PAGE_MASK);
198 set_thread_fault_code(error_code); 198 set_thread_fault_code(error_code);
199 fault = handle_mm_fault(mm, vma, addr, flags); 199 fault = handle_mm_fault(vma, addr, flags);
200 if (unlikely(fault & VM_FAULT_ERROR)) { 200 if (unlikely(fault & VM_FAULT_ERROR)) {
201 if (fault & VM_FAULT_OOM) 201 if (fault & VM_FAULT_OOM)
202 goto out_of_memory; 202 goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 6a94cdd0c830..bd66a0b20c6b 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -136,7 +136,7 @@ good_area:
136 * the fault. 136 * the fault.
137 */ 137 */
138 138
139 fault = handle_mm_fault(mm, vma, address, flags); 139 fault = handle_mm_fault(vma, address, flags);
140 pr_debug("handle_mm_fault returns %d\n", fault); 140 pr_debug("handle_mm_fault returns %d\n", fault);
141 141
142 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 142 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index f57edca63609..372783a67dda 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
133 * make sure we exit gracefully rather than endlessly redo 133 * make sure we exit gracefully rather than endlessly redo
134 * the fault. 134 * the fault.
135 */ 135 */
136 fault = handle_mm_fault(mm, vma, address, flags); 136 fault = handle_mm_fault(vma, address, flags);
137 137
138 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 138 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
139 return 0; 139 return 0;
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 177dfc003643..abb678ccde6f 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -216,7 +216,7 @@ good_area:
216 * make sure we exit gracefully rather than endlessly redo 216 * make sure we exit gracefully rather than endlessly redo
217 * the fault. 217 * the fault.
218 */ 218 */
219 fault = handle_mm_fault(mm, vma, address, flags); 219 fault = handle_mm_fault(vma, address, flags);
220 220
221 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 221 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
222 return; 222 return;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 4b88fa031891..9560ad731120 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -153,7 +153,7 @@ good_area:
153 * make sure we exit gracefully rather than endlessly redo 153 * make sure we exit gracefully rather than endlessly redo
154 * the fault. 154 * the fault.
155 */ 155 */
156 fault = handle_mm_fault(mm, vma, address, flags); 156 fault = handle_mm_fault(vma, address, flags);
157 157
158 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 158 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
159 return; 159 return;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 4a1d181ed32f..f23781d6bbb3 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -254,7 +254,7 @@ good_area:
254 * make sure we exit gracefully rather than endlessly redo 254 * make sure we exit gracefully rather than endlessly redo
255 * the fault. 255 * the fault.
256 */ 256 */
257 fault = handle_mm_fault(mm, vma, address, flags); 257 fault = handle_mm_fault(vma, address, flags);
258 258
259 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 259 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
260 return; 260 return;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index b51878b0c6b8..affc4eb3f89e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -131,7 +131,7 @@ good_area:
131 * make sure we exit gracefully rather than endlessly redo 131 * make sure we exit gracefully rather than endlessly redo
132 * the fault. 132 * the fault.
133 */ 133 */
134 fault = handle_mm_fault(mm, vma, address, flags); 134 fault = handle_mm_fault(vma, address, flags);
135 135
136 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 136 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
137 return; 137 return;
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 230ac20ae794..e94cd225e816 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -163,7 +163,7 @@ good_area:
163 * the fault. 163 * the fault.
164 */ 164 */
165 165
166 fault = handle_mm_fault(mm, vma, address, flags); 166 fault = handle_mm_fault(vma, address, flags);
167 167
168 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 168 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
169 return; 169 return;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9..163af2c31d76 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -239,7 +239,7 @@ good_area:
239 * fault. 239 * fault.
240 */ 240 */
241 241
242 fault = handle_mm_fault(mm, vma, address, flags); 242 fault = handle_mm_fault(vma, address, flags);
243 243
244 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 244 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
245 return; 245 return;
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 6527882ce05e..bb0354222b11 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
75 } 75 }
76 76
77 ret = 0; 77 ret = 0;
78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); 78 *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
79 if (unlikely(*flt & VM_FAULT_ERROR)) { 79 if (unlikely(*flt & VM_FAULT_ERROR)) {
80 if (*flt & VM_FAULT_OOM) { 80 if (*flt & VM_FAULT_OOM) {
81 ret = -ENOMEM; 81 ret = -ENOMEM;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d781c52..a4db22f65021 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -429,7 +429,7 @@ good_area:
429 * make sure we exit gracefully rather than endlessly redo 429 * make sure we exit gracefully rather than endlessly redo
430 * the fault. 430 * the fault.
431 */ 431 */
432 fault = handle_mm_fault(mm, vma, address, flags); 432 fault = handle_mm_fault(vma, address, flags);
433 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { 433 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
434 if (fault & VM_FAULT_SIGSEGV) 434 if (fault & VM_FAULT_SIGSEGV)
435 goto bad_area; 435 goto bad_area;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 19288c1b36d3..6c47488745ae 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -456,7 +456,7 @@ retry:
456 * make sure we exit gracefully rather than endlessly redo 456 * make sure we exit gracefully rather than endlessly redo
457 * the fault. 457 * the fault.
458 */ 458 */
459 fault = handle_mm_fault(mm, vma, address, flags); 459 fault = handle_mm_fault(vma, address, flags);
460 /* No reason to continue if interrupted by SIGKILL. */ 460 /* No reason to continue if interrupted by SIGKILL. */
461 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { 461 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
462 fault = VM_FAULT_SIGNAL; 462 fault = VM_FAULT_SIGNAL;
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 37a6c2e0e969..995b71e4db4b 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -111,7 +111,7 @@ good_area:
111 * make sure we exit gracefully rather than endlessly redo 111 * make sure we exit gracefully rather than endlessly redo
112 * the fault. 112 * the fault.
113 */ 113 */
114 fault = handle_mm_fault(mm, vma, address, flags); 114 fault = handle_mm_fault(vma, address, flags);
115 if (unlikely(fault & VM_FAULT_ERROR)) { 115 if (unlikely(fault & VM_FAULT_ERROR)) {
116 if (fault & VM_FAULT_OOM) 116 if (fault & VM_FAULT_OOM)
117 goto out_of_memory; 117 goto out_of_memory;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 79d8276377d1..9bf876780cef 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -487,7 +487,7 @@ good_area:
487 * make sure we exit gracefully rather than endlessly redo 487 * make sure we exit gracefully rather than endlessly redo
488 * the fault. 488 * the fault.
489 */ 489 */
490 fault = handle_mm_fault(mm, vma, address, flags); 490 fault = handle_mm_fault(vma, address, flags);
491 491
492 if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) 492 if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
493 if (mm_fault_error(regs, error_code, address, fault)) 493 if (mm_fault_error(regs, error_code, address, fault))
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b6c559cbd64d..4714061d6cd3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
241 * make sure we exit gracefully rather than endlessly redo 241 * make sure we exit gracefully rather than endlessly redo
242 * the fault. 242 * the fault.
243 */ 243 */
244 fault = handle_mm_fault(mm, vma, address, flags); 244 fault = handle_mm_fault(vma, address, flags);
245 245
246 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 246 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
247 return; 247 return;
@@ -411,7 +411,7 @@ good_area:
411 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 411 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
412 goto bad_area; 412 goto bad_area;
413 } 413 }
414 switch (handle_mm_fault(mm, vma, address, flags)) { 414 switch (handle_mm_fault(vma, address, flags)) {
415 case VM_FAULT_SIGBUS: 415 case VM_FAULT_SIGBUS:
416 case VM_FAULT_OOM: 416 case VM_FAULT_OOM:
417 goto do_sigbus; 417 goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index cb841a33da59..6c43b924a7a2 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -436,7 +436,7 @@ good_area:
436 goto bad_area; 436 goto bad_area;
437 } 437 }
438 438
439 fault = handle_mm_fault(mm, vma, address, flags); 439 fault = handle_mm_fault(vma, address, flags);
440 440
441 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 441 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
442 goto exit_exception; 442 goto exit_exception;
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 26734214818c..beba986589e5 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -434,7 +434,7 @@ good_area:
434 * make sure we exit gracefully rather than endlessly redo 434 * make sure we exit gracefully rather than endlessly redo
435 * the fault. 435 * the fault.
436 */ 436 */
437 fault = handle_mm_fault(mm, vma, address, flags); 437 fault = handle_mm_fault(vma, address, flags);
438 438
439 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 439 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
440 return 0; 440 return 0;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 98783dd0fa2e..ad8f206ab5e8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -73,7 +73,7 @@ good_area:
73 do { 73 do {
74 int fault; 74 int fault;
75 75
76 fault = handle_mm_fault(mm, vma, address, flags); 76 fault = handle_mm_fault(vma, address, flags);
77 77
78 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 78 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
79 goto out_nosemaphore; 79 goto out_nosemaphore;
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 2ec3d3adcefc..6c7f70bcaae3 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -194,7 +194,7 @@ good_area:
194 * If for any reason at all we couldn't handle the fault, make 194 * If for any reason at all we couldn't handle the fault, make
195 * sure we exit gracefully rather than endlessly redo the fault. 195 * sure we exit gracefully rather than endlessly redo the fault.
196 */ 196 */
197 fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); 197 fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
198 return fault; 198 return fault;
199 199
200check_stack: 200check_stack:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d22161ab941d..dc8023060456 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1353,7 +1353,7 @@ good_area:
1353 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if 1353 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1354 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. 1354 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1355 */ 1355 */
1356 fault = handle_mm_fault(mm, vma, address, flags); 1356 fault = handle_mm_fault(vma, address, flags);
1357 major |= fault & VM_FAULT_MAJOR; 1357 major |= fault & VM_FAULT_MAJOR;
1358 1358
1359 /* 1359 /*
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 7f4a1fdb1502..2725e08ef353 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -110,7 +110,7 @@ good_area:
110 * make sure we exit gracefully rather than endlessly redo 110 * make sure we exit gracefully rather than endlessly redo
111 * the fault. 111 * the fault.
112 */ 112 */
113 fault = handle_mm_fault(mm, vma, address, flags); 113 fault = handle_mm_fault(vma, address, flags);
114 114
115 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 115 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
116 return; 116 return;