author	Nick Piggin <npiggin@suse.de>	2007-07-19 04:47:05 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:41 -0400
commit	83c54070ee1a2d05c89793884bea1a03f2851ed4 (patch)
tree	dc732f5a9b93fb7004ed23f551bd98b77cc580e0
parent	d0217ac04ca6591841e5665f518e38064f4e65bd (diff)
mm: fault feedback #2
This patch completes Linus's wish that the fault return codes be made into
bit flags, which I agree makes everything nicer.  This requires all
handle_mm_fault callers to be modified (possibly the modifications should
go further and do things like fault accounting in handle_mm_fault --
however that would be for another patch).

[akpm@linux-foundation.org: fix alpha build]
[akpm@linux-foundation.org: fix s390 build]
[akpm@linux-foundation.org: fix sparc build]
[akpm@linux-foundation.org: fix sparc64 build]
[akpm@linux-foundation.org: fix ia64 build]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ian Molton <spyro@f2s.com>
Cc: Bryan Wu <bryan.wu@analog.com>
Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Greg Ungerer <gerg@uclinux.org>
Cc: Matthew Wilcox <willy@debian.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Kazumoto Kojima <kkojima@rr.iij4u.or.jp>
Cc: Richard Curnow <rc@rc0.org.uk>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Miles Bader <uclinux-v850@lsi.nec.co.jp>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Still apparently needs some ARM and PPC loving - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/alpha/mm/fault.c                      | 22
-rw-r--r--  arch/arm/mm/fault.c                        | 36
-rw-r--r--  arch/arm26/mm/fault.c                      | 30
-rw-r--r--  arch/avr32/mm/fault.c                      | 23
-rw-r--r--  arch/cris/mm/fault.c                       | 23
-rw-r--r--  arch/frv/mm/fault.c                        | 23
-rw-r--r--  arch/i386/mm/fault.c                       | 23
-rw-r--r--  arch/ia64/mm/fault.c                       | 26
-rw-r--r--  arch/m32r/mm/fault.c                       | 23
-rw-r--r--  arch/m68k/mm/fault.c                       | 21
-rw-r--r--  arch/mips/mm/fault.c                       | 23
-rw-r--r--  arch/parisc/mm/fault.c                     | 23
-rw-r--r--  arch/powerpc/mm/fault.c                    | 26
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c  | 28
-rw-r--r--  arch/ppc/mm/fault.c                        | 23
-rw-r--r--  arch/s390/lib/uaccess_pt.c                 | 23
-rw-r--r--  arch/s390/mm/fault.c                       | 30
-rw-r--r--  arch/sh/mm/fault.c                         | 23
-rw-r--r--  arch/sh64/mm/fault.c                       | 24
-rw-r--r--  arch/sparc/mm/fault.c                      | 22
-rw-r--r--  arch/sparc64/mm/fault.c                    | 24
-rw-r--r--  arch/um/kernel/trap.c                      | 29
-rw-r--r--  arch/x86_64/mm/fault.c                     | 25
-rw-r--r--  arch/xtensa/mm/fault.c                     | 23
-rw-r--r--  fs/gfs2/ops_vm.c                           | 11
-rw-r--r--  include/linux/mm.h                         | 58
-rw-r--r--  kernel/futex.c                             | 21
-rw-r--r--  mm/filemap.c                               |  6
-rw-r--r--  mm/filemap_xip.c                           |  2
-rw-r--r--  mm/hugetlb.c                               | 10
-rw-r--r--  mm/memory.c                                | 80
-rw-r--r--  mm/shmem.c                                 |  8
32 files changed, 373 insertions(+), 419 deletions(-)
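
The conversion repeated across the architecture hunks below follows one
pattern: instead of switching on an enumerated return value, callers of
handle_mm_fault() now test bit flags, checking VM_FAULT_ERROR first and
doing the major/minor fault accounting afterwards.  A minimal sketch of
that caller pattern (identifiers such as mm, vma, address, write_access
and the goto labels stand in for whatever each architecture's fault
handler has in scope):

	int fault;

	fault = handle_mm_fault(mm, vma, address, write_access);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();	/* no other error bits are defined */
	}
	/* success: VM_FAULT_MAJOR is the only remaining status of interest */
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
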
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index f5862792a167..a0e18da594d9 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -148,21 +148,17 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	   the fault.  */
 	fault = handle_mm_fault(mm, vma, address, cause > 0);
 	up_read(&mm->mmap_sem);
-
-	switch (fault) {
-	      case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	      case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	      case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	      case VM_FAULT_OOM:
-		goto out_of_memory;
-	      default:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	return;
 
 	/* Something tried to access memory that isn't in our memory map.
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 75d491448e45..c04124a095cf 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -183,20 +183,20 @@ good_area:
 	 */
 survive:
 	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
-
-	/*
-	 * Handle the "normal" cases first - successful and sigbus
-	 */
-	switch (fault) {
-	case VM_FAULT_MAJOR:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			return fault;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
-		return fault;
-	case VM_FAULT_MINOR:
+	else
 		tsk->min_flt++;
-	case VM_FAULT_SIGBUS:
-		return fault;
-	}
+	return fault;
 
+out_of_memory:
 	if (!is_init(tsk))
 		goto out;
 
@@ -249,7 +249,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
 	 */
-	if (fault >= VM_FAULT_MINOR)
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
 
 	/*
@@ -259,8 +259,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!user_mode(regs))
 		goto no_context;
 
-	switch (fault) {
-	case VM_FAULT_OOM:
+	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, or some other thing
 		 * happened to us that made us unable to handle
@@ -269,17 +268,15 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		printk("VM: killing process %s\n", tsk->comm);
 		do_exit(SIGKILL);
 		return 0;
-
-	case VM_FAULT_SIGBUS:
+	}
+	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
 		 * successfully fix up this page fault.
 		 */
 		sig = SIGBUS;
 		code = BUS_ADRERR;
-		break;
-
-	default:
+	} else {
 		/*
 		 * Something tried to access memory that
 		 * isn't in our memory map..
@@ -287,7 +284,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		sig = SIGSEGV;
 		code = fault == VM_FAULT_BADACCESS ?
 		       SEGV_ACCERR : SEGV_MAPERR;
-		break;
 	}
 
 	__do_user_fault(tsk, addr, fsr, sig, code, regs);
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index 93c0cee0fb5e..dec638a0c8d9 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -170,20 +170,20 @@ good_area:
 	 */
 survive:
 	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));
-
-	/*
-	 * Handle the "normal" cases first - successful and sigbus
-	 */
-	switch (fault) {
-	case VM_FAULT_MAJOR:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			return fault;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
-		return fault;
-	case VM_FAULT_MINOR:
+	else
 		tsk->min_flt++;
-	case VM_FAULT_SIGBUS:
-		return fault;
-	}
+	return fault;
 
+out_of_memory:
 	fault = -3; /* out of memory */
 	if (!is_init(tsk))
 		goto out;
@@ -225,13 +225,11 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	/*
 	 * Handle the "normal" case first
 	 */
-	switch (fault) {
-	case VM_FAULT_MINOR:
-	case VM_FAULT_MAJOR:
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
-	case VM_FAULT_SIGBUS:
+	if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
-	}
+	/* else VM_FAULT_OOM */
 
 	/*
 	 * If we are in kernel mode at this point, we
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 4b2495285d94..ae2d2c593b2b 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -64,6 +64,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	int writeaccess;
 	long signr;
 	int code;
+	int fault;
 
 	if (notify_page_fault(regs, ecr))
 		return;
@@ -132,20 +133,18 @@ good_area:
 	 * fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c73e91f1299a..8672ab7d7978 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -179,6 +179,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	siginfo_t info;
+	int fault;
 
 	D(printk("Page fault for %lX on %X at %lX, prot %d write %d\n",
 	  address, smp_processor_id(), instruction_pointer(regs),
@@ -283,18 +284,18 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 3f12296c3688..6798fa0257b1 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -40,6 +40,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	pud_t *pue;
 	pte_t *pte;
 	int write;
+	int fault;
 
 #if 0
 	const char *atxc[16] = {
@@ -162,18 +163,18 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, ear0, write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, ear0, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 1ecb3e43b523..e92a10124935 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -303,6 +303,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	unsigned long address;
 	int write, si_code;
+	int fault;
 
 	/* get the address */
 	address = read_cr2();
@@ -422,20 +423,18 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	/*
 	 * Did it hit the DOS screen memory VA from vm86 mode?
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index b87f785c2416..73ccb6010c05 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -80,6 +80,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct mm_struct *mm = current->mm;
 	struct siginfo si;
 	unsigned long mask;
+	int fault;
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -147,26 +148,25 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
-	      case VM_FAULT_MINOR:
-		++current->min_flt;
-		break;
-	      case VM_FAULT_MAJOR:
-		++current->maj_flt;
-		break;
-	      case VM_FAULT_SIGBUS:
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
 		 * to us that made us unable to handle the page fault
 		 * gracefully.
 		 */
-		signal = SIGBUS;
-		goto bad_area;
-	      case VM_FAULT_OOM:
-		goto out_of_memory;
-	      default:
+		if (fault & VM_FAULT_OOM) {
+			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			signal = SIGBUS;
+			goto bad_area;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index f3935ba24946..676a1c443d28 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -80,6 +80,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	struct vm_area_struct * vma;
 	unsigned long page, addr;
 	int write;
+	int fault;
 	siginfo_t info;
 
 	/*
@@ -195,20 +196,18 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	switch (handle_mm_fault(mm, vma, addr, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, addr, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	set_thread_fault_code(0);
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2adbeb16e1b8..578b48f47b9e 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -159,18 +159,17 @@ good_area:
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
-	switch (fault) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto bus_err;
-	default:
-		goto out_of_memory;
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto bus_err;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return 0;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 7ebea331edb8..521771b373de 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -39,6 +39,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	struct mm_struct *mm = tsk->mm;
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
+	int fault;
 
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -102,20 +103,18 @@ survive:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index f6f67554c623..7899ab87785a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -147,6 +147,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	struct mm_struct *mm = tsk->mm;
 	const struct exception_table_entry *fix;
 	unsigned long acc_type;
+	int fault;
 
 	if (in_atomic() || !mm)
 		goto no_context;
@@ -173,23 +174,23 @@ good_area:
 	 * fault.
 	 */
 
-	switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
-	      case VM_FAULT_MINOR:
-		++current->min_flt;
-		break;
-	      case VM_FAULT_MAJOR:
-		++current->maj_flt;
-		break;
-	      case VM_FAULT_SIGBUS:
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
 		 * other thing happened to us that made us unable to
 		 * handle the page fault gracefully.
 		 */
-		goto bad_area;
-	      default:
-		goto out_of_memory;
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto bad_area;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 0ece51310bfe..3767211b3d0f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,7 +145,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct mm_struct *mm = current->mm;
 	siginfo_t info;
 	int code = SEGV_MAPERR;
-	int is_write = 0;
+	int is_write = 0, ret;
 	int trap = TRAP(regs);
 	int is_exec = trap == 0x400;
 
@@ -330,22 +330,18 @@ good_area:
 	 * the fault.
 	 */
  survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	ret = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(ret & VM_FAULT_ERROR)) {
+		if (ret & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (ret & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
-
+	if (ret & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return 0;
 
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e064d0c0d80e..07f88de0544d 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -74,23 +74,21 @@ good_area:
 		goto bad_area;
 	}
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
-	switch (*flt) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		ret = -EFAULT;
-		goto bad_area;
-	case VM_FAULT_OOM:
-		ret = -ENOMEM;
-		goto bad_area;
-	default:
+	fault = handle_mm_fault(mm, vma, ea, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM) {
+			ret = -ENOMEM;
+			goto bad_area;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			ret = -EFAULT;
+			goto bad_area;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return ret;
 
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 465f451f3bc3..b98244e277fb 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -96,6 +96,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct mm_struct *mm = current->mm;
 	siginfo_t info;
 	int code = SEGV_MAPERR;
+	int fault;
 #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
 	int is_write = error_code & ESR_DST;
 #else
@@ -249,20 +250,18 @@ good_area:
 	 * the fault.
 	 */
  survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	/*
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 63181671e3e3..60604b2819b2 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -20,6 +20,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 {
 	struct vm_area_struct *vma;
 	int ret = -EFAULT;
+	int fault;
 
 	if (in_atomic())
 		return ret;
@@ -44,20 +45,18 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}
 
 survive:
-	switch (handle_mm_fault(mm, vma, address, write_access)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto out_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write_access);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto out_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	ret = 0;
 out:
 	up_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index d855cdbf8fb8..54055194e9af 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -307,6 +307,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	unsigned long address;
 	int space;
 	int si_code;
+	int fault;
 
 	if (notify_page_fault(regs, error_code))
 		return;
@@ -377,23 +378,22 @@ survive:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		do_sigbus(regs, error_code, address);
-		return;
-	case VM_FAULT_OOM:
-		if (do_out_of_memory(regs, error_code, address))
-			goto survive;
-		return;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM) {
+			if (do_out_of_memory(regs, error_code, address))
+				goto survive;
+			return;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, error_code, address);
+			return;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	/*
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 0b3eaf6fbb28..964c6767dc73 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -33,6 +33,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	int si_code;
+	int fault;
 	siginfo_t info;
 
 	trace_hardirqs_on();
@@ -124,20 +125,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index 3cd93ba5d826..0d069d82141f 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -127,6 +127,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	struct vm_area_struct * vma;
 	const struct exception_table_entry *fixup;
 	pte_t *pte;
+	int fault;
 
 #if defined(CONFIG_SH64_PROC_TLB)
 	++calls_to_do_slow_page_fault;
@@ -221,18 +222,19 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right
 	   away on the same instruction. */
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index c3483365db4b..50747fe44356 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -226,6 +226,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	siginfo_t info;
 	int from_user = !(regs->psr & PSR_PS);
+	int fault;
 
 	if(text_fault)
 		address = regs->pc;
@@ -289,19 +290,18 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	case VM_FAULT_MAJOR:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
 		current->maj_flt++;
-		break;
-	case VM_FAULT_MINOR:
-	default:
+	else
 		current->min_flt++;
-		break;
-	}
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index b582024d2199..17123e9ecf78 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -278,7 +278,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned int insn = 0;
-	int si_code, fault_code;
+	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
 
 	fault_code = get_thread_fault_code();
@@ -415,20 +415,18 @@ good_area:
 		goto bad_area;
 	}
 
-	switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index abab90c3803f..3850d53f79fd 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -76,23 +76,24 @@ good_area:
 		goto out;
 
 	do {
+		int fault;
 survive:
-		switch (handle_mm_fault(mm, vma, address, is_write)){
-		case VM_FAULT_MINOR:
-			current->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			current->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			err = -EACCES;
-			goto out;
-		case VM_FAULT_OOM:
-			err = -ENOMEM;
-			goto out_of_memory;
-		default:
+		fault = handle_mm_fault(mm, vma, address, is_write);
+		if (unlikely(fault & VM_FAULT_ERROR)) {
+			if (fault & VM_FAULT_OOM) {
+				err = -ENOMEM;
+				goto out_of_memory;
+			} else if (fault & VM_FAULT_SIGBUS) {
+				err = -EACCES;
+				goto out;
+			}
 			BUG();
 		}
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+
 		pgd = pgd_offset(mm, address);
 		pud = pud_offset(pgd, address);
 		pmd = pmd_offset(pud, address);
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 635e58d443d7..84f11728fc76 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -317,7 +317,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	unsigned long address;
 	const struct exception_table_entry *fixup;
-	int write;
+	int write, fault;
 	unsigned long flags;
 	siginfo_t info;
 
@@ -450,19 +450,18 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
-
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 3dc6f2f07bbe..16004067add3 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -41,6 +41,7 @@ void do_page_fault(struct pt_regs *regs)
 	siginfo_t info;
 
 	int is_write, is_exec;
+	int fault;
 
 	info.si_code = SEGV_MAPERR;
 
@@ -102,20 +103,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index dc287d2e3a66..927d739d4685 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -112,7 +112,7 @@ static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
 	struct gfs2_holder i_gh;
 	int alloc_required;
 	int error;
-	int ret = VM_FAULT_MINOR;
+	int ret = 0;
 
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 	if (error)
@@ -132,14 +132,19 @@ static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
 	set_bit(GFF_EXLOCK, &gf->f_flags);
 	ret = filemap_fault(vma, vmf);
 	clear_bit(GFF_EXLOCK, &gf->f_flags);
-	if (ret & (VM_FAULT_ERROR | FAULT_RET_NOPAGE))
+	if (ret & VM_FAULT_ERROR)
 		goto out_unlock;
 
 	if (alloc_required) {
 		/* XXX: do we need to drop page lock around alloc_page_backing?*/
 		error = alloc_page_backing(ip, vmf->page);
 		if (error) {
-			if (ret & FAULT_RET_LOCKED)
+			/*
+			 * VM_FAULT_LOCKED should always be the case for
+			 * filemap_fault, but it may not be in a future
+			 * implementation.
+			 */
+			if (ret & VM_FAULT_LOCKED)
 				unlock_page(vmf->page);
 			page_cache_release(vmf->page);
 			ret = VM_FAULT_OOM;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ff0b8844bd5a..f8e12b3b6110 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -196,25 +196,10 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 
 
-#define FAULT_RET_NOPAGE	0x0100	/* ->fault did not return a page. This
-					 * can be used if the handler installs
-					 * their own pte.
-					 */
-#define FAULT_RET_LOCKED	0x0200	/* ->fault locked the page, caller must
-					 * unlock after installing the mapping.
-					 * This is used by pagecache in
-					 * particular, where the page lock is
-					 * used to synchronise against truncate
-					 * and invalidate. Mutually exclusive
-					 * with FAULT_RET_NOPAGE.
-					 */
-
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
- * ->fault function. The vma's ->fault is responsible for returning the
- * VM_FAULT_xxx type which occupies the lowest byte of the return code, ORed
- * with FAULT_RET_ flags that occupy the next byte and give details about
- * how the fault was handled.
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
  * pgoff should be used in favour of virtual_address, if possible. If pgoff
  * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
@@ -226,9 +211,9 @@ struct vm_fault {
 	void __user *virtual_address;	/* Faulting virtual address */
 
 	struct page *page;		/* ->fault handlers should return a
-					 * page here, unless FAULT_RET_NOPAGE
+					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
-					 * VM_FAULT_OOM or SIGBUS).
+					 * VM_FAULT_ERROR).
 					 */
 };
 
@@ -712,26 +697,17 @@ static inline int page_mapped(struct page *page)
  * just gets major/minor fault counters bumped up.
  */
 
-/*
- * VM_FAULT_ERROR is set for the error cases, to make some tests simpler.
- */
-#define VM_FAULT_ERROR	0x20
+#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
 
-#define VM_FAULT_OOM	(0x00 | VM_FAULT_ERROR)
-#define VM_FAULT_SIGBUS	(0x01 | VM_FAULT_ERROR)
-#define VM_FAULT_MINOR	0x02
-#define VM_FAULT_MAJOR	0x03
+#define VM_FAULT_OOM	0x0001
+#define VM_FAULT_SIGBUS	0x0002
+#define VM_FAULT_MAJOR	0x0004
+#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
 
-/*
- * Special case for get_user_pages.
- * Must be in a distinct bit from the above VM_FAULT_ flags.
- */
-#define VM_FAULT_WRITE	0x10
+#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 
-/*
- * Mask of VM_FAULT_ flags
- */
-#define VM_FAULT_MASK	0xff
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
@@ -817,16 +793,8 @@ extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 #ifdef CONFIG_MMU
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
-
-static inline int handle_mm_fault(struct mm_struct *mm,
-			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
-{
-	return __handle_mm_fault(mm, vma, address, write_access) &
-				(~VM_FAULT_WRITE);
-}
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
diff --git a/kernel/futex.c b/kernel/futex.c
index 5c3f45d07c53..a12425051ee9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -346,15 +346,20 @@ static int futex_handle_fault(unsigned long address,
 		vma = find_vma(mm, address);
 		if (vma && address >= vma->vm_start &&
 		    (vma->vm_flags & VM_WRITE)) {
-			switch (handle_mm_fault(mm, vma, address, 1)) {
-			case VM_FAULT_MINOR:
-				ret = 0;
-				current->min_flt++;
-				break;
-			case VM_FAULT_MAJOR:
+			int fault;
+			fault = handle_mm_fault(mm, vma, address, 1);
+			if (unlikely((fault & VM_FAULT_ERROR))) {
+#if 0
+				/* XXX: let's do this when we verify it is OK */
+				if (ret & VM_FAULT_OOM)
+					ret = -ENOMEM;
+#endif
+			} else {
 				ret = 0;
-				current->maj_flt++;
-				break;
+				if (fault & VM_FAULT_MAJOR)
+					current->maj_flt++;
+				else
+					current->min_flt++;
 			}
 		}
 		if (!fshared)
diff --git a/mm/filemap.c b/mm/filemap.c
index 0876cc57255f..4fd9e3f0f48a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1322,9 +1322,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	unsigned long size;
 	int did_readaround = 0;
-	int ret;
-
-	ret = VM_FAULT_MINOR;
+	int ret = 0;
 
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (vmf->pgoff >= size)
@@ -1408,7 +1406,7 @@ retry_find:
 	 */
 	mark_page_accessed(page);
 	vmf->page = page;
-	return ret | FAULT_RET_LOCKED;
+	return ret | VM_FAULT_LOCKED;
 
 outside_data_content:
 	/*
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 847d5d78163e..53ee6a299635 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -252,7 +252,7 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 out:
 	page_cache_get(page);
 	vmf->page = page;
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaa7c1a682d9..c4a573b857bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -469,7 +469,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	avoidcopy = (page_count(old_page) == 1);
 	if (avoidcopy) {
 		set_huge_ptep_writable(vma, address, ptep);
-		return VM_FAULT_MINOR;
+		return 0;
 	}
 
 	page_cache_get(old_page);
@@ -494,7 +494,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	page_cache_release(new_page);
 	page_cache_release(old_page);
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -551,7 +551,7 @@ retry:
 	if (idx >= size)
 		goto backout;
 
-	ret = VM_FAULT_MINOR;
+	ret = 0;
 	if (!pte_none(*ptep))
 		goto backout;
 
@@ -602,7 +602,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return ret;
 	}
 
-	ret = VM_FAULT_MINOR;
+	ret = 0;
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
@@ -641,7 +641,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			spin_unlock(&mm->page_table_lock);
 			ret = hugetlb_fault(mm, vma, vaddr, 0);
 			spin_lock(&mm->page_table_lock);
-			if (ret == VM_FAULT_MINOR)
+			if (!(ret & VM_FAULT_MAJOR))
 				continue;
 
 			remainder = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 23c870479b3e..61d51da7e17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1068,31 +1068,30 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		cond_resched();
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
-			ret = __handle_mm_fault(mm, vma, start,
+			ret = handle_mm_fault(mm, vma, start,
 					foll_flags & FOLL_WRITE);
+			if (ret & VM_FAULT_ERROR) {
+				if (ret & VM_FAULT_OOM)
+					return i ? i : -ENOMEM;
+				else if (ret & VM_FAULT_SIGBUS)
+					return i ? i : -EFAULT;
+				BUG();
+			}
+			if (ret & VM_FAULT_MAJOR)
+				tsk->maj_flt++;
+			else
+				tsk->min_flt++;
+
 			/*
-			 * The VM_FAULT_WRITE bit tells us that do_wp_page has
-			 * broken COW when necessary, even if maybe_mkwrite
-			 * decided not to set pte_write. We can thus safely do
-			 * subsequent page lookups as if they were reads.
+			 * The VM_FAULT_WRITE bit tells us that
+			 * do_wp_page has broken COW when necessary,
+			 * even if maybe_mkwrite decided not to set
+			 * pte_write. We can thus safely do subsequent
+			 * page lookups as if they were reads.
 			 */
 			if (ret & VM_FAULT_WRITE)
 				foll_flags &= ~FOLL_WRITE;
 
-			switch (ret & ~VM_FAULT_WRITE) {
-			case VM_FAULT_MINOR:
-				tsk->min_flt++;
-				break;
-			case VM_FAULT_MAJOR:
-				tsk->maj_flt++;
-				break;
-			case VM_FAULT_SIGBUS:
-				return i ? i : -EFAULT;
-			case VM_FAULT_OOM:
-				return i ? i : -ENOMEM;
-			default:
-				BUG();
-			}
 			cond_resched();
 		}
 		if (pages) {
@@ -1639,7 +1638,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1835,8 +1834,8 @@ static int unmap_mapping_range_vma(struct vm_area_struct *vma,
 	/*
 	 * files that support invalidating or truncating portions of the
 	 * file from under mmaped areas must have their ->fault function
-	 * return a locked page (and FAULT_RET_LOCKED code). This provides
-	 * synchronisation against concurrent unmapping here.
+	 * return a locked page (and set VM_FAULT_LOCKED in the return).
+	 * This provides synchronisation against concurrent unmapping here.
 	 */
 
 again:
@@ -2140,7 +2139,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
-	int ret = VM_FAULT_MINOR;
+	int ret = 0;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		goto out;
@@ -2208,8 +2207,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unlock_page(page);
 
 	if (write_access) {
+		/* XXX: We could OR the do_wp_page code with this one? */
 		if (do_wp_page(mm, vma, address,
-				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
+				page_table, pmd, ptl, pte) & VM_FAULT_OOM)
 			ret = VM_FAULT_OOM;
 		goto out;
 	}
@@ -2280,7 +2280,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 release:
 	page_cache_release(page);
 	goto unlock;
@@ -2323,11 +2323,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (likely(vma->vm_ops->fault)) {
 		ret = vma->vm_ops->fault(vma, &vmf);
-		if (unlikely(ret & (VM_FAULT_ERROR | FAULT_RET_NOPAGE)))
-			return (ret & VM_FAULT_MASK);
+		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+			return ret;
 	} else {
 		/* Legacy ->nopage path */
-		ret = VM_FAULT_MINOR;
+		ret = 0;
 		vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
 		/* no page was available -- either SIGBUS or OOM */
 		if (unlikely(vmf.page == NOPAGE_SIGBUS))
@@ -2340,7 +2340,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * For consistency in subsequent calls, make the faulted page always
 	 * locked.
 	 */
-	if (unlikely(!(ret & FAULT_RET_LOCKED)))
+	if (unlikely(!(ret & VM_FAULT_LOCKED)))
 		lock_page(vmf.page);
 	else
 		VM_BUG_ON(!PageLocked(vmf.page));
@@ -2356,7 +2356,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				ret = VM_FAULT_OOM;
 				goto out;
 			}
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+						vma, address);
 			if (!page) {
 				ret = VM_FAULT_OOM;
 				goto out;
@@ -2384,7 +2385,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * is better done later.
 	 */
 	if (!page->mapping) {
-		ret = VM_FAULT_MINOR;
+		ret = 0;
 		anon = 1; /* no anon but release vmf.page */
 		goto out;
 	}
@@ -2447,7 +2448,7 @@ out_unlocked:
 		put_page(dirty_page);
 	}
 
-	return (ret & VM_FAULT_MASK);
+	return ret;
 }
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2486,7 +2487,6 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 	unsigned long pfn;
-	int ret = VM_FAULT_MINOR;
 
 	pte_unmap(page_table);
 	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
@@ -2498,7 +2498,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	else if (unlikely(pfn == NOPFN_SIGBUS))
 		return VM_FAULT_SIGBUS;
 	else if (unlikely(pfn == NOPFN_REFAULT))
-		return VM_FAULT_MINOR;
+		return 0;
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
@@ -2510,7 +2510,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 	}
 	pte_unmap_unlock(page_table, ptl);
-	return ret;
+	return 0;
 }
 
 /*
@@ -2531,7 +2531,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pgoff_t pgoff;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
-		return VM_FAULT_MINOR;
+		return 0;
 
 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
 			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
@@ -2615,13 +2615,13 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 unlock:
 	pte_unmap_unlock(pte, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, int write_access)
 {
 	pgd_t *pgd;
@@ -2650,7 +2650,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
 
-EXPORT_SYMBOL_GPL(__handle_mm_fault);
+EXPORT_SYMBOL_GPL(handle_mm_fault);
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
diff --git a/mm/shmem.c b/mm/shmem.c
index 0a555af8733d..ad155c7745dc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1103,7 +1103,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 		return -EFBIG;
 
 	if (type)
-		*type = VM_FAULT_MINOR;
+		*type = 0;
 
 	/*
 	 * Normally, filepage is NULL on entry, and either found
@@ -1138,9 +1138,9 @@ repeat:
 		if (!swappage) {
 			shmem_swp_unmap(entry);
 			/* here we actually do the io */
-			if (type && *type == VM_FAULT_MINOR) {
+			if (type && !(*type & VM_FAULT_MAJOR)) {
 				__count_vm_event(PGMAJFAULT);
-				*type = VM_FAULT_MAJOR;
+				*type |= VM_FAULT_MAJOR;
 			}
 			spin_unlock(&info->lock);
 			swappage = shmem_swapin(info, swap, idx);
@@ -1323,7 +1323,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
 
 	mark_page_accessed(vmf->page);
-	return ret | FAULT_RET_LOCKED;
+	return ret | VM_FAULT_LOCKED;
 }
 
 #ifdef CONFIG_NUMA