author		Linus Torvalds <torvalds@linux-foundation.org>	2009-04-10 12:01:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-21 16:08:22 -0400
commit		d06063cc221fdefcab86589e79ddfdb7c0e14b63 (patch)
tree		00ccaf8c1992b57a4445d78b9eae25fde0b3ab31
parent		30c9f3a9fae79517bca595826a19c6855fbb6d32 (diff)
Move FAULT_FLAG_xyz into handle_mm_fault() callers
This allows the callers to pass down the full set of FAULT_FLAG_xyz
flags to handle_mm_fault(). All callers have been (mechanically)
converted to the new calling convention; there is almost certainly room
for architectures to clean up their code and then add FAULT_FLAG_RETRY
when that support is added.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
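
[Editor's note: a minimal sketch of the old and new calling conventions, assuming
FAULT_FLAG_WRITE is defined as 0x01 in <linux/mm.h> (as it is in this tree). The
helper name fault_flags_from_write() is hypothetical, for illustration only; it
is not part of the patch.]

	/* Illustrative sketch only -- not from the patch. */
	#include <linux/mm.h>	/* handle_mm_fault(), FAULT_FLAG_WRITE */

	/* Hypothetical helper showing how a caller now builds its flags word. */
	static unsigned int fault_flags_from_write(int write)
	{
		unsigned int flags = 0;

		if (write)
			flags |= FAULT_FLAG_WRITE;	/* 0x01 in this tree */
		/* Future FAULT_FLAG_xyz bits (e.g. FAULT_FLAG_RETRY) OR in here. */
		return flags;
	}

	/*
	 * Old convention (flag expansion hidden inside mm/memory.c):
	 *	fault = handle_mm_fault(mm, vma, address, write);
	 * New convention (caller passes the full flags word):
	 *	fault = handle_mm_fault(mm, vma, address,
	 *				fault_flags_from_write(write));
	 */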
-rw-r--r--	arch/alpha/mm/fault.c                    | 2 +-
-rw-r--r--	arch/arm/mm/fault.c                      | 2 +-
-rw-r--r--	arch/avr32/mm/fault.c                    | 2 +-
-rw-r--r--	arch/cris/mm/fault.c                     | 2 +-
-rw-r--r--	arch/frv/mm/fault.c                      | 2 +-
-rw-r--r--	arch/ia64/mm/fault.c                     | 2 +-
-rw-r--r--	arch/m32r/mm/fault.c                     | 2 +-
-rw-r--r--	arch/m68k/mm/fault.c                     | 2 +-
-rw-r--r--	arch/microblaze/mm/fault.c               | 2 +-
-rw-r--r--	arch/mips/mm/fault.c                     | 2 +-
-rw-r--r--	arch/mn10300/mm/fault.c                  | 2 +-
-rw-r--r--	arch/parisc/mm/fault.c                   | 2 +-
-rw-r--r--	arch/powerpc/mm/fault.c                  | 2 +-
-rw-r--r--	arch/powerpc/platforms/cell/spu_fault.c  | 2 +-
-rw-r--r--	arch/s390/lib/uaccess_pt.c               | 2 +-
-rw-r--r--	arch/s390/mm/fault.c                     | 2 +-
-rw-r--r--	arch/sh/mm/fault_32.c                    | 2 +-
-rw-r--r--	arch/sh/mm/tlbflush_64.c                 | 2 +-
-rw-r--r--	arch/sparc/mm/fault_32.c                 | 4 ++--
-rw-r--r--	arch/sparc/mm/fault_64.c                 | 2 +-
-rw-r--r--	arch/um/kernel/trap.c                    | 2 +-
-rw-r--r--	arch/x86/mm/fault.c                      | 2 +-
-rw-r--r--	arch/xtensa/mm/fault.c                   | 2 +-
-rw-r--r--	include/linux/mm.h                       | 4 ++--
-rw-r--r--	mm/memory.c                              | 8 ++++----
25 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4829f96585b1..00a31deaa96e 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a2899..6fdcbb709827 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 62d4abbaa654..b61d86d3debf 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c4c76db90f9c..f925115e3250 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */

-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 05093d41d98e..30f5d100a81c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 23088bed111e..19261a99e623 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 4a71df4c1b30..7274b47f4c22 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write);
+	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index f493f03231d5..d0e35cf99fc6 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -155,7 +155,7 @@ good_area:
 	 */

 survive:
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5e67cd1fab40..956607a63f4c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -232,7 +232,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 55767ad9f00e..6751ce9ede9e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -102,7 +102,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 33cf25025dac..a62e1e138bc1 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -258,7 +258,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 92c7fa4ecc3f..bfb6dd6ab380 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -202,7 +202,7 @@ good_area:
 	 * fault.
 	 */

-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8f481e..830bef0a1131 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 95d8dadf2d87..d06ba87f1a19 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}

 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b0b84c35b0ad..cb5d59eab0ee 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}

 survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 220a152c836c..74eb26bf1970 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,7 +352,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			up_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 2c50f80fc332..cc8ddbdf3d7a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -133,7 +133,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7876997ba19a..fcbb6e135cef 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -187,7 +187,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 12e447fc8542..a5e30c642ee3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -484,7 +484,7 @@ good_area:
 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, write)) {
+	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ab8993b0863..e5620b27c8bf 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -398,7 +398,7 @@ good_area:
 		goto bad_area;
 	}

-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 7384d8accfe7..637c6505dc00 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -65,7 +65,7 @@ good_area:
 	do {
 		int fault;

-		fault = handle_mm_fault(mm, vma, address, is_write);
+		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c403526d5d15..78a5fff857be 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1113,7 +1113,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index bdd860d93f72..bc0733359a88 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -106,7 +106,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf260d848eb9..d006e93d5c93 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);

 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access);
+			unsigned long address, unsigned int flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
+			unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
diff --git a/mm/memory.c b/mm/memory.c
index e6a9700359df..98bcb90d5957 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		cond_resched();
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
-			ret = handle_mm_fault(mm, vma, start,
-					foll_flags & FOLL_WRITE);
+
+			/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+			ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
 			if (ret & VM_FAULT_ERROR) {
 				if (ret & VM_FAULT_OOM)
 					return i ? i : -ENOMEM;
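
[Editor's note: the comment added in the hunk above works because FOLL_WRITE and
FAULT_FLAG_WRITE happen to share the bit value 0x01 in this tree, so
foll_flags & FOLL_WRITE can be passed straight through as the fault flags word.
A compile-time guard of the following shape would document that dependency; it
is a sketch under that assumption, not part of this patch, and the function
name is made up.]

	/* Illustrative sketch only -- not from the patch. */
	#include <linux/kernel.h>	/* BUILD_BUG_ON() */
	#include <linux/mm.h>		/* FOLL_WRITE, FAULT_FLAG_WRITE */

	static inline void assert_foll_write_matches_fault_flag_write(void)
	{
		/* Fails to build if the bit values ever diverge. */
		BUILD_BUG_ON(FOLL_WRITE != FAULT_FLAG_WRITE);
	}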
@@ -2958,13 +2959,12 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, int write_access)
+		unsigned long address, unsigned int flags)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;

 	__set_current_state(TASK_RUNNING);
