 arch/alpha/mm/fault.c      |  7
 arch/arc/mm/fault.c        |  6
 arch/arm/mm/fault.c        |  9
 arch/arm64/mm/fault.c      | 17
 arch/avr32/mm/fault.c      |  2
 arch/cris/mm/fault.c       |  6
 arch/frv/mm/fault.c        | 10
 arch/hexagon/mm/vm_fault.c |  6
 arch/ia64/mm/fault.c       |  6
 arch/m32r/mm/fault.c       | 10
 arch/m68k/mm/fault.c       |  2
 arch/metag/mm/fault.c      |  6
 arch/microblaze/mm/fault.c |  7
 arch/mips/mm/fault.c       |  6
 arch/mn10300/mm/fault.c    |  2
 arch/openrisc/mm/fault.c   |  1
 arch/parisc/mm/fault.c     |  7
 arch/powerpc/mm/fault.c    |  7
 arch/s390/mm/fault.c       |  2
 arch/score/mm/fault.c      |  7
 arch/sh/mm/fault.c         |  9
 arch/sparc/mm/fault_32.c   | 12
 arch/sparc/mm/fault_64.c   |  6
 arch/tile/mm/fault.c       |  7
 arch/um/kernel/trap.c      | 20
 arch/unicore32/mm/fault.c  |  8
 arch/x86/mm/fault.c        |  8
 arch/xtensa/mm/fault.c     |  2
 include/linux/mm.h         |  1
 29 files changed, 135 insertions(+), 64 deletions(-)
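Every file below applies the same two-step change: FAULT_FLAG_WRITE is no
longer folded into the flags initializer before the faulting context is
known, and the new FAULT_FLAG_USER is raised for faults taken from
userspace. A minimal sketch of the recurring pattern, where is_write
stands in for each architecture's own write test (fsr, esr, error_code,
cause, ...) and is an assumption of this sketch rather than code from any
one handler:

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (user_mode(regs))			/* fault taken from userspace */
		flags |= FAULT_FLAG_USER;
	/* ... find_vma(), VMA access checks ... */
	if (is_write) {				/* arch-specific write test */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;	/* only for permitted writes */
	}

	fault = handle_mm_fault(mm, vma, address, flags);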
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 0c4132dd3507..98838a05ba6d 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,8 +89,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	const struct exception_table_entry *fixup;
 	int fault, si_code = SEGV_MAPERR;
 	siginfo_t info;
-	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (cause > 0 ? FAULT_FLAG_WRITE : 0));
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
 	   (or is suppressed by the PALcode).  Support that for older CPUs
@@ -115,7 +114,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	if (address >= TASK_SIZE)
 		goto vmalloc_fault;
 #endif
-
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -142,6 +142,7 @@ retry:
 	} else {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	}
 
 	/* If for any reason at all we couldn't handle the fault,
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6b0bb415af40..d63f3de0cd5b 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -60,8 +60,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 	siginfo_t info;
 	int fault, ret;
 	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				(write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -89,6 +88,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -117,6 +118,7 @@ good_area:
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 217bcbfde42e..eb8830a4c5ed 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -261,9 +261,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	int write = fsr & FSR_WRITE;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				(write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, fsr))
 		return 0;
@@ -282,6 +280,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (fsr & FSR_WRITE)
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * As per x86, we may deadlock here.  However, since the kernel only
 	 * validly references user space from well defined areas of the code,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0bb7db41f4fe..6d6acf153bff 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -199,13 +199,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	if (esr & ESR_LNX_EXEC) {
-		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
-		vm_flags = VM_WRITE;
-		mm_flags |= FAULT_FLAG_WRITE;
-	}
-
 	tsk = current;
 	mm = tsk->mm;
 
@@ -220,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		mm_flags |= FAULT_FLAG_USER;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
+
 	/*
 	 * As per x86, we may deadlock here.  However, since the kernel only
 	 * validly references user space from well defined areas of the code,
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 2ca27b055825..0eca93327195 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -86,6 +86,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
 	local_irq_enable();
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 73312ab6c696..1790f22e71a2 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -58,8 +58,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	siginfo_t info;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	D(printk(KERN_DEBUG
 		 "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -117,6 +116,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -155,6 +156,7 @@ retry:
 	} else if (writeaccess == 1) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 331c1e2cfb67..9a66372fc7c7 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -34,11 +34,11 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
 	pud_t *pue;
 	pte_t *pte;
-	int write;
 	int fault;
 
 #if 0
@@ -81,6 +81,9 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(__frame))
+		flags |= FAULT_FLAG_USER;
+
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 */
  good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (esr0 & ESR0_ATXC) {
 	default:
 		/* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 
 		/* handle read from protected page */
@@ -162,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 1bd276dbec7d..8704c9320032 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -53,8 +53,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	int si_code = SEGV_MAPERR;
 	int fault;
 	const struct exception_table_entry *fixup;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				 (cause > 0 ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/*
 	 * If we're in an interrupt or have no user context,
@@ -65,6 +64,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
 	local_irq_enable();
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -96,6 +97,7 @@ good_area:
 	case FLT_STORE:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 	}
 
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 6cf0341f978e..7225dad87094 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -90,8 +90,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
 		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
-	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
-
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
 
@@ -119,6 +117,10 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (mask & VM_WRITE)
+		flags |= FAULT_FLAG_WRITE;
 retry:
 	down_read(&mm->mmap_sem);
 
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 3cdfa9c1d091..e9c6a8014bd6 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page, addr;
-	int write;
+	unsigned long flags = 0;
 	int fault;
 	siginfo_t info;
 
@@ -117,6 +117,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+	if (error_code & ACE_USERMODE)
+		flags |= FAULT_FLAG_USER;
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -166,14 +169,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 */
 good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
 	default:	/* 3: write, present */
 		/* fall through */
 	case ACE_WRITE:	/* write, not present */
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write++;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 	case ACE_PROTECTION:	/* read, present */
 	case 0:		/* read, not present */
@@ -194,7 +196,7 @@ good_area:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index a563727806bf..eb1d61f68725 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -88,6 +88,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 8fddf46e6c62..332680e5ebf2 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -53,8 +53,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct vm_area_struct *vma, *prev_vma;
 	siginfo_t info;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				(write_access ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 
@@ -109,6 +108,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 
@@ -121,6 +122,7 @@ good_area:
 	if (write_access) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 731f739d17a1..fa4cf52aa7a6 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,8 +92,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	int code = SEGV_MAPERR;
 	int is_write = error_code & ESR_S;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				 (is_write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	regs->ear = address;
 	regs->esr = error_code;
@@ -121,6 +120,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 		die("Weird page fault", regs, SIGSEGV);
 	}
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -199,6 +201,7 @@ good_area:
 	if (unlikely(is_write)) {
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	/* a read */
 	} else {
 		/* protection fault */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 94d3a31ab144..becc42bb1849 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -42,8 +42,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				(write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -93,6 +92,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -114,6 +115,7 @@ good_area:
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (cpu_has_rixi) {
 			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 8a2e6ded9a44..3516cbdf1ee9 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -171,6 +171,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 4a41f8493ab0..0703acf7d327 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (user_mode(regs)) {
 		/* Exception was in userspace: reenable interrupts */
 		local_irq_enable();
+		flags |= FAULT_FLAG_USER;
 	} else {
 		/* If exception was in a syscall, then IRQ's may have
 		 * been enabled or disabled.  If they were enabled,
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index f247a3480e8e..d10d27a720c0 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -180,6 +180,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (acc_type & VM_WRITE)
+		flags |= FAULT_FLAG_WRITE;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -203,8 +207,7 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address,
-			flags | ((acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0));
+	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2dd69bf4af46..51ab9e7e6c39 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -223,9 +223,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
-	if (is_write)
-		flags |= FAULT_FLAG_WRITE;
-
 #ifdef CONFIG_PPC_ICSWX
 	/*
 	 * we need to do this early because this "data storage
@@ -288,6 +285,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (user_mode(regs))
 		store_update_sp = store_updates_sp(regs);
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -415,6 +415,7 @@ good_area:
 	} else if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	/* a read */
 	} else {
 		/* protection fault */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7de4469915f0..fc6679210d83 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -302,6 +302,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
 	down_read(&mm->mmap_sem);
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 4b71a626d41e..52238983527d 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -47,6 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 	const int field = sizeof(unsigned long) * 2;
+	unsigned long flags = 0;
 	siginfo_t info;
 	int fault;
 
@@ -75,6 +76,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -95,6 +99,7 @@ good_area:
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
 			goto bad_area;
@@ -105,7 +110,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 1f49c28affa9..541dc6101508 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -400,9 +400,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	int fault;
-	int write = error_code & FAULT_CODE_WRITE;
-	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (write ? FAULT_FLAG_WRITE : 0));
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -476,6 +474,11 @@ good_area:
 
 	set_thread_fault_code(error_code);
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (error_code & FAULT_CODE_WRITE)
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index e98bfda205a2..59dbd4645725 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -177,8 +177,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
-	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-			      (write ? FAULT_FLAG_WRITE : 0));
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (text_fault)
 		address = regs->pc;
@@ -235,6 +234,11 @@ good_area:
 			goto bad_area;
 	}
 
+	if (from_user)
+		flags |= FAULT_FLAG_USER;
+	if (write)
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -383,6 +387,7 @@ static void force_user_fault(unsigned long address, int write)
 	struct vm_area_struct *vma;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
+	unsigned int flags = FAULT_FLAG_USER;
 	int code;
 
 	code = SEGV_MAPERR;
@@ -402,11 +407,12 @@ good_area:
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
+	switch (handle_mm_fault(mm, vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 5062ff389e83..2ebec263d685 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -315,7 +315,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			bad_kernel_pc(regs, address);
 			return;
 		}
-	}
+	} else
+		flags |= FAULT_FLAG_USER;
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -418,13 +419,14 @@ good_area:
 		    vma->vm_file != NULL)
 			set_thread_fault_code(fault_code |
 					      FAULT_CODE_BLKCOMMIT);
+
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		/* Allow reads even for write-only mappings */
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
 
-	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
 	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 4fd2c0f2a66d..4c288f199453 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -280,8 +280,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	if (!is_page_fault)
 		write = 1;
 
-	flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		 (write ? FAULT_FLAG_WRITE : 0));
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	is_kernel_mode = !user_mode(regs);
 
@@ -365,6 +364,9 @@ static int handle_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
+	if (!is_kernel_mode)
+		flags |= FAULT_FLAG_USER;
+
 	/*
 	 * When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
@@ -425,6 +427,7 @@ good_area:
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		if (!is_page_fault || !(vma->vm_flags & VM_READ))
 			goto bad_area;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index b2f5adf838dd..5c3aef74237f 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -30,8 +30,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	pmd_t *pmd;
 	pte_t *pte;
 	int err = -EFAULT;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				 (is_write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	*code_out = SEGV_MAPERR;
 
@@ -42,6 +41,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	if (in_atomic())
 		goto out_nosemaphore;
 
+	if (is_user)
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -58,12 +59,15 @@ retry:
 
 good_area:
 	*code_out = SEGV_ACCERR;
-	if (is_write && !(vma->vm_flags & VM_WRITE))
-		goto out;
-
-	/* Don't require VM_READ|VM_EXEC for write faults! */
-	if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
-		goto out;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto out;
+		flags |= FAULT_FLAG_WRITE;
+	} else {
+		/* Don't require VM_READ|VM_EXEC for write faults! */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto out;
+	}
 
 	do {
 		int fault;
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 8ed3c4509d84..0dc922dba915 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -209,8 +209,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-				 ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -222,6 +221,11 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (!(fsr ^ 0x12))
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * As per x86, we may deadlock here.  However, since the kernel only
 	 * validly references user space from well defined areas of the code,
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 654be4ae3047..6d77c3866faa 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1011,9 +1011,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	unsigned long address;
 	struct mm_struct *mm;
 	int fault;
-	int write = error_code & PF_WRITE;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-					(write ? FAULT_FLAG_WRITE : 0);
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -1083,6 +1081,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (user_mode_vm(regs)) {
 		local_irq_enable();
 		error_code |= PF_USER;
+		flags |= FAULT_FLAG_USER;
 	} else {
 		if (regs->flags & X86_EFLAGS_IF)
 			local_irq_enable();
@@ -1109,6 +1108,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		return;
 	}
 
+	if (error_code & PF_WRITE)
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 4b7bc8db170f..70fa7bc42b4a 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -72,6 +72,8 @@ void do_page_fault(struct pt_regs *regs)
 	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
 retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index caf543c7eaa7..3d9b503bcd00 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -176,6 +176,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
 #define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
 #define FAULT_FLAG_TRIED	0x40	/* second try */
+#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
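With the flag defined, generic mm code can tell user-initiated faults from
kernel-initiated ones without consulting any architecture state. A
hypothetical consumer, shown only to illustrate the intended use (the
helper below is this note's assumption, not part of the patch):

	/* Hypothetical helper, not from this patch. */
	static inline bool fault_is_user(unsigned int flags)
	{
		/* true when the faulting access was made by userspace */
		return flags & FAULT_FLAG_USER;
	}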