path: root/arch/arm64/mm/fault.c
author	Will Deacon <will.deacon@arm.com>	2013-07-19 10:37:12 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2013-07-19 10:49:44 -0400
commit	db6f41063cbdb58b14846e600e6bc3f4e4c2e888 (patch)
tree	a52308073355a4662387324a0f998c871ad584fe /arch/arm64/mm/fault.c
parent	c783c2815e13bbb0c0b99997cc240bd7e91b6bb8 (diff)
arm64: mm: don't treat user cache maintenance faults as writes
On arm64, cache maintenance faults appear as data aborts with the CM
bit set in the ESR. The WnR bit, usually used to distinguish between
faulting loads and stores, always reads as 1 and (slightly confusingly)
the instructions are treated as reads by the architecture.

This patch fixes our fault handling code to treat cache maintenance
faults in the same way as loads.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
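
For context, here is a minimal sketch (not part of the patch) of the ESR
decode this fix relies on. ESR_CM and ESR_LNX_EXEC match the definitions
visible in the diff below; the ESR_WRITE (WnR, bit 6) definition lives
elsewhere in fault.c and is assumed here:

    #define ESR_WRITE	(1 << 6)	/* WnR: write, not read */
    #define ESR_CM		(1 << 8)	/* cache maintenance operation */
    #define ESR_LNX_EXEC	(1 << 24)	/* Linux-internal exec marker */

    /*
     * A faulting DC/IC instruction reports WnR=1 *and* CM=1, so a true
     * write fault is only a set WnR bit with CM clear -- exactly the
     * check the patch introduces in do_page_fault().
     */
    static bool is_write_fault(unsigned int esr)
    {
    	return (esr & ESR_WRITE) && !(esr & ESR_CM);
    }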
Diffstat (limited to 'arch/arm64/mm/fault.c')
-rw-r--r--	arch/arm64/mm/fault.c	46
1 file changed, 20 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 * it.
 	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
 
 	tsk = current;
 	mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
265 */ 259 */
266 260
267 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); 261 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
268 if (flags & FAULT_FLAG_ALLOW_RETRY) { 262 if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
269 if (fault & VM_FAULT_MAJOR) { 263 if (fault & VM_FAULT_MAJOR) {
270 tsk->maj_flt++; 264 tsk->maj_flt++;
271 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, 265 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
 			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 			 * starvation.
 			 */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			goto retry;
 		}
 	}
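
As an illustration of the behaviour being fixed, a hypothetical
user-space reproducer might look like the sketch below (it assumes the
kernel exposes EL0 cache maintenance by setting SCTLR_EL1.UCI, as Linux
does). On kernels without this patch, the data abort taken by DC CVAU on
the not-yet-populated read-only page carries WnR=1, so it was treated as
a write to a non-writable VMA and the process received SIGSEGV; with the
patch it is handled like a load and the page is simply faulted in:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
    	/* Read-only and not populated: the first access faults. */
    	char *p = mmap(NULL, 4096, PROT_READ,
    		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (p == MAP_FAILED)
    		return 1;

    	/* User-space cache maintenance: faults with WnR=1, CM=1. */
    	asm volatile("dc cvau, %0" : : "r" (p) : "memory");

    	puts("cache maintenance on a read-only mapping succeeded");
    	return 0;
    }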