about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2009-09-20 07:53:01 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2009-09-20 11:53:40 -0400
commitd374bf14a5ff18133bd6a6cc00f189949f7ba8fb (patch)
treed04f40327d5575f5f848695a97099527475a2634
parentbf4569922b97824f33e5d08d0bbe4b34fe43e4cd (diff)
ARM: Separate out access error checking
Since we get notified separately about prefetch aborts, which may be permission faults, we need to check for appropriate access permissions when handling a fault. This patch prepares us for doing this by separating out the access error checking. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/mm/fault.c33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b8b3bb4423cf..b7ce07d416cd 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -194,18 +194,33 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-static int
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+	if (fsr & FSR_WRITE)
+		mask = VM_WRITE;
+
+	return vma->vm_flags & mask ? false : true;
+}
+
+static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
-	int fault, mask;
+	int fault;
 
 	vma = find_vma(mm, addr);
 	fault = VM_FAULT_BADMAP;
-	if (!vma)
+	if (unlikely(!vma))
 		goto out;
-	if (vma->vm_start > addr)
+	if (unlikely(vma->vm_start > addr))
 		goto check_stack;
 
 	/*
@@ -213,14 +228,10 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * memory access, so we can handle it.
 	 */
 good_area:
-	if (fsr & FSR_WRITE)
-		mask = VM_WRITE;
-	else
-		mask = VM_READ|VM_EXEC|VM_WRITE;
-
-	fault = VM_FAULT_BADACCESS;
-	if (!(vma->vm_flags & mask))
+	if (access_error(fsr, vma)) {
+		fault = VM_FAULT_BADACCESS;
 		goto out;
+	}
 
 	/*
 	 * If for any reason at all we couldn't handle the fault, make