about · summary · refs · log · tree · commit · diff · stats
path: root/arch/arm64/mm
diff options
context:
space:
mode:
authorCatalin Marinas <catalin.marinas@arm.com>2016-08-11 13:44:50 -0400
committerWill Deacon <will.deacon@arm.com>2016-08-25 13:00:29 -0400
commitcab15ce604e550020bb7115b779013b91bcdbc21 (patch)
tree0ba025b8ad16505532aaa6e1b4cc665473f026fa /arch/arm64/mm
parent7419333fa15ec958d12845fcc79b7bdd16de06ec (diff)
arm64: Introduce execute-only page access permissions
The ARMv8 architecture allows execute-only user permissions by clearing the PTE_UXN and PTE_USER bits. However, the kernel running on a CPU implementation without User Access Override (ARMv8.2 onwards) can still access such page, so execute-only page permission does not protect against read(2)/write(2) etc. accesses. Systems requiring such protection must enable features like SECCOMP. This patch changes the arm64 __P100 and __S100 protection_map[] macros to the new __PAGE_EXECONLY attributes. A side effect is that pte_user() no longer triggers for __PAGE_EXECONLY since PTE_USER isn't set. To work around this, the check is done on the PTE_NG bit via the pte_ng() macro. VM_READ is also checked now for page faults. Reviewed-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--arch/arm64/mm/fault.c5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 05d2bd776c69..a5f098a5f602 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -251,8 +251,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 good_area:
 	/*
 	 * Check that the permissions on the VMA allow for the fault which
-	 * occurred. If we encountered a write or exec fault, we must have
-	 * appropriate permissions, otherwise we allow any permission.
+	 * occurred.
 	 */
 	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
@@ -288,7 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned long vm_flags = VM_READ | VM_WRITE;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, esr))