aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64
diff options
context:
space:
mode:
authorCatalin Marinas <catalin.marinas@arm.com>2014-04-03 11:17:32 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2014-05-09 10:53:36 -0400
commitbc07c2c6e9ed125d362af0214b6313dca180cb08 (patch)
treea5a63f1272c11e321999145b8b7e8135b094aaaa /arch/arm64
parent15af1942dd61ee236a48b3de14d6f31c0b9e8116 (diff)
arm64: Introduce execute-only page access permissions
The ARMv8 architecture allows execute-only user permissions by clearing the PTE_UXN and PTE_USER bits. The kernel, however, can still access such a page, so execute-only page permission does not protect against read(2)/write(2) etc. accesses. Systems requiring such protection must implement/enable features like SECCOMP.

This patch changes the arm64 __P100 and __S100 protection_map[] macros to the new __PAGE_EXECONLY attributes. A side effect is that pte_valid_user() no longer triggers for __PAGE_EXECONLY since PTE_USER isn't set. To work around this, the check is done on the PTE_NG bit via the pte_valid_ng() macro. VM_READ is also checked now for page faults.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r--arch/arm64/include/asm/pgtable.h11
-rw-r--r--arch/arm64/mm/fault.c5
2 files changed, 8 insertions, 8 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 90c811f05a2e..e50bb3cbd8f2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -90,6 +90,7 @@ extern pgprot_t pgprot_default;
90#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) 90#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
91#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 91#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
92#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) 92#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
93#define __PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
93 94
94#endif /* __ASSEMBLY__ */ 95#endif /* __ASSEMBLY__ */
95 96
@@ -97,7 +98,7 @@ extern pgprot_t pgprot_default;
97#define __P001 __PAGE_READONLY 98#define __P001 __PAGE_READONLY
98#define __P010 __PAGE_COPY 99#define __P010 __PAGE_COPY
99#define __P011 __PAGE_COPY 100#define __P011 __PAGE_COPY
100#define __P100 __PAGE_READONLY_EXEC 101#define __P100 __PAGE_EXECONLY
101#define __P101 __PAGE_READONLY_EXEC 102#define __P101 __PAGE_READONLY_EXEC
102#define __P110 __PAGE_COPY_EXEC 103#define __P110 __PAGE_COPY_EXEC
103#define __P111 __PAGE_COPY_EXEC 104#define __P111 __PAGE_COPY_EXEC
@@ -106,7 +107,7 @@ extern pgprot_t pgprot_default;
106#define __S001 __PAGE_READONLY 107#define __S001 __PAGE_READONLY
107#define __S010 __PAGE_SHARED 108#define __S010 __PAGE_SHARED
108#define __S011 __PAGE_SHARED 109#define __S011 __PAGE_SHARED
109#define __S100 __PAGE_READONLY_EXEC 110#define __S100 __PAGE_EXECONLY
110#define __S101 __PAGE_READONLY_EXEC 111#define __S101 __PAGE_READONLY_EXEC
111#define __S110 __PAGE_SHARED_EXEC 112#define __S110 __PAGE_SHARED_EXEC
112#define __S111 __PAGE_SHARED_EXEC 113#define __S111 __PAGE_SHARED_EXEC
@@ -143,8 +144,8 @@ extern struct page *empty_zero_page;
143#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) 144#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
144#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) 145#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
145 146
146#define pte_valid_user(pte) \ 147#define pte_valid_ng(pte) \
147 ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) 148 ((pte_val(pte) & (PTE_VALID | PTE_NG)) == (PTE_VALID | PTE_NG))
148 149
149static inline pte_t pte_wrprotect(pte_t pte) 150static inline pte_t pte_wrprotect(pte_t pte)
150{ 151{
@@ -198,7 +199,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
198static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 199static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
199 pte_t *ptep, pte_t pte) 200 pte_t *ptep, pte_t pte)
200{ 201{
201 if (pte_valid_user(pte)) { 202 if (pte_valid_ng(pte)) {
202 if (!pte_special(pte) && pte_exec(pte)) 203 if (!pte_special(pte) && pte_exec(pte))
203 __sync_icache_dcache(pte, addr); 204 __sync_icache_dcache(pte, addr);
204 if (pte_dirty(pte) && pte_write(pte)) 205 if (pte_dirty(pte) && pte_write(pte))
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index bcc965e2cce1..89c6763d5e7e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -173,8 +173,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
173good_area: 173good_area:
174 /* 174 /*
175 * Check that the permissions on the VMA allow for the fault which 175 * Check that the permissions on the VMA allow for the fault which
176 * occurred. If we encountered a write or exec fault, we must have 176 * occurred.
177 * appropriate permissions, otherwise we allow any permission.
178 */ 177 */
179 if (!(vma->vm_flags & vm_flags)) { 178 if (!(vma->vm_flags & vm_flags)) {
180 fault = VM_FAULT_BADACCESS; 179 fault = VM_FAULT_BADACCESS;
@@ -196,7 +195,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
196 struct task_struct *tsk; 195 struct task_struct *tsk;
197 struct mm_struct *mm; 196 struct mm_struct *mm;
198 int fault, sig, code; 197 int fault, sig, code;
199 unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; 198 unsigned long vm_flags = VM_READ | VM_WRITE;
200 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 199 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
201 200
202 tsk = current; 201 tsk = current;