author		Catalin Marinas <catalin.marinas@arm.com>	2016-08-11 13:44:50 -0400
committer	Will Deacon <will.deacon@arm.com>	2016-08-25 13:00:29 -0400
commit		cab15ce604e550020bb7115b779013b91bcdbc21
tree		0ba025b8ad16505532aaa6e1b4cc665473f026fa
parent		7419333fa15ec958d12845fcc79b7bdd16de06ec
arm64: Introduce execute-only page access permissions
The ARMv8 architecture allows execute-only user permissions by clearing the
PTE_UXN and PTE_USER bits. However, the kernel running on a CPU
implementation without User Access Override (UAO, ARMv8.2 onwards) can
still access such a page, so execute-only page permissions do not protect
against read(2)/write(2) and similar accesses. Systems requiring such
protection must enable features like SECCOMP.

This patch changes the arm64 __P100 and __S100 protection_map[] macros to
the new PAGE_EXECONLY attributes. A side effect is that pte_user() no
longer triggers for PAGE_EXECONLY since PTE_USER isn't set. To work around
this, the check is done on the PTE_NG bit via the new pte_ng() macro.
VM_READ is now also checked on page faults.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
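[Editor's note] To show what the new __P100/__S100 mapping means from
userspace, here is a minimal, hypothetical demo; it is not part of the
patch, and the file name and messages are illustrative. It plants a single
AArch64 "ret" instruction in a page, switches the page to PROT_EXEC only,
and demonstrates that calling into the page still works while a direct
load faults. The intermediate read(2) illustrates the caveat above: on a
CPU without UAO it may still modify the execute-only page. Assumes an
arm64 Linux kernel with this patch applied.

/* execonly-demo.c - hypothetical illustration, not part of the patch */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig)
{
	/* Reached when the direct load from the execute-only page faults. */
	const char msg[] = "direct read faulted: page is execute-only\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned int *code;
	int fd;

	/* Start writable so we can plant one instruction. */
	code = mmap(NULL, page, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (code == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	code[0] = 0xd65f03c0;	/* AArch64 "ret" */
	__builtin___clear_cache((char *)code, (char *)code + page);

	/* PROT_EXEC alone now selects PAGE_EXECONLY (__P100). */
	if (mprotect(code, page, PROT_EXEC)) {
		perror("mprotect");
		return 1;
	}

	((void (*)(void))code)();	/* instruction fetch still allowed */
	puts("executed from the execute-only page");

	/*
	 * Kernel-mediated access: without UAO (pre-ARMv8.2), read(2) can
	 * still write into the page on our behalf, as the commit message
	 * warns; with UAO enforcement it fails with EFAULT instead.
	 */
	fd = open("/dev/zero", O_RDONLY);
	if (fd >= 0 && read(fd, code, 4) == 4)
		puts("read(2) modified the page: no UAO protection");

	/* Direct user-space load: expected to fault with this patch. */
	signal(SIGSEGV, segv_handler);
	printf("0x%x\n", code[0]);

	puts("direct read succeeded: execute-only not enforced?");
	return 0;
}

With the patch applied, the demo should report the successful execution
and then the faulting direct read; on an older kernel, PROT_EXEC maps to
PAGE_READONLY_EXEC and the direct read succeeds.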
 arch/arm64/include/asm/pgtable-prot.h |  5 +++--
 arch/arm64/include/asm/pgtable.h      | 10 +++++-----
 arch/arm64/mm/fault.c                 |  5 ++---
 mm/mmap.c                             |  5 +++++
 4 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 39f5252673f7..2142c7726e76 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -70,12 +70,13 @@
 #define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_COPY
 #define __P011  PAGE_COPY
-#define __P100  PAGE_READONLY_EXEC
+#define __P100  PAGE_EXECONLY
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_COPY_EXEC
 #define __P111  PAGE_COPY_EXEC
@@ -84,7 +85,7 @@
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY_EXEC
+#define __S100  PAGE_EXECONLY
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
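[Editor's note] For reference, a small hypothetical sketch of the bit
positions behind the new value; this is standalone demonstration code,
not kernel code, with the bit definitions copied from the arm64 page
table descriptor format. PAGE_EXECONLY differs from PAGE_READONLY_EXEC
only in dropping PTE_USER (AP[1]): with AP[1] clear, EL0 has no data
access, while PTE_UXN stays clear so EL0 instruction fetch remains
permitted; PTE_NG keeps the mapping per-ASID and, after this patch, is
what identifies a user mapping.

/* prot-bits.c - editor's sketch of the relevant descriptor bits */
#include <stdio.h>

#define PTE_USER	(1UL << 6)	/* AP[1]: EL0 data access */
#define PTE_NG		(1UL << 11)	/* non-global (per-ASID) */
#define PTE_PXN		(1UL << 53)	/* privileged execute-never */
#define PTE_UXN		(1UL << 54)	/* user execute-never */

int main(void)
{
	/* _PAGE_DEFAULT is common to both and omitted here. */
	unsigned long readonly_exec = PTE_USER | PTE_NG | PTE_PXN;
	unsigned long execonly      = PTE_NG | PTE_PXN;

	/* The only difference is AP[1]; UXN is clear in both, so EL0
	 * can still execute from the execute-only mapping. */
	printf("diff: %#lx (PTE_USER)\n", readonly_exec ^ execonly);
	return 0;
}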
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e20bd431184a..7ba1cebb64d9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -73,7 +73,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
-#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
+#define pte_ng(pte)		(!!(pte_val(pte) & PTE_NG))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -84,8 +84,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_global(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_NG)) == PTE_VALID)
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 
@@ -168,7 +168,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_not_user(pte)) {
+	if (pte_valid_global(pte)) {
 		dsb(ishst);
 		isb();
 	}
@@ -202,7 +202,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_val(pte) &= ~PTE_RDONLY;
 	else
 		pte_val(pte) |= PTE_RDONLY;
-	if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+	if (pte_ng(pte) && pte_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte, addr);
 }
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 05d2bd776c69..a5f098a5f602 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -251,8 +251,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 good_area:
 	/*
 	 * Check that the permissions on the VMA allow for the fault which
-	 * occurred. If we encountered a write or exec fault, we must have
-	 * appropriate permissions, otherwise we allow any permission.
+	 * occurred.
 	 */
 	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
@@ -288,7 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned long vm_flags = VM_READ | VM_WRITE;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, esr))
diff --git a/mm/mmap.c b/mm/mmap.c
index ca9d91bca0d6..69cad562cd00 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -88,6 +88,11 @@ static void unmap_region(struct mm_struct *mm,
  * w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
  * x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
  *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE:
+ * r: (no) no
+ * w: (no) no
+ * x: (yes) yes
  */
 pgprot_t protection_map[16] = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,