author    Catalin Marinas <catalin.marinas@arm.com>    2014-05-16 11:44:32 -0400
committer Catalin Marinas <catalin.marinas@arm.com>    2014-05-16 11:44:32 -0400
commit    5a0fdfada3a2aa50d7b947a2e958bf00cbe0d830
tree      69ae10dd39df9e8de4edd1049d4dafe1436dd37e /arch/arm64
parent    cf5c95db57ffa02e430c3840c08d1ee0403849d4
Revert "arm64: Introduce execute-only page access permissions"
This reverts commit bc07c2c6e9ed125d362af0214b6313dca180cb08.
While the aim is increased security for --x memory maps, it does not
protect against kernel level reads. Until SECCOMP is implemented for
arm64, revert this patch to avoid giving a false idea of execute-only
mappings.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
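The caveat about kernel-level reads is easy to demonstrate. Below is a minimal userspace sketch (illustrative only, not part of this patch): even while a mapping is execute-only at EL0, the process can ask the kernel to read it on its behalf, because uaccess copies run with kernel privileges that the AP[1]=0 encoding still permits to read. Filtering such syscalls is the kind of enforcement seccomp could add.

```c
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Map one page and fill it while it is still writable. */
	unsigned char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xC0, page);

	/* Make it execute-only: with the reverted patch applied, a direct
	 * userspace load from p would now fault... */
	if (mprotect(p, page, PROT_EXEC))
		return 1;

	/* ...but the kernel still copies the "unreadable" bytes out for us,
	 * since its own accesses are not bound by the EL0 execute-only
	 * permission. The secret 0xC0 filler leaks to stdout. */
	return write(STDOUT_FILENO, p, 16) == 16 ? 0 : 1;
}
```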
Diffstat (limited to 'arch/arm64'):

-rw-r--r--  arch/arm64/include/asm/pgtable.h | 11
-rw-r--r--  arch/arm64/mm/fault.c            |  5

2 files changed, 8 insertions(+), 8 deletions(-)
```diff
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e4c60d6e18b8..aa150ed99f22 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -86,13 +86,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_COPY
 #define __P011  PAGE_COPY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_COPY_EXEC
 #define __P111  PAGE_COPY_EXEC
@@ -101,7 +100,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
```
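In these tables the three digits of __Pxyz / __Sxyz are the requested read/write/execute bits of an mmap() or mprotect() protection; the __P rows serve private (copy-on-write) mappings and the __S rows shared ones. With PAGE_EXECONLY removed, a PROT_EXEC-only request (__P100 / __S100) quietly degrades to PAGE_READONLY_EXEC, i.e. the page remains readable from user space. A sketch of how the generic mm code consumes these tables, simplified from mm/mmap.c (the upstream version also folds in an arch-specific hook):

```c
#include <linux/mm.h>

/* The 16 __P/__S entries above populate protection_map[], indexed by
 * the low vm_flags bits: VM_READ = 0x1, VM_WRITE = 0x2, VM_EXEC = 0x4,
 * VM_SHARED = 0x8. */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]));
}
```

The remaining pgtable.h hunks undo the helper rename that PAGE_EXECONLY had forced: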
```diff
@@ -137,8 +136,8 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
-#define pte_valid_ng(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_NG)) == (PTE_VALID | PTE_NG))
+#define pte_valid_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -192,7 +191,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_valid_ng(pte)) {
+	if (pte_valid_user(pte)) {
 		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
 		if (pte_dirty(pte) && pte_write(pte))
```
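The switch back from pte_valid_ng() to pte_valid_user() is safe for the same reason: an execute-only PTE had to clear PTE_USER (AP[1]) to deny EL0 loads, so a PTE_USER-based validity test would have made set_pte_at() skip the I-cache/D-cache synchronisation for exactly those executable mappings; hence the reverted commit keyed on PTE_NG instead. A standalone illustration of the two tests, using the arm64 bit positions from pgtable-hwdef.h (for demonstration only):

```c
#include <stdio.h>

/* Relevant arm64 PTE bits, as defined in pgtable-hwdef.h. */
#define PTE_VALID	(1UL << 0)
#define PTE_USER	(1UL << 6)	/* AP[1]: EL0 access allowed */
#define PTE_NG		(1UL << 11)	/* not-global */

#define VALID_USER(p)	(((p) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define VALID_NG(p)	(((p) & (PTE_VALID | PTE_NG)) == (PTE_VALID | PTE_NG))

int main(void)
{
	unsigned long readonly_exec = PTE_VALID | PTE_USER | PTE_NG;
	unsigned long execonly      = PTE_VALID | PTE_NG;  /* PTE_USER clear */

	/* pte_valid_user() misses the exec-only pte; pte_valid_ng() catches
	 * both, which is why the reverted commit needed it. */
	printf("readonly_exec: user=%d ng=%d\n",
	       VALID_USER(readonly_exec), VALID_NG(readonly_exec));
	printf("execonly:      user=%d ng=%d\n",
	       VALID_USER(execonly), VALID_NG(execonly));
	return 0;
}
```

With PAGE_EXECONLY gone, every valid user pte carries PTE_USER again, so the simpler test covers all cases. The fault-handling side of the revert follows: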
```diff
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 89c6763d5e7e..bcc965e2cce1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -173,7 +173,8 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 good_area:
 	/*
 	 * Check that the permissions on the VMA allow for the fault which
-	 * occurred.
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
 	 */
 	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
@@ -195,7 +196,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
```
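Finally, restoring VM_EXEC to the default vm_flags mask re-widens the VMA permission check described by the restored comment in the first fault.c hunk. A hedged sketch of the flag arithmetic (the surrounding do_page_fault() logic and the ESR decoding are simplified away):

```c
#include <linux/mm.h>

/* Sketch only: is the faulting access compatible with the VMA? */
static bool access_permitted(struct vm_area_struct *vma, bool is_write)
{
	/* With the revert, the default again accepts read, write or exec... */
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;

	/* ...and the mask is narrowed when the fault is known to be a
	 * write (the real code derives this, and more, from the ESR). */
	if (is_write)
		vm_flags = VM_WRITE;

	/* The restored comment describes exactly this test: the access is
	 * bad only if the VMA has none of the required permissions. */
	return vma->vm_flags & vm_flags;
}
```

In particular, a non-write fault now passes this check whenever the VMA carries any of the three permissions, matching the pre-exec-only behaviour and dropping the pretence of execute-only enforcement.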