author		Catalin Marinas <catalin.marinas@arm.com>	2017-01-27 05:54:12 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-05-14 08:00:15 -0400
commit		f46fdb8a2611b4ffe9dda5b712ac02add90e930f (patch)
tree		16c94a7326cce07bfbc8cf825773d33d283913ee
parent		aadb7e073e813a447ba6c5f5636702f07e8805dd (diff)
arm64: Improve detection of user/non-user mappings in set_pte(_at)
commit ec663d967b2276448a416406ca59ff247c0c80c5 upstream.
Commit cab15ce604e5 ("arm64: Introduce execute-only page access
permissions") allowed a valid user PTE to have the PTE_USER bit clear.
As a consequence, the pte_valid_not_user() macro in set_pte() was
replaced with pte_valid_global() under the assumption that only user
pages have the nG bit set. EFI mappings, however, also have the nG bit
set and set_pte() wrongly ignores issuing the DSB+ISB.
This patch reinstates the pte_valid_not_user() macro and adds the
PTE_UXN bit check since all kernel mappings have this bit set. For
clarity, pte_exec() is renamed to pte_user_exec() as it only checks for
the absence of PTE_UXN. Consequently, the user executable check in
set_pte_at() drops the pte_ng() test since pte_user_exec() is
sufficient.
Fixes: cab15ce604e5 ("arm64: Introduce execute-only page access permissions")
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
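To make the predicate change described above concrete, here is a minimal stand-alone C sketch (not part of the patch) that models the old and new checks. It assumes the usual arm64 bit positions from pgtable-hwdef.h (PTE_VALID at bit 0, PTE_USER at bit 6, PTE_NG at bit 11, PTE_UXN at bit 54); the sample PTE values are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed arm64 PTE bit positions (see arch/arm64/include/asm/pgtable-hwdef.h). */
#define PTE_VALID	(UINT64_C(1) << 0)
#define PTE_USER	(UINT64_C(1) << 6)	/* AP[1] */
#define PTE_NG		(UINT64_C(1) << 11)	/* non-global */
#define PTE_UXN		(UINT64_C(1) << 54)	/* user execute-never */

/* Old check: "valid and global" was taken to mean "kernel mapping". */
static int pte_valid_global(uint64_t pte)
{
	return (pte & (PTE_VALID | PTE_NG)) == PTE_VALID;
}

/* New check: valid and not a user mapping; all valid kernel mappings set UXN. */
static int pte_valid_not_user(uint64_t pte)
{
	return (pte & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);
}

int main(void)
{
	uint64_t user_pte  = PTE_VALID | PTE_USER | PTE_NG;	/* ordinary user page */
	uint64_t xonly_pte = PTE_VALID | PTE_NG;		/* execute-only user page (no PTE_USER) */
	uint64_t efi_pte   = PTE_VALID | PTE_NG | PTE_UXN;	/* EFI runtime mapping: nG and UXN set */

	/* The old check treated the EFI mapping as "user" (nG set) and skipped the barriers... */
	assert(!pte_valid_global(efi_pte));
	/* ...the new check classifies it as non-user, so set_pte() issues the DSB+ISB. */
	assert(pte_valid_not_user(efi_pte));

	/* User mappings, including execute-only ones, still skip the barriers. */
	assert(!pte_valid_not_user(user_pte));
	assert(!pte_valid_not_user(xonly_pte));

	puts("predicates behave as the commit message describes");
	return 0;
}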
-rw-r--r--	arch/arm64/include/asm/pgtable.h	| 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffbb9a520563..61e214015b38 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -71,9 +71,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
-#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
+#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
-#define pte_ng(pte)		(!!(pte_val(pte) & PTE_NG))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -84,8 +83,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_global(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_NG)) == PTE_VALID)
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
+#define pte_valid_not_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 
@@ -178,7 +181,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_global(pte)) {
+	if (pte_valid_not_user(pte)) {
 		dsb(ishst);
 		isb();
 	}
@@ -212,7 +215,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_val(pte) &= ~PTE_RDONLY;
 	else
 		pte_val(pte) |= PTE_RDONLY;
-	if (pte_ng(pte) && pte_exec(pte) && !pte_special(pte))
+	if (pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte, addr);
 }
 
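As a usage note on the set_pte_at() hunk, the small sketch below (same assumed bit layout as above, again not from the patch) checks that, for well-formed PTEs where kernel mappings carry PTE_UXN and user mappings carry nG, the old pte_ng() && pte_exec() condition and the new pte_user_exec() condition agree, which is why the pte_ng() test could be dropped.

#include <assert.h>
#include <stdint.h>

/* Same assumed arm64 PTE bit positions as in the sketch above. */
#define PTE_VALID	(UINT64_C(1) << 0)
#define PTE_USER	(UINT64_C(1) << 6)	/* AP[1] */
#define PTE_NG		(UINT64_C(1) << 11)	/* non-global */
#define PTE_UXN		(UINT64_C(1) << 54)	/* user execute-never */

static int pte_ng(uint64_t pte)		{ return !!(pte & PTE_NG); }
static int pte_user_exec(uint64_t pte)	{ return !(pte & PTE_UXN); }

int main(void)
{
	/* Well-formed mappings: kernel PTEs carry UXN, user PTEs carry nG. */
	uint64_t samples[] = {
		PTE_VALID | PTE_USER | PTE_NG,			/* executable user page */
		PTE_VALID | PTE_USER | PTE_NG | PTE_UXN,	/* non-executable user page */
		PTE_VALID | PTE_NG,				/* execute-only user page */
		PTE_VALID | PTE_UXN,				/* kernel mapping */
		PTE_VALID | PTE_NG | PTE_UXN,			/* EFI runtime mapping */
	};

	/* A user-executable PTE is necessarily non-global, so prefixing the
	 * condition with pte_ng() added no information. */
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert((pte_ng(samples[i]) && pte_user_exec(samples[i])) ==
		       pte_user_exec(samples[i]));

	return 0;
}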