author     Steve Capper <Steve.Capper@arm.com>          2014-01-15 09:07:13 -0500
committer  Catalin Marinas <catalin.marinas@arm.com>    2014-01-31 06:30:49 -0500
commit     c2c93e5b7f3f42277ec25ff3746096abc0c0d0f7
tree       ef50b27ee3249facbc8dd809937445cdaefbc028 /arch/arm64/include
parent     44b6dfc556811c0e6981329e29e1865fc7a8847d
arm64: mm: Introduce PTE_WRITE
We have the following means for encoding writable or dirty ptes:
                           PTE_DIRTY   PTE_RDONLY
  !pte_dirty && !pte_write     0            1
  !pte_dirty &&  pte_write     0            1
   pte_dirty && !pte_write     1            1
   pte_dirty &&  pte_write     1            0
So we can't distinguish between writable clean ptes and read-only ptes. This can cause problems, with ptes being incorrectly flagged as read-only when they are writable but not dirty.

This patch introduces a new software bit, PTE_WRITE, which allows us to correctly identify writable ptes. PTE_RDONLY is now clear only for valid ptes where a page is both writable and dirty.
Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
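
For reference, a minimal standalone sketch (plain C, not part of the patch; pteval_t simplified to uint64_t, bit positions taken from the hunks below) of how the new software bit makes all four write/dirty combinations distinguishable:

    /* Simplified model of the software pte bits touched by this patch. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t pteval_t;

    #define PTE_DIRTY ((pteval_t)1 << 55) /* software dirty bit */
    #define PTE_WRITE ((pteval_t)1 << 57) /* new software write bit */

    /* The accessors no longer overload the hardware PTE_RDONLY bit. */
    static bool pte_dirty(pteval_t pte) { return pte & PTE_DIRTY; }
    static bool pte_write(pteval_t pte) { return pte & PTE_WRITE; }

    static pteval_t pte_wrprotect(pteval_t pte) { return pte & ~PTE_WRITE; }
    static pteval_t pte_mkwrite(pteval_t pte)   { return pte | PTE_WRITE; }

A writable-but-clean pte (PTE_WRITE set, PTE_DIRTY clear) is now a distinct encoding from a read-only pte (both clear), which is exactly the case the old PTE_RDONLY-based pte_write() could not express.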
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 48
1 file changed, 25 insertions, 23 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 27c78395ae76..b524dcd17243 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -28,7 +28,7 @@
 #define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-/* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -67,15 +67,15 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
 #define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
@@ -83,13 +83,13 @@ extern pgprot_t pgprot_default;
 #define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 
 #endif /* __ASSEMBLY__ */
 
@@ -140,7 +140,7 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & PTE_AF)
 #define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
+#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
@@ -148,13 +148,13 @@ extern struct page *empty_zero_page;
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_val(pte) |= PTE_RDONLY;
+	pte_val(pte) &= ~PTE_WRITE;
 	return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) &= ~PTE_RDONLY;
+	pte_val(pte) |= PTE_WRITE;
 	return pte;
 }
 
@@ -201,8 +201,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (pte_valid_user(pte)) {
 		if (pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (!pte_dirty(pte))
-			pte = pte_wrprotect(pte);
+		if (pte_dirty(pte) && pte_write(pte))
+			pte_val(pte) &= ~PTE_RDONLY;
+		else
+			pte_val(pte) |= PTE_RDONLY;
 	}
 
 	set_pte(ptep, pte);
@@ -376,7 +378,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
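
As a rough illustration of the set_pte_at() hunk above, here is a sketch under the assumption that PTE_RDONLY is the hardware AP[2] read-only bit (bit 7, defined in pgtable-hwdef.h rather than in this header): when a valid user pte is installed, the hardware bit is derived from the two software bits, so a writable-but-clean pte stays read-only in hardware and the first store faults, letting the kernel mark the page dirty before clearing PTE_RDONLY.

    #include <stdint.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)  /* hardware read-only bit (assumed position) */
    #define PTE_DIRTY  ((pteval_t)1 << 55) /* software dirty bit */
    #define PTE_WRITE  ((pteval_t)1 << 57) /* software write bit */

    /* Mirrors the new logic applied to valid user ptes in set_pte_at(). */
    static pteval_t apply_hw_rdonly(pteval_t pte)
    {
        if ((pte & PTE_DIRTY) && (pte & PTE_WRITE))
            pte &= ~PTE_RDONLY; /* writable and dirty: allow hardware writes */
        else
            pte |= PTE_RDONLY;  /* clean or write-protected: trap the first write */
        return pte;
    }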