author	Will Deacon <will.deacon@arm.com>	2014-02-21 11:01:48 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-02-25 06:32:40 -0500
commit	1971188aa19651d8f447211c6535fb68661d77c5 (patch)
tree	bfaccbce3e19113ebd908b59cd7208d8012709ca
parent	c32ffce0f66e5d1d4856254516e24f5ef275cd00 (diff)
ARM: 7985/1: mm: implement pte_accessible for faulting mappings
The pte_accessible macro can be used to identify page table entries
capable of being cached by a TLB. In principle, this differs from
pte_present, since PROT_NONE mappings are mapped using invalid entries
identified as present and ptes designated as `old' can use either
invalid entries or those with the access flag cleared (guaranteed not
to be in the TLB). However, there is a race to take care of, as
described in 20841405940e ("mm: fix TLB flush race between migration,
and change_protection_range"), between a page being migrated and
mprotected at the same time. In this case, we can check whether a TLB
invalidation is pending for the mm and, if so, temporarily consider
PROT_NONE mappings as valid.

This patch implements a quick pte_accessible macro for ARM by simply
checking if the pte is valid/present depending on the mm. For classic
MMU, these checks are identical and will generate some false positives
for PROT_NONE mappings, but this is better than the current
asm-generic definition of ((void)(pte),1).

Finally, pte_present_user is moved to use pte_valid (and renamed
appropriately) since we don't care about cache flushing for faulting
mappings.

Acked-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
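For context on where this macro matters: the generic mm code consults
pte_accessible() when tearing down a pte to decide whether a TLB
invalidation is actually required. The sketch below is paraphrased
from memory of the generic ptep_clear_flush() in mm/pgtable-generic.c
of this era; it is not part of this patch, only an illustration of the
call site that the new ARM definition feeds into.

/*
 * Sketch (not this patch): the generic helper skips flush_tlb_page()
 * whenever the architecture reports that the old pte could never have
 * been cached in the TLB.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;

	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}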
-rw-r--r--	arch/arm/include/asm/pgtable.h | 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 7d59b524f2af..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte)	\
+	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long ext = 0;
 
-	if (addr < TASK_SIZE && pte_present_user(pteval)) {
+	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
 		__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
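To make the new macro's behaviour concrete, here is a small standalone
user-space demonstration of the same ternary logic. The bit positions
are hypothetical, chosen only so that `present' and `valid' can differ
the way they do for PROT_NONE mappings; the real definitions live in
arch/arm/include/asm/pgtable-2level.h and pgtable-3level.h, and the
real macro takes an mm_struct rather than a boolean.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical bit layout for illustration only. */
#define L_PTE_VALID	(1UL << 0)
#define L_PTE_PRESENT	(1UL << 1)

static bool pte_present(unsigned long pte) { return pte & L_PTE_PRESENT; }
static bool pte_valid(unsigned long pte)   { return pte & L_PTE_VALID; }

/*
 * Mirrors the new pte_accessible(): while a TLB invalidation is
 * pending for the mm, a present-but-invalid (PROT_NONE-style) entry
 * must still be treated as accessible so the racing flush is not
 * skipped.
 */
static bool pte_accessible(bool tlb_flush_pending, unsigned long pte)
{
	return tlb_flush_pending ? pte_present(pte) : pte_valid(pte);
}

int main(void)
{
	unsigned long prot_none = L_PTE_PRESENT;		/* present, not valid */
	unsigned long mapped	= L_PTE_PRESENT | L_PTE_VALID;

	printf("no flush pending: prot_none=%d mapped=%d\n",
	       pte_accessible(false, prot_none), pte_accessible(false, mapped));
	printf("flush pending:    prot_none=%d mapped=%d\n",
	       pte_accessible(true, prot_none), pte_accessible(true, mapped));
	return 0;
}

With no flush pending, the PROT_NONE entry is reported inaccessible
(it can never be in the TLB); with a flush pending, it is
conservatively reported accessible, which is exactly the race the
commit message describes.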