about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2013-12-17 13:17:11 -0500
committerRussell King <rmk+kernel@arm.linux.org.uk>2013-12-29 07:46:47 -0500
commite1a5848e3398dca135f3ae77fe2e01145f9d8826 (patch)
treeb022e51a78e1d923c4ba7ddd342ff209df154174
parenta472b09dd5bb00f7da3087f2a324eb963e8eaa9f (diff)
ARM: 7924/1: mm: don't bother with reserved ttbr0 when running with LPAE
With the new ASID allocation algorithm, active ASIDs at the time of a rollover event will be marked as reserved, so active mm_structs can continue to operate with the same ASID as before. This in turn means that we don't need to worry about allocating a new ASID to an mm that is currently active (installed in TTBR0). Since updating the pgd and ASID is atomic on LPAE systems (by virtue of the two being fields in the same hardware register), we can dispose of the reserved TTBR0 and rely on whatever tables we currently have live. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/mm/context.c  21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 84e6f772e204..3ad0fdaa5cc1 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -78,20 +78,21 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
78#endif 78#endif
79 79
80#ifdef CONFIG_ARM_LPAE 80#ifdef CONFIG_ARM_LPAE
81static void cpu_set_reserved_ttbr0(void) 81/*
82{ 82 * With LPAE, the ASID and page tables are updated atomicly, so there is
83 /* 83 * no need for a reserved set of tables (the active ASID tracking prevents
84 * Set TTBR0 to swapper_pg_dir which contains only global entries. The 84 * any issues across a rollover).
85 * ASID is set to 0. 85 */
86 */ 86#define cpu_set_reserved_ttbr0()
87 cpu_set_ttbr(0, __pa(swapper_pg_dir));
88 isb();
89}
90#else 87#else
91static void cpu_set_reserved_ttbr0(void) 88static void cpu_set_reserved_ttbr0(void)
92{ 89{
93 u32 ttb; 90 u32 ttb;
94 /* Copy TTBR1 into TTBR0 */ 91 /*
92 * Copy TTBR1 into TTBR0.
93 * This points at swapper_pg_dir, which contains only global
94 * entries so any speculative walks are perfectly safe.
95 */
95 asm volatile( 96 asm volatile(
96 " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n" 97 " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
97 " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n" 98 " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"