about | summary | refs | log | tree | commit | diff | stats
path: root/arch/arm/mm/context.c
diff options
context:
space:
mode:
author:    Will Deacon <will.deacon@arm.com>  2011-05-31 10:38:43 -0400
committer: Catalin Marinas <catalin.marinas@arm.com>  2012-04-17 10:29:21 -0400
commit 3c5f7e7b4a0346de670b08f595bd15e7eec91f97 (patch)
tree   ab49b3cd2cc56f83f569350c0dd2a2499de80a09 /arch/arm/mm/context.c
parent e816b57a337ea3b755de72bec38c10c864f23015 (diff)
ARM: Use TTBR1 instead of reserved context ID
On ARMv7 CPUs that cache first level page table entries (like the Cortex-A15), using a reserved ASID while changing the TTBR or flushing the TLB is unsafe. This is because the CPU may cache the first level entry as the result of a speculative memory access while the reserved ASID is assigned. After the process owning the page tables dies, the memory will be reallocated and may be written with junk values which can be interpreted as global, valid PTEs by the processor. This will result in the TLB being populated with bogus global entries. This patch avoids the use of a reserved context ID in the v7 switch_mm and ASID rollover code by temporarily using the swapper_pg_dir pointed at by TTBR1, which contains only global entries that are not tagged with ASIDs. Reviewed-by: Frank Rowand <frank.rowand@am.sony.com> Tested-by: Marc Zyngier <Marc.Zyngier@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com> [catalin.marinas@arm.com: add LPAE support] Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--  arch/arm/mm/context.c | 45 +++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d606..aaa291fc072e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -23,25 +23,37 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {						\
-	unsigned long ttbl, ttbh;					\
-	asm volatile(							\
-	"	mrrc	p15, 0, %0, %1, c2		@ read TTBR0\n"	\
-	"	mov	%1, %2, lsl #(48 - 32)		@ set ASID\n"	\
-	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"	\
-	: "=&r" (ttbl), "=&r" (ttbh)					\
-	: "r" (asid & ~ASID_MASK));					\
+static void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+static void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +63,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -114,8 +124,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else