author    Will Deacon <will.deacon@arm.com>  2011-05-26 06:23:43 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2011-05-26 07:14:33 -0400
commit    52af9c6cd863fe37d1103035ec7ee22ac1296458
tree      fd6b4e6d7b3bc2881422fb5e735c5b9eda62662a  /arch/arm/mm/context.c
parent    d427958a46af24f75d0017c45eadd172273bbf33
ARM: 6943/1: mm: use TTBR1 instead of reserved context ID

On ARMv7 CPUs that cache first level page table entries (like the
Cortex-A15), using a reserved ASID while changing the TTBR or flushing
the TLB is unsafe.

This is because the CPU may cache the first level entry as the result
of a speculative memory access while the reserved ASID is assigned.
After the process owning the page tables dies, the memory will be
reallocated and may be written with junk values which can be
interpreted as global, valid PTEs by the processor. This will result
in the TLB being populated with bogus global entries.

This patch avoids the use of a reserved context ID in the v7 switch_mm
and ASID rollover code by temporarily using the swapper_pg_dir pointed
at by TTBR1, which contains only global entries that are not tagged
with ASIDs.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
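The core of the fix can be seen in the flush_context() hunk below:
instead of installing a reserved ASID, TTBR0 is temporarily pointed at
the global-only tables already referenced by TTBR1. The following is a
minimal illustrative sketch of that sequence, not part of the patch;
the helper name is hypothetical, and it assumes an ARMv7 kernel build
where u32 and isb() are available.

	/*
	 * Illustrative sketch only; mirrors the flush_context() hunk
	 * below. Hypothetical helper name, ARMv7 kernel context assumed.
	 */
	static inline void ttbr0_point_at_swapper(void)
	{
		u32 ttb;

		/*
		 * Read TTBR1 (swapper_pg_dir: global mappings, no ASID
		 * tags) and write the same value into TTBR0, so
		 * speculative table walks performed during the switch
		 * cannot allocate ASID-tagged entries.
		 */
		asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
			     "mcr	p15, 0, %0, c2, c0, 0"
			     : "=r" (ttb));
		isb();
	}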
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--   arch/arm/mm/context.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..0d86298c7279 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
+		     "mcr	p15, 0, %0, c2, c0, 0"
+		     : "=r" (ttb));
 	isb();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
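
For orientation, this is how the top of flush_context() reads with the
patch applied, assembled from the context and added lines of the hunk
above; the body of the icache_is_vivt_asid_tagged() branch and the rest
of the function fall outside this hunk and are elided.

	static void flush_context(void)
	{
		u32 ttb;
		/* Copy TTBR1 into TTBR0 */
		asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
			     "mcr	p15, 0, %0, c2, c0, 0"
			     : "=r" (ttb));
		isb();
		local_flush_tlb_all();
		if (icache_is_vivt_asid_tagged()) {
			/* ... remainder not shown in this hunk ... */
		}
	}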