Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--  arch/arm/mm/context.c | 57
1 file changed, 28 insertions(+), 29 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d606..806cc4f63516 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -18,30 +18,39 @@
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {					\
-	unsigned long ttbl, ttbh;				\
-	asm volatile(						\
-	"	mrrc	p15, 0, %0, %1, c2	@ read TTBR0\n"	\
-	"	mov	%1, %2, lsl #(48 - 32)	@ set ASID\n"	\
-	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"	\
-	: "=&r" (ttbl), "=&r" (ttbh)				\
-	: "r" (asid & ~ASID_MASK));				\
+void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1	@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0	@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
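
A rough C-level model of the two new cpu_set_reserved_ttbr0() variants may help readers less familiar with the CP15 accessors in the hunk above. This is a sketch only: the model_* functions and the write_ttbr0_64/write_ttbr0_32/read_ttbr1_32 accessors are hypothetical placeholders, not functions from the patch or the kernel.

/* Illustrative sketch only; the patch itself uses the inline asm above. */

/*
 * LPAE: TTBR0 is a 64-bit register written with mcrr. The low word is
 * pointed at the physical address of swapper_pg_dir, which contains only
 * global entries; the high word, which carries the ASID field, is zeroed.
 */
void model_lpae_set_reserved_ttbr0(void)
{
	u64 ttbr0 = (u64)__pa(swapper_pg_dir);	/* high word 0, so ASID = 0 */

	write_ttbr0_64(ttbr0);			/* hypothetical accessor */
	isb();					/* synchronize before further walks */
}

/*
 * Classic MMU: TTBR1 already points at the swapper page tables, so copying
 * it into TTBR0 leaves TTBR0 mapping only global entries.
 */
void model_classic_set_reserved_ttbr0(void)
{
	write_ttbr0_32(read_ttbr1_32());	/* hypothetical accessors */
	isb();
}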
@@ -51,9 +60,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
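
Pieced together from the context and added lines of this hunk, the start of flush_context() after the patch looks like the sketch below; the closing braces and the rest of the body are assumed, since they fall outside the hunk. The ordering matters: TTBR0 must point at the global-only tables before the TLB flush, so no walk can refill an entry tagged with a stale ASID.

static void flush_context(void)
{
	/* Point TTBR0 at global-only swapper tables (ASID 0) first... */
	cpu_set_reserved_ttbr0();
	/* ...then invalidate, so stale per-ASID entries cannot be refetched. */
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		/* ... remainder of the function is outside this hunk ... */
	}
}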
@@ -98,14 +105,7 @@ static void reset_context(void *info)
 {
 	unsigned int asid;
 	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = per_cpu(current_mm, cpu);
-
-	/*
-	 * Check if a current_mm was set on this CPU as it might still
-	 * be in the early booting stages and using the reserved ASID.
-	 */
-	if (!mm)
-		return;
+	struct mm_struct *mm = current->active_mm;
 
 	smp_rmb();
 	asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else
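
Likewise, reassembling the last two hunks gives the shape of reset_context() after the patch. The mm now comes from current->active_mm instead of the removed per-CPU current_mm variable, and the new ASID is installed via a full cpu_switch_mm(), whose processor-specific switch code sets both the page tables and the ASID, replacing the bare cpu_set_asid() + isb() pair. Lines between the two hunks are elided.

static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	/* ... lines between the two hunks elided ... */

	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}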