Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c	124
1 file changed, 110 insertions(+), 14 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa1..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
 }
 
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_wmb();
+		smp_call_function(reset_context, NULL, 1);
+#endif
+		cpu_last_asid += NR_CPUS;
 	}
-	spin_unlock(&cpu_asid_lock);
 
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-	mm->context.id = asid;
+	set_mm_context(mm, asid);
+	spin_unlock(&cpu_asid_lock);
 }
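
For reference, the id_lock initialised in __init_new_context is a new field in the ARM mm context structure, added alongside this change in arch/arm/include/asm/mmu.h. A minimal sketch of the shape the code relies on (layout reproduced from memory, other fields omitted; treat it as an assumption rather than the authoritative definition):

/* Sketch only: the two fields this patch touches in mm_context_t. */
typedef struct {
	unsigned int id;	/* generation in the high bits, hw ASID in the low ASID_BITS */
	spinlock_t id_lock;	/* protects id against concurrent rollover updates */
} mm_context_t;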
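The test that recurs above, (mm->context.id ^ cpu_last_asid) >> ASID_BITS, compares only the generation half of the context ID: the XOR cancels the high bits exactly when both values belong to the current generation, so a non-zero result means the mm still carries an ASID from an older generation and needs a new one. Below is a standalone user-space model of that check; the constants mirror arch/arm/include/asm/mmu_context.h of this period (ASID_BITS is 8 on ARMv6/v7) and should be read as assumptions, not authoritative values.

#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

/* Non-zero iff context_id was allocated in an older generation. */
static unsigned int asid_is_stale(unsigned int context_id,
				  unsigned int cpu_last_asid)
{
	return (context_id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int last = ASID_FIRST_VERSION + 5;	/* generation 1, hw ASID 5 */

	/* Same generation: the hardware ASID is still valid. */
	printf("%u\n", asid_is_stale(ASID_FIRST_VERSION + 3, last));	/* 0 */

	/*
	 * Older generation, or the fresh id == 0 set by
	 * __init_new_context: a new ASID must be allocated.
	 */
	printf("%u\n", asid_is_stale(0, last));				/* 1 */
	return 0;
}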
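On rollover, each live CPU gets its own slot in the new generation: the CPU that detected the rollover takes cpu_last_asid + smp_processor_id() + 1, every IPI'd CPU takes cpu_last_asid + cpu + 1 in reset_context, and the counter then advances by NR_CPUS. That guarantees distinct hardware ASIDs per CPU while keeping ASID 0 reserved for the flush window opened by flush_context(), which programs the reserved value into the context ID register while the TLB flush is in flight. A toy run of the numbering (the NR_CPUS value here is illustrative):

#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	(~0u << ASID_BITS)
#define NR_CPUS		4	/* illustrative */

int main(void)
{
	/* Last hw ASID of generation 2; the next increment rolls over. */
	unsigned int cpu_last_asid = (2u << ASID_BITS) | 0xff;
	unsigned int asid = ++cpu_last_asid;

	if ((asid & ~ASID_MASK) == 0) {	/* hw ASID wrapped to 0: rollover */
		unsigned int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%u -> context id %#x (hw asid %u)\n", cpu,
			       cpu_last_asid + cpu + 1,
			       (cpu_last_asid + cpu + 1) & ~ASID_MASK);
		cpu_last_asid += NR_CPUS;	/* next allocation starts past the per-cpu slots */
	}
	return 0;
}

CPUs 0-3 receive hardware ASIDs 1-4 in the new generation; ASID 0 is never handed out, so a stale translation can never be confused with the reserved mapping used during the flush.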