Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--  arch/arm/mm/context.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 3172781a8e2e..5ac09e8b4030 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -38,9 +38,9 @@
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-static u64 cpu_last_asid = ASID_FIRST_VERSION;
+static atomic64_t cpu_last_asid = ATOMIC64_INIT(ASID_FIRST_VERSION);
 
-static DEFINE_PER_CPU(u64, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -113,9 +113,10 @@ static void flush_context(unsigned int cpu)
 	int i;
 
 	/* Update the list of reserved ASIDs. */
-	per_cpu(active_asids, cpu) = 0;
 	for_each_possible_cpu(i)
-		per_cpu(reserved_asids, i) = per_cpu(active_asids, i);
+		per_cpu(reserved_asids, i) =
+			atomic64_xchg(&per_cpu(active_asids, i), 0);
+	per_cpu(reserved_asids, cpu) = 0;
 
 	/* Queue a TLB invalidate and flush the I-cache if necessary. */
 	if (!tlb_ops_need_broadcast())
@@ -145,7 +146,8 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		 * Our current ASID was active during a rollover, we can
 		 * continue to use it and this was just a false alarm.
 		 */
-		asid = (cpu_last_asid & ASID_MASK) | (asid & ~ASID_MASK);
+		asid = (atomic64_read(&cpu_last_asid) & ASID_MASK) | \
+			(asid & ~ASID_MASK);
 	} else {
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
@@ -153,7 +155,7 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		 * as requiring flushes.
 		 */
 		do {
-			asid = ++cpu_last_asid;
+			asid = atomic64_inc_return(&cpu_last_asid);
 			if ((asid & ~ASID_MASK) == 0)
 				flush_context(cpu);
 		} while (is_reserved_asid(asid, ~ASID_MASK));
@@ -177,17 +179,22 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
+	if (!((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+		goto switch_mm_fastpath;
+
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ cpu_last_asid) >> ASID_BITS)
+	if ((mm->context.id ^ atomic64_read(&cpu_last_asid)) >> ASID_BITS)
 		new_context(mm, cpu);
 
-	*this_cpu_ptr(&active_asids) = mm->context.id;
+	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
 		local_flush_tlb_all();
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+switch_mm_fastpath:
 	cpu_switch_mm(mm->pgd, mm);
 }
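
For readers following the logic, below is a minimal userspace C11 sketch of the lockless fastpath this patch introduces. It is an illustration, not the kernel code: try_fastpath(), rollover(), last_asid and active_asid are invented stand-ins for check_and_switch_context(), flush_context(), cpu_last_asid and one slot of the per-CPU active_asids array, and C11 atomics stand in for the kernel's atomic64_* helpers.

/* Sketch only: one "CPU slot", C11 atomics instead of atomic64_*. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8	/* generation lives in the bits above */

/* Stand-in for cpu_last_asid: generation above ASID_BITS, ASID below. */
static atomic_uint_least64_t last_asid = (1ULL << ASID_BITS);

/* Stand-in for one CPU's active_asids slot. */
static atomic_uint_least64_t active_asid;

/*
 * Fastpath: usable only if the task's ASID is from the current
 * generation AND we can republish it before a rollover zeroes the
 * slot. Reading 0 back from the exchange means a rollover raced with
 * us, so the caller must fall back to the locked slowpath.
 */
static int try_fastpath(uint64_t ctx_asid)
{
	if ((ctx_asid ^ atomic_load(&last_asid)) >> ASID_BITS)
		return 0;	/* stale generation */
	return atomic_exchange(&active_asid, ctx_asid) != 0;
}

/*
 * Rollover: steal the active ASID with an exchange, exactly so that a
 * concurrent fastpath observes the 0. (The kernel's flush_context()
 * keeps the stolen values in reserved_asids; this sketch discards
 * them, and bumps the generation directly for brevity.)
 */
static void rollover(void)
{
	atomic_exchange(&active_asid, 0);
	atomic_fetch_add(&last_asid, 1ULL << ASID_BITS);
}

int main(void)
{
	uint64_t asid = (1ULL << ASID_BITS) | 0x2a;	/* gen 1, ASID 42 */

	atomic_store(&active_asid, asid);
	printf("fastpath: %d\n", try_fastpath(asid));	/* 1: lock avoided */

	rollover();
	printf("fastpath: %d\n", try_fastpath(asid));	/* 0: take slowpath */
	return 0;
}

The design point both sides share is an exchange on the same word: rollover steals the slot while the fastpath republishes into it, so whichever runs second observes the other, and a stale ASID can never be switched in without going through cpu_asid_lock.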