Diffstat (limited to 'arch/arm/mm/context.c')
 -rw-r--r--  arch/arm/mm/context.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
 	return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	u64 asid = mm->context.id;
+	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		cpumask_clear(mm_cpumask(mm));
 	}
 
-	mm->context.id = asid;
+	return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
+	u64 asid;
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -198,20 +199,27 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
-	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-		new_context(mm, cpu);
-
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+		local_flush_bp_all();
 		local_flush_tlb_all();
+		dummy_flush_tlb_a15_erratum();
+	}
+
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
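Note: the diff above converts mm->context.id to an atomic64_t and has callers take a single atomic snapshot of it, so the generation check and the publication of a newly allocated ASID can no longer race with a concurrent rollover. Below is a minimal, stand-alone user-space sketch of that snapshot-then-publish pattern using C11 atomics. It is not kernel code: the names context_id and asid_is_current are invented for illustration, and the "allocation" on the slow path is faked (the real allocator runs under cpu_asid_lock).

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8

/* The first generation starts above the ASID space, mirroring the kernel. */
static atomic_uint_fast64_t asid_generation = (1ULL << ASID_BITS);
static atomic_uint_fast64_t context_id;		/* stand-in for mm->context.id */

/* Non-zero when the snapshot still belongs to the current generation. */
static int asid_is_current(uint64_t asid)
{
	return !((asid ^ atomic_load(&asid_generation)) >> ASID_BITS);
}

int main(void)
{
	/* Read the context ID once, as the patched fastpath does. */
	uint64_t asid = atomic_load(&context_id);

	if (!asid_is_current(asid)) {
		/* Slow path: fake an allocation, then publish atomically. */
		asid = atomic_load(&asid_generation) | 1;
		atomic_store(&context_id, asid);
	}
	printf("asid = 0x%llx\n", (unsigned long long)asid);
	return 0;
}

Because every reader works from its own snapshot and the new value is published with a single atomic store, a rollover on another CPU can at worst make this CPU take the slow path again; it can no longer observe a torn or half-updated context ID.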
