author     Will Deacon <will.deacon@arm.com>             2013-02-28 11:47:20 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2013-03-03 17:54:13 -0500
commit     37f47e3d62533c931b04cb409f2eb299e6342331
tree       346daa43b9edd75249d72a61c429a62cf662da4b
parent     d61947a164760ac520cb416768afdf38c33d60e7
ARM: 7658/1: mm: fix race updating mm->context.id on ASID rollover
If a thread triggers an ASID rollover, other threads of the same process
must be made to wait until the mm->context.id for the shared mm_struct
has been updated to the new generation and the associated book-keeping
(e.g. TLB invalidation) has been performed.
However, there is a *tiny* window where both mm->context.id and the
relevant active_asids entry are updated to the new generation, but the
TLB flush has not yet been performed, which could allow another thread to
return to userspace with a dirty TLB, potentially leading to data
corruption. In reality this will never occur, because one CPU would need
to perform a context-switch in the time it takes another to do a couple
of atomic test/set operations, but we should plug the race anyway.
This patch delays the active_asids update until after the potential TLB
flush on context-switch.
Cc: <stable@vger.kernel.org> # 3.8
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
 arch/arm/mm/context.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..03ba181e359c 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -207,11 +207,11 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
 		new_context(mm, cpu);
 
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
-
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
 		local_flush_tlb_all();
+
+	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
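For illustration only (not part of the commit): below is a commented sketch of the
reordered slow path in check_and_switch_context() after this change. Declarations,
the fast path, and the cpu_asid_lock acquisition that precede this code are elided
and assumed from context; the comments are added here to spell out why the ordering
matters.

	/* Slow path: runs with cpu_asid_lock held and interrupts disabled. */
	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
		new_context(mm, cpu);	/* roll over to an ASID of the new generation */

	/* Complete any TLB invalidation pending for this CPU first... */
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	/*
	 * ...and only then publish the new generation in active_asids, so
	 * that another thread of this mm cannot observe the updated
	 * mm->context.id/active_asids pair and return to userspace while
	 * stale TLB entries are still present.
	 */
	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);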