diff options
author | Marc Zyngier <Marc.Zyngier@arm.com> | 2013-06-21 07:07:27 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-07-21 21:21:34 -0400 |
commit | e6a01df4cd3a38e8dcc05d763c0f793b2f30c5f2 (patch) | |
tree | 9cec5f21f1d4b5c46be63e57f981d0d9f261678d /arch/arm/mm | |
parent | 4aa6022129a8b5e0b0e42815521071ce7a766a84 (diff) |
ARM: 7769/1: Cortex-A15: fix erratum 798181 implementation
commit 0d0752bca1f9a91fb646647aa4abbb21156f316c upstream.
Looking into the active_asids array is not enough, as we also need
to look into the reserved_asids array (they both represent processes
that are currently running).
Also, not holding the ASID allocator lock is racy, as another CPU
could schedule that process and trigger a rollover, making the erratum
workaround miss an IPI.
Exposing this outside of context.c is a little ugly on the side, so
let's define a new entry point that the erratum workaround can call
to obtain the cpumask.
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/context.c | 29 |
1 file changed, 28 insertions, 1 deletion
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 83e09058f96f..eeab06ebd06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -45,10 +45,37 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
45 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | 45 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
46 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | 46 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); |
47 | 47 | ||
48 | DEFINE_PER_CPU(atomic64_t, active_asids); | 48 | static DEFINE_PER_CPU(atomic64_t, active_asids); |
49 | static DEFINE_PER_CPU(u64, reserved_asids); | 49 | static DEFINE_PER_CPU(u64, reserved_asids); |
50 | static cpumask_t tlb_flush_pending; | 50 | static cpumask_t tlb_flush_pending; |
51 | 51 | ||
#ifdef CONFIG_ARM_ERRATA_798181
/*
 * Collect into @mask the set of online CPUs (other than @this_cpu) that
 * are currently running the ASID of @mm, i.e. the CPUs the erratum
 * 798181 workaround must target with an IPI.
 *
 * cpu_asid_lock is held across the scan: per the commit message, without
 * it another CPU could schedule this mm and trigger an ASID rollover,
 * causing the workaround to miss a CPU that needed notifying.
 */
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	unsigned long flags;
	u64 mm_asid, running_asid;
	int cpu;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	mm_asid = mm->context.id.counter;

	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * An IPI is needed only when this CPU runs the ASID
		 * being invalidated.  When active_asids is zero, the
		 * CPU's current ASID lives in reserved_asids instead
		 * (both arrays describe currently-running processes).
		 */
		running_asid = per_cpu(active_asids, cpu).counter;
		if (!running_asid)
			running_asid = per_cpu(reserved_asids, cpu);
		if (running_asid == mm_asid)
			cpumask_set_cpu(cpu, mask);
	}

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif
78 | |||
52 | #ifdef CONFIG_ARM_LPAE | 79 | #ifdef CONFIG_ARM_LPAE |
53 | static void cpu_set_reserved_ttbr0(void) | 80 | static void cpu_set_reserved_ttbr0(void) |
54 | { | 81 | { |