author     Marc Zyngier <Marc.Zyngier@arm.com>          2013-06-21 07:07:27 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2013-06-24 10:27:35 -0400
commit     0d0752bca1f9a91fb646647aa4abbb21156f316c (patch)
tree       d5cedcc1ba2f3fd4ed4c095c98851ed1ecf4e110
parent     b8e4a4740fa2b17c0a447b3ab783b3dc10702e27 (diff)
ARM: 7769/1: Cortex-A15: fix erratum 798181 implementation
Looking into the active_asids array is not enough, as we also need to look into the reserved_asids array (they both represent processes that are currently running).

Also, not holding the ASID allocator lock is racy, as another CPU could schedule that process and trigger a rollover, making the erratum workaround miss an IPI.

Exposing this outside of context.c is a little ugly on the side, so let's define a new entry point that the erratum workaround can call to obtain the cpumask.

Cc: <stable@vger.kernel.org> # 3.9
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
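In outline, the new entry point lives next to the ASID allocator so it can take cpu_asid_lock and fall back to reserved_asids when a rollover has cleared a CPU's active ASID. The sketch below is condensed from the arch/arm/mm/context.c hunk in this patch; the comments are editorial glosses, not part of the patch itself.

/* Condensed from the context.c hunk below; kernel-internal, not standalone. */
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	/* Hold the allocator lock so a rollover on another CPU cannot race us. */
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * A CPU whose active ASID has been cleared by an in-progress
		 * rollover is still running the ASID saved in reserved_asids.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);	/* this CPU needs the IPI */
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}

The caller in arch/arm/kernel/smp_tlb.c then simply passes the resulting mask to smp_call_function_many(), as the second hunk below shows.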
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 10
-rw-r--r--  arch/arm/kernel/smp_tlb.c          | 18
-rw-r--r--  arch/arm/mm/context.c              | 29
3 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 2a45c33ebdc8..b5792b7fd8d3 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -28,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 83e09058f96f..eeab06ebd06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -45,10 +45,37 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {