 arch/arm/include/asm/mmu_context.h | 10 +++++++++-
 arch/arm/kernel/smp_tlb.c          | 18 ++----------------
 arch/arm/mm/context.c              | 29 ++++++++++++++++++++++++++++-
 3 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..dc90203c6ddb 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,7 +27,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 83e09058f96f..eeab06ebd06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -45,10 +45,37 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {