author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2018-03-22 18:29:05 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-03-22 23:14:31 -0400
commit	aff6f8cb3e2170b9e58b0932bce7bfb492775e23 (patch)
tree	70b8f5db0da67c2a359166e793539dc1bdd9cdbd
parent	ff6781fd1bb404d8a551c02c35c70cec1da17ff1 (diff)
powerpc/mm: Add tracking of the number of coprocessors using a context
Currently, when using coprocessors (which use the Nest MMU), we simply increment the active_cpus count to force all TLB invalidations to become broadcast. Unfortunately, due to an erratum in POWER9, we will need to know more specifically that coprocessors are in use. This maintains a separate copros counter in the MMU context for that purpose.

NB. The commit mentioned in the fixes tag below is not at fault for the bug we're fixing in this commit and the next, but this fix applies on top of the infrastructure it introduced.

Fixes: 03b8abedf4f4 ("cxl: Enable global TLBIs for cxl contexts")
Cc: stable@vger.kernel.org # v4.15+
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
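For illustration only (not part of this patch): a minimal sketch of how a Nest MMU user such as a cxl-style driver would be expected to pair the new helpers around attaching and detaching a context. The driver hook names below are hypothetical; only mm_context_add_copro() and mm_context_remove_copro() come from this change.

/*
 * Illustrative sketch, not from this patch. Hook names are
 * hypothetical placeholders for a coprocessor driver.
 */
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

static int my_copro_attach(struct mm_struct *mm)
{
	/* copros 0 -> 1 also bumps active_cpus, making TLBIs global */
	mm_context_add_copro(mm);
	/* ... hand the context over to the Nest MMU here ... */
	return 0;
}

static void my_copro_detach(struct mm_struct *mm)
{
	/* ... quiesce the coprocessor first ... */
	/* copros 1 -> 0 flushes the mm and drops active_cpus on radix */
	mm_context_remove_copro(mm);
}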
-rw-r--r--	arch/powerpc/include/asm/book3s/64/mmu.h	3
-rw-r--r--	arch/powerpc/include/asm/mmu_context.h	18
-rw-r--r--	arch/powerpc/mm/mmu_context_book3s64.c	1
3 files changed, 17 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 0abeb0e2d616..37671feb2bf6 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -87,6 +87,9 @@ typedef struct {
 	/* Number of bits in the mm_cpumask */
 	atomic_t active_cpus;
 
+	/* Number of users of the external (Nest) MMU */
+	atomic_t copros;
+
 	/* NPU NMMU context */
 	struct npu_context *npu_context;
 
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 051b3d63afe3..3a15b6db9501 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -92,15 +92,23 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
 static inline void mm_context_add_copro(struct mm_struct *mm)
 {
 	/*
-	 * On hash, should only be called once over the lifetime of
-	 * the context, as we can't decrement the active cpus count
-	 * and flush properly for the time being.
+	 * If any copro is in use, increment the active CPU count
+	 * in order to force TLB invalidations to be global as to
+	 * propagate to the Nest MMU.
 	 */
-	inc_mm_active_cpus(mm);
+	if (atomic_inc_return(&mm->context.copros) == 1)
+		inc_mm_active_cpus(mm);
 }
 
 static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
+	int c;
+
+	c = atomic_dec_if_positive(&mm->context.copros);
+
+	/* Detect imbalance between add and remove */
+	WARN_ON(c < 0);
+
 	/*
 	 * Need to broadcast a global flush of the full mm before
 	 * decrementing active_cpus count, as the next TLBI may be
@@ -111,7 +119,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 	 * for the time being. Invalidations will remain global if
 	 * used on hash.
 	 */
-	if (radix_enabled()) {
+	if (c == 0 && radix_enabled()) {
 		flush_all_mm(mm);
 		dec_mm_active_cpus(mm);
 	}
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f..3f980baade4c 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm_iommu_init(mm);
 #endif
 	atomic_set(&mm->context.active_cpus, 0);
+	atomic_set(&mm->context.copros, 0);
 
 	return 0;
 }
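As a quick sanity sketch (assumption: two coprocessor users sharing one mm, radix MMU), the counter makes repeated add/remove calls nest so that the active_cpus bump and the final flush each happen exactly once; the demo function name is hypothetical.

/*
 * Illustrative only: nesting behaviour of the copros counter for two
 * users of the same mm. Only the first add and the last remove have
 * side effects beyond the counter (final flush assumes radix).
 */
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

static void copro_refcount_demo(struct mm_struct *mm)
{
	mm_context_add_copro(mm);	/* copros 0 -> 1: inc_mm_active_cpus() */
	mm_context_add_copro(mm);	/* copros 1 -> 2: counter only */

	mm_context_remove_copro(mm);	/* copros 2 -> 1: counter only */
	mm_context_remove_copro(mm);	/* copros 1 -> 0: flush_all_mm() + dec_mm_active_cpus() */
}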