author    Will Deacon <will.deacon@arm.com>    2017-11-30 13:25:17 -0500
committer Will Deacon <will.deacon@arm.com>    2017-12-01 08:05:08 -0500
commit    3a33c7605750fb6a87613044d16b1455e482414d (patch)
tree      db8a977414805adbc8805e15612a1832bfab008f
parent    770ba06084f7aeadea120922c775d574f3128ba3 (diff)
arm64: context: Fix comments and remove pointless smp_wmb()
The comments in the ASID allocator incorrectly hint at an MP-style idiom
using the asid_generation and the active_asids array. In fact, the
synchronisation is achieved using a combination of an xchg operation and
a spinlock, so update the comments and remove the pointless smp_wmb().

Cc: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	arch/arm64/mm/context.c	23
1 file changed, 12 insertions, 11 deletions
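
For illustration only: a minimal userspace sketch of the xchg + spinlock idiom the commit message describes, using C11 atomics and a pthread mutex in place of atomic64_xchg_relaxed() and cpu_asid_lock. All fake_* names are made up for this sketch and the slow path is simplified (it always rolls over); it is not the kernel implementation.

/*
 * Illustrative model of the xchg + spinlock idiom: a lock-free fast path
 * publishes a value with a relaxed xchg, and a locked slow path reclaims
 * the per-CPU slots, also with relaxed xchgs.  No extra barriers needed.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_FAKE_CPUS	4

static pthread_mutex_t fake_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic uint64_t fake_active[NR_FAKE_CPUS];	/* models active_asids */

/* Rollover, called with fake_lock held: zero every per-CPU slot with a
 * relaxed xchg.  A non-zero old value means that CPU published an ASID
 * concurrently and it must be kept reserved. */
static void fake_rollover(void)
{
	for (int i = 0; i < NR_FAKE_CPUS; i++) {
		uint64_t old = atomic_exchange_explicit(&fake_active[i], 0,
							memory_order_relaxed);
		if (old)
			printf("cpu%d raced with rollover; reserve %#llx\n",
			       i, (unsigned long long)old);
	}
}

/* Fast path: publish our ASID with a relaxed xchg.  A zero return means a
 * rollover zeroed the slot first, so fall back to the lock; acquiring it
 * synchronises with the rollover, which is why no barrier is required. */
static void fake_switch(int cpu, uint64_t asid)
{
	if (atomic_exchange_explicit(&fake_active[cpu], asid,
				     memory_order_relaxed))
		return;				/* lock-free fast path */

	pthread_mutex_lock(&fake_lock);
	fake_rollover();
	atomic_store_explicit(&fake_active[cpu], asid, memory_order_relaxed);
	pthread_mutex_unlock(&fake_lock);
}

int main(void)
{
	fake_switch(0, 0x10001);
	printf("cpu0 slot: %#llx\n",
	       (unsigned long long)atomic_load(&fake_active[0]));
	return 0;
}
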
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 28a45a19aae7..6f4017046323 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -96,12 +96,6 @@ static void flush_context(unsigned int cpu)
 
 	set_reserved_asid_bits();
 
-	/*
-	 * Ensure the generation bump is observed before we xchg the
-	 * active_asids.
-	 */
-	smp_wmb();
-
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
 		/*
@@ -205,11 +199,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	asid = atomic64_read(&mm->context.id);
 
 	/*
-	 * The memory ordering here is subtle. We rely on the control
-	 * dependency between the generation read and the update of
-	 * active_asids to ensure that we are synchronised with a
-	 * parallel rollover (i.e. this pairs with the smp_wmb() in
-	 * flush_context).
+	 * The memory ordering here is subtle.
+	 * If our ASID matches the current generation, then we update
+	 * our active_asids entry with a relaxed xchg. Racing with a
+	 * concurrent rollover means that either:
+	 *
+	 * - We get a zero back from the xchg and end up waiting on the
+	 *   lock. Taking the lock synchronises with the rollover and so
+	 *   we are forced to see the updated generation.
+	 *
+	 * - We get a valid ASID back from the xchg, which means the
+	 *   relaxed xchg in flush_context will treat us as reserved
+	 *   because atomic RmWs are totally ordered for a given location.
 	 */
 	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
 	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
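
The second bullet of the new comment rests on atomic read-modify-writes to a single location being totally ordered. A small two-thread C11 litmus-style sketch of that argument (illustrative names and values, not kernel code): whichever xchg reaches the slot second must return the value the first one wrote, so either the fast path sees the rollover's zero and takes the lock, or the rollover sees the fast path's ASID and reserves it; they can never both miss each other.

/*
 * Two xchgs racing on one location: the later one in the per-location
 * order returns what the earlier one wrote.  "slot" models a single
 * active_asids entry; the values are made up for this sketch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t slot;

static void *fast_path(void *arg)
{
	(void)arg;
	/* models the fast path publishing its ASID */
	uint64_t old = atomic_exchange_explicit(&slot, 0x10001,
						memory_order_relaxed);
	printf("fast path got %#llx (0 means rollover won: take the lock)\n",
	       (unsigned long long)old);
	return NULL;
}

static void *rollover(void *arg)
{
	(void)arg;
	/* models the rollover clearing the entry */
	uint64_t old = atomic_exchange_explicit(&slot, 0,
						memory_order_relaxed);
	printf("rollover got %#llx (non-zero means reserve that ASID)\n",
	       (unsigned long long)old);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_store(&slot, 0xbeef);	/* ASID left by the previous task */
	pthread_create(&a, NULL, fast_path, NULL);
	pthread_create(&b, NULL, rollover, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}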