Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c | 51 +++++++++++++++++++++------------------------------
1 file changed, 21 insertions(+), 30 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 94674e5919cb..4cccea6b8934 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -25,7 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
-#include "mcs_spinlock.h"
+#include <linux/osq_lock.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -217,44 +217,35 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
-        if (lock->owner != owner)
-                return false;
-
-        /*
-         * Ensure we emit the owner->on_cpu, dereference _after_ checking
-         * lock->owner still matches owner, if that fails, owner might
-         * point to free()d memory, if it still matches, the rcu_read_lock()
-         * ensures the memory stays valid.
-         */
-        barrier();
-
-        return owner->on_cpu;
-}
-
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
  */
 static noinline
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 {
+        bool ret = true;
+
         rcu_read_lock();
-        while (owner_running(lock, owner)) {
-                if (need_resched())
+        while (lock->owner == owner) {
+                /*
+                 * Ensure we emit the owner->on_cpu, dereference _after_
+                 * checking lock->owner still matches owner. If that fails,
+                 * owner might point to freed memory. If it still matches,
+                 * the rcu_read_lock() ensures the memory stays valid.
+                 */
+                barrier();
+
+                if (!owner->on_cpu || need_resched()) {
+                        ret = false;
                         break;
+                }
 
                 cpu_relax_lowlatency();
         }
         rcu_read_unlock();
 
-        /*
-         * We break out the loop above on need_resched() and when the
-         * owner changed, which is a sign for heavy contention. Return
-         * success only when lock->owner is NULL.
-         */
-        return lock->owner == NULL;
+        return ret;
 }
 
 /*
@@ -269,7 +260,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
                 return 0;
 
         rcu_read_lock();
-        owner = ACCESS_ONCE(lock->owner);
+        owner = READ_ONCE(lock->owner);
         if (owner)
                 retval = owner->on_cpu;
         rcu_read_unlock();
@@ -343,7 +334,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                         * As such, when deadlock detection needs to be
                         * performed the optimistic spinning cannot be done.
                         */
-                        if (ACCESS_ONCE(ww->ctx))
+                        if (READ_ONCE(ww->ctx))
                                 break;
                 }
 
@@ -351,7 +342,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
-                owner = ACCESS_ONCE(lock->owner);
+                owner = READ_ONCE(lock->owner);
                 if (owner && !mutex_spin_on_owner(lock, owner))
                         break;
 
@@ -490,7 +481,7 @@ static inline int __sched
 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
         if (!hold_ctx)
                 return 0;
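
For orientation, here is a minimal, self-contained userspace sketch of the reworked mutex_spin_on_owner() loop from the second hunk. Everything below is an illustrative stand-in, not the kernel implementation: READ_ONCE is reduced to a volatile load, need_resched() and cpu_relax() are stubs, and rcu_read_lock()'s lifetime guarantee is only noted in a comment.

/* Minimal userspace sketch of the spin-on-owner pattern; every name
 * and stub below is illustrative, not the kernel's definition. */
#include <stdbool.h>
#include <stdio.h>

#define READ_ONCE(x)    (*(volatile __typeof__(x) *)&(x))       /* stand-in */
#define barrier()       __asm__ __volatile__("" ::: "memory")

struct task { int on_cpu; };
struct lock { struct task *owner; };

static bool need_resched(void) { return false; }        /* stub */
static void cpu_relax(void)    { barrier(); }           /* stub */

/*
 * Spin while the same task still owns the lock and is running on a
 * CPU; return false when the spinner should give up and sleep.
 */
static bool spin_on_owner(struct lock *lock, struct task *owner)
{
        bool ret = true;

        /* In the kernel, rcu_read_lock() here keeps *owner valid. */
        while (READ_ONCE(lock->owner) == owner) {
                /*
                 * Re-check lock->owner before dereferencing owner: once
                 * the owner changes, "owner" may point at freed memory,
                 * so the on_cpu load must not be hoisted above the
                 * comparison -- hence the compiler barrier.
                 */
                barrier();

                if (!READ_ONCE(owner->on_cpu) || need_resched()) {
                        ret = false;    /* owner preempted, or we must yield */
                        break;
                }

                cpu_relax();
        }

        return ret;     /* true: lock->owner changed; worth retrying the lock */
}

int main(void)
{
        struct task t = { .on_cpu = 0 };        /* owner not running */
        struct lock l = { .owner = &t };

        printf("%d\n", spin_on_owner(&l, &t));  /* prints 0: stop spinning */

        l.owner = NULL;                         /* owner released the lock */
        printf("%d\n", spin_on_owner(&l, &t));  /* prints 1: retry the lock */
        return 0;
}

Note the semantic shift visible in the hunk itself: the old code returned success only when lock->owner had become NULL, while the rewrite reports success whenever the owner changed at all, and failure only when the owner stops running or the spinner must reschedule.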
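
The remaining hunks mechanically convert ACCESS_ONCE() to READ_ONCE(). Both force the compiler to emit a real load on each use, but ACCESS_ONCE() does so with a bare volatile cast, which is only reliable for scalar types; READ_ONCE() routes the access through a size-dispatched helper so aggregate types work as well. A simplified sketch of that shape follows, assuming nothing beyond GCC-flavored C; the kernel's actual definition in include/linux/compiler.h differs in detail.

#include <string.h>

/* Simplified sketch of the READ_ONCE shape; not the kernel's exact code. */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 4: *(int *)res = *(volatile int *)p; break;        /* single load */
        case 8: *(long long *)res = *(volatile long long *)p; break;
        default:                /* aggregates: copied, no torn-load guarantee */
                memcpy(res, (const void *)p, size);
                break;
        }
}

#define READ_ONCE(x)                                            \
({                                                              \
        __typeof__(x) __val;                                    \
        __read_once_size(&(x), &__val, sizeof(__val));          \
        __val;                                                  \
})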