Diffstat (limited to 'kernel/mutex.c')
 kernel/mutex.c | 385 ++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 353 insertions(+), 32 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -18,6 +18,7 @@
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
+#include <linux/ww_mutex.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/export.h>
@@ -254,16 +255,165 @@ void __sched mutex_unlock(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_unlock);
 
+/**
+ * ww_mutex_unlock - release the w/w mutex
+ * @lock: the mutex to be released
+ *
+ * Unlock a mutex that has been locked by this task previously with any of the
+ * ww_mutex_lock* functions (with or without an acquire context). It is
+ * forbidden to release the locks after releasing the acquire context.
+ *
+ * This function must not be used in interrupt context. Unlocking
+ * of an unlocked mutex is not allowed.
+ */
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+        /*
+         * The unlocking fastpath is the 0->1 transition from 'locked'
+         * into 'unlocked' state:
+         */
+        if (lock->ctx) {
+#ifdef CONFIG_DEBUG_MUTEXES
+                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+                if (lock->ctx->acquired > 0)
+                        lock->ctx->acquired--;
+                lock->ctx = NULL;
+        }
+
+#ifndef CONFIG_DEBUG_MUTEXES
+        /*
+         * When debugging is enabled we must not clear the owner before time,
+         * the slow path will always be taken, and that clears the owner field
+         * after verifying that it was indeed current.
+         */
+        mutex_clear_owner(&lock->base);
+#endif
+        __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+
+static inline int __sched
+__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+{
+        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+
+        if (!hold_ctx)
+                return 0;
+
+        if (unlikely(ctx == hold_ctx))
+                return -EALREADY;
+
+        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+                DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+                ctx->contending_lock = ww;
+#endif
+                return -EDEADLK;
+        }
+
+        return 0;
+}
+
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+                                                   struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+        /*
+         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+         * but released with a normal mutex_unlock in this call.
+         *
+         * This should never happen, always use ww_mutex_unlock.
+         */
+        DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+        /*
+         * Not quite done after calling ww_acquire_done() ?
+         */
+        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+        if (ww_ctx->contending_lock) {
+                /*
+                 * After -EDEADLK you tried to
+                 * acquire a different ww_mutex? Bad!
+                 */
+                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+                /*
+                 * You called ww_mutex_lock after receiving -EDEADLK,
+                 * but 'forgot' to unlock everything else first?
+                 */
+                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+                ww_ctx->contending_lock = NULL;
+        }
+
+        /*
+         * Naughty, using a different class will lead to undefined behavior!
+         */
+        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+        ww_ctx->acquired++;
+}
+
+/*
+ * After acquiring the lock with the fastpath, or when we lost out in contested
+ * slowpath, set ctx and wake up any waiters so they can recheck.
+ *
+ * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
+ * as the fastpath and opportunistic spinning are disabled in that case.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock,
+                              struct ww_acquire_ctx *ctx)
+{
+        unsigned long flags;
+        struct mutex_waiter *cur;
+
+        ww_mutex_lock_acquired(lock, ctx);
+
+        lock->ctx = ctx;
+
+        /*
+         * The lock->ctx update should be visible on all cores before
+         * the atomic read is done, otherwise contended waiters might be
+         * missed. A contended waiter will either see ww_ctx == NULL
+         * and keep spinning, or it will acquire wait_lock, add itself
+         * to the waiter list and sleep.
+         */
+        smp_mb(); /* ^^^ */
+
+        /*
+         * Check if lock is contended, if not there is nobody to wake up.
+         */
+        if (likely(atomic_read(&lock->base.count) == 0))
+                return;
+
+        /*
+         * Uh oh, we raced in fastpath, wake up everyone in this case,
+         * so they can see the new lock->ctx.
+         */
+        spin_lock_mutex(&lock->base.wait_lock, flags);
+        list_for_each_entry(cur, &lock->base.wait_list, list) {
+                debug_mutex_wake_waiter(&lock->base, cur);
+                wake_up_process(cur->task);
+        }
+        spin_unlock_mutex(&lock->base.wait_lock, flags);
+}
+
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
-static inline int __sched
+static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
-                    struct lockdep_map *nest_lock, unsigned long ip)
+                    struct lockdep_map *nest_lock, unsigned long ip,
+                    struct ww_acquire_ctx *ww_ctx)
 {
         struct task_struct *task = current;
         struct mutex_waiter waiter;
         unsigned long flags;
+        int ret;
 
         preempt_disable();
         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
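
The stamp comparison in __mutex_lock_check_stamp() above is the wait/wound ordering rule: a waiter whose acquire context carries a younger (numerically later) stamp backs off with -EDEADLK when the lock is already held by an older context, while older contexts are allowed to keep waiting. A minimal user-space sketch of just that decision, for illustration only (toy_ctx and must_back_off are invented names, the pointer tie-break for equal stamps is omitted, and the unsigned subtraction mirrors the wrap-around handling in the patch):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct ww_acquire_ctx: only the stamp matters here. */
struct toy_ctx {
        unsigned long stamp;    /* lower stamp == older == wins contention */
};

/* Younger requester vs. older holder -> back off (-EDEADLK in the patch). */
static bool must_back_off(const struct toy_ctx *requester,
                          const struct toy_ctx *holder)
{
        return requester->stamp - holder->stamp <= LONG_MAX &&
               requester->stamp != holder->stamp;
}

int main(void)
{
        struct toy_ctx older = { .stamp = 1 }, younger = { .stamp = 2 };

        printf("younger waits on older holder -> back off? %d\n",
               must_back_off(&younger, &older));        /* 1: younger yields */
        printf("older waits on younger holder -> back off? %d\n",
               must_back_off(&older, &younger));        /* 0: older keeps waiting */
        return 0;
}
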
@@ -298,6 +448,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 struct task_struct *owner;
                 struct mspin_node node;
 
+                if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+                        struct ww_mutex *ww;
+
+                        ww = container_of(lock, struct ww_mutex, base);
+                        /*
+                         * If ww->ctx is set the contents are undefined, only
+                         * by acquiring wait_lock there is a guarantee that
+                         * they are not invalid when reading.
+                         *
+                         * As such, when deadlock detection needs to be
+                         * performed the optimistic spinning cannot be done.
+                         */
+                        if (ACCESS_ONCE(ww->ctx))
+                                break;
+                }
+
                 /*
                  * If there's an owner, wait for it to either
                  * release the lock or go to sleep.
@@ -312,6 +478,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 if ((atomic_read(&lock->count) == 1) &&
                     (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                         lock_acquired(&lock->dep_map, ip);
+                        if (!__builtin_constant_p(ww_ctx == NULL)) {
+                                struct ww_mutex *ww;
+                                ww = container_of(lock, struct ww_mutex, base);
+
+                                ww_mutex_set_context_fastpath(ww, ww_ctx);
+                        }
+
                         mutex_set_owner(lock);
                         mspin_unlock(MLOCK(lock), &node);
                         preempt_enable();
@@ -371,15 +544,16 @@ slowpath:
                  * TASK_UNINTERRUPTIBLE case.)
                  */
                 if (unlikely(signal_pending_state(state, task))) {
-                        mutex_remove_waiter(lock, &waiter,
-                                            task_thread_info(task));
-                        mutex_release(&lock->dep_map, 1, ip);
-                        spin_unlock_mutex(&lock->wait_lock, flags);
+                        ret = -EINTR;
+                        goto err;
+                }
 
-                        debug_mutex_free_waiter(&waiter);
-                        preempt_enable();
-                        return -EINTR;
+                if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
+                        if (ret)
+                                goto err;
                 }
+
                 __set_task_state(task, state);
 
                 /* didn't get the lock, go to sleep: */
@@ -394,6 +568,30 @@ done:
         mutex_remove_waiter(lock, &waiter, current_thread_info());
         mutex_set_owner(lock);
 
+        if (!__builtin_constant_p(ww_ctx == NULL)) {
+                struct ww_mutex *ww = container_of(lock,
+                                                   struct ww_mutex,
+                                                   base);
+                struct mutex_waiter *cur;
+
+                /*
+                 * This branch gets optimized out for the common case,
+                 * and is only important for ww_mutex_lock.
+                 */
+
+                ww_mutex_lock_acquired(ww, ww_ctx);
+                ww->ctx = ww_ctx;
+
+                /*
+                 * Give any possible sleeping processes the chance to wake up,
+                 * so they can recheck if they have to back off.
+                 */
+                list_for_each_entry(cur, &lock->wait_list, list) {
+                        debug_mutex_wake_waiter(lock, cur);
+                        wake_up_process(cur->task);
+                }
+        }
+
         /* set it to 0 if there are no waiters left: */
         if (likely(list_empty(&lock->wait_list)))
                 atomic_set(&lock->count, 0);
@@ -404,6 +602,14 @@ done:
         preempt_enable();
 
         return 0;
+
+err:
+        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+        spin_unlock_mutex(&lock->wait_lock, flags);
+        debug_mutex_free_waiter(&waiter);
+        mutex_release(&lock->dep_map, 1, ip);
+        preempt_enable();
+        return ret;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -411,7 +617,8 @@ void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
-        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+                            subclass, NULL, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -420,7 +627,8 @@ void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
         might_sleep();
-        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+                            0, nest, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -429,7 +637,8 @@ int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
-        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+        return __mutex_lock_common(lock, TASK_KILLABLE,
+                                   subclass, NULL, _RET_IP_, NULL);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -438,10 +647,68 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                   subclass, NULL, _RET_IP_);
+                                   subclass, NULL, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+        unsigned tmp;
+
+        if (ctx->deadlock_inject_countdown-- == 0) {
+                tmp = ctx->deadlock_inject_interval;
+                if (tmp > UINT_MAX/4)
+                        tmp = UINT_MAX;
+                else
+                        tmp = tmp*2 + tmp + tmp/2;
+
+                ctx->deadlock_inject_interval = tmp;
+                ctx->deadlock_inject_countdown = tmp;
+                ctx->contending_lock = lock;
+
+                ww_mutex_unlock(lock);
+
+                return -EDEADLK;
+        }
+#endif
+
+        return 0;
+}
+
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+        int ret;
+
+        might_sleep();
+        ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
+                                  0, &ctx->dep_map, _RET_IP_, ctx);
+        if (!ret && ctx->acquired > 1)
+                return ww_mutex_deadlock_injection(lock, ctx);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+        int ret;
+
+        might_sleep();
+        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
+                                  0, &ctx->dep_map, _RET_IP_, ctx);
+
+        if (!ret && ctx->acquired > 1)
+                return ww_mutex_deadlock_injection(lock, ctx);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+
 #endif
 
 /*
@@ -494,10 +761,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count);
+__mutex_lock_killable_slowpath(struct mutex *lock);
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(struct mutex *lock);
 
 /**
  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +782,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
         int ret;
 
         might_sleep();
-        ret = __mutex_fastpath_lock_retval
-                        (&lock->count, __mutex_lock_interruptible_slowpath);
-        if (!ret)
+        ret = __mutex_fastpath_lock_retval(&lock->count);
+        if (likely(!ret)) {
                 mutex_set_owner(lock);
-
-        return ret;
+                return 0;
+        } else
+                return __mutex_lock_interruptible_slowpath(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +797,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
         int ret;
 
         might_sleep();
-        ret = __mutex_fastpath_lock_retval
-                        (&lock->count, __mutex_lock_killable_slowpath);
-        if (!ret)
+        ret = __mutex_fastpath_lock_retval(&lock->count);
+        if (likely(!ret)) {
                 mutex_set_owner(lock);
-
-        return ret;
+                return 0;
+        } else
+                return __mutex_lock_killable_slowpath(lock);
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
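
The mutex_lock_interruptible()/mutex_lock_killable() rework above changes the __mutex_fastpath_lock_retval() convention: the fastpath no longer takes a slowpath function pointer, it merely reports contention and the caller dispatches to its own slowpath, which is what lets the ww slowpaths below accept an extra acquire-context argument. A stand-alone toy model of the resulting shape, for illustration only (none of these names are the kernel implementation):

#include <stdio.h>

/* Toy fastpath: returns 0 when the "lock" was free, nonzero on contention. */
static int toy_fastpath_lock_retval(int *count)
{
        return --(*count) < 0;
}

/* Because the caller picks the slowpath, slowpaths can take extra arguments. */
static int toy_slowpath(int *count, const char *why)
{
        printf("slowpath taken (%s)\n", why);
        return 0;
}

static int toy_lock_interruptible(int *count)
{
        if (!toy_fastpath_lock_retval(count))
                return 0;                       /* fastpath success */
        return toy_slowpath(count, "interruptible");
}

int main(void)
{
        int uncontended = 1, contended = 0;

        printf("uncontended -> %d\n", toy_lock_interruptible(&uncontended));
        printf("contended   -> %d\n", toy_lock_interruptible(&contended));
        return 0;
}
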
@@ -544,24 +811,39 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 {
         struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
+                            NULL, _RET_IP_, NULL);
 }
 
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count)
+__mutex_lock_killable_slowpath(struct mutex *lock)
 {
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
+        return __mutex_lock_common(lock, TASK_KILLABLE, 0,
+                                   NULL, _RET_IP_, NULL);
+}
 
-        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
+static noinline int __sched
+__mutex_lock_interruptible_slowpath(struct mutex *lock)
+{
+        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
+                                   NULL, _RET_IP_, NULL);
 }
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
+        return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
+                                   NULL, _RET_IP_, ctx);
+}
 
-        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
+static noinline int __sched
+__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
+                                       struct ww_acquire_ctx *ctx)
+{
+        return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
+                                   NULL, _RET_IP_, ctx);
 }
+
 #endif
 
 /*
@@ -617,6 +899,45 @@ int __sched mutex_trylock(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_trylock);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+        int ret;
+
+        might_sleep();
+
+        ret = __mutex_fastpath_lock_retval(&lock->base.count);
+
+        if (likely(!ret)) {
+                ww_mutex_set_context_fastpath(lock, ctx);
+                mutex_set_owner(&lock->base);
+        } else
+                ret = __ww_mutex_lock_slowpath(lock, ctx);
+        return ret;
+}
+EXPORT_SYMBOL(__ww_mutex_lock);
+
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+        int ret;
+
+        might_sleep();
+
+        ret = __mutex_fastpath_lock_retval(&lock->base.count);
+
+        if (likely(!ret)) {
+                ww_mutex_set_context_fastpath(lock, ctx);
+                mutex_set_owner(&lock->base);
+        } else
+                ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
+        return ret;
+}
+EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
+
+#endif
+
 /**
  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
  * @cnt: the atomic which we are to dec
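
Taken together, the entry points added by this patch (__ww_mutex_lock(), __ww_mutex_lock_interruptible(), ww_mutex_unlock()) are meant to be driven through an acquire context and a back-off retry loop. A hypothetical caller could look like the sketch below; it is illustrative only and not part of the patch: toy_ww_class, struct toy_buf and toy_lock_pair() are invented names, and DEFINE_WW_CLASS, ww_acquire_init/done/fini and the ww_mutex_lock() wrapper are assumed to come from the companion include/linux/ww_mutex.h header in this series.

static DEFINE_WW_CLASS(toy_ww_class);

struct toy_buf {
        struct ww_mutex lock;
        /* ... payload ... */
};

static int toy_lock_pair(struct toy_buf *a, struct toy_buf *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &toy_ww_class);
retry:
        /* The first lock taken with acquired == 0 never returns -EDEADLK. */
        ret = ww_mutex_lock(&a->lock, &ctx);
        if (ret)
                goto out_fini;

        ret = ww_mutex_lock(&b->lock, &ctx);
        if (ret == -EDEADLK) {
                /*
                 * An older context owns b: drop everything we hold and retry,
                 * taking the formerly contended lock first this time.
                 */
                ww_mutex_unlock(&a->lock);
                swap(a, b);
                goto retry;
        }
        if (ret) {
                ww_mutex_unlock(&a->lock);
                goto out_fini;
        }

        ww_acquire_done(&ctx);          /* no further locks will be taken */

        /* ... operate on a and b ... */

        ww_mutex_unlock(&b->lock);
        ww_mutex_unlock(&a->lock);
out_fini:
        ww_acquire_fini(&ctx);
        return ret;
}

Backing off by unlocking and retrying with a plain ww_mutex_lock() is functionally sufficient; the ww_mutex_lock_slow() variants described in the series' documentation are an optimization of the same pattern.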