Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c | 384 +++++++++++++++++++++++++++++++++++++++-----
1 file changed, 352 insertions(+), 32 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..e581ada5faf4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -254,16 +254,165 @@ void __sched mutex_unlock(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_unlock);
 
+/**
+ * ww_mutex_unlock - release the w/w mutex
+ * @lock: the mutex to be released
+ *
+ * Unlock a mutex that has been locked by this task previously with any of the
+ * ww_mutex_lock* functions (with or without an acquire context). It is
+ * forbidden to release the locks after releasing the acquire context.
+ *
+ * This function must not be used in interrupt context. Unlocking
+ * of a unlocked mutex is not allowed.
+ */
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+	/*
+	 * The unlocking fastpath is the 0->1 transition from 'locked'
+	 * into 'unlocked' state:
+	 */
+	if (lock->ctx) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+		if (lock->ctx->acquired > 0)
+			lock->ctx->acquired--;
+		lock->ctx = NULL;
+	}
+
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(&lock->base);
+#endif
+	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+
+static inline int __sched
+__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+
+	if (!hold_ctx)
+		return 0;
+
+	if (unlikely(ctx == hold_ctx))
+		return -EALREADY;
+
+	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+		ctx->contending_lock = ww;
+#endif
+		return -EDEADLK;
+	}
+
+	return 0;
+}
+
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+						   struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	/*
+	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+	 * but released with a normal mutex_unlock in this call.
+	 *
+	 * This should never happen, always use ww_mutex_unlock.
+	 */
+	DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+	/*
+	 * Not quite done after calling ww_acquire_done() ?
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+	if (ww_ctx->contending_lock) {
+		/*
+		 * After -EDEADLK you tried to
+		 * acquire a different ww_mutex? Bad!
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+		/*
+		 * You called ww_mutex_lock after receiving -EDEADLK,
+		 * but 'forgot' to unlock everything else first?
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+		ww_ctx->contending_lock = NULL;
+	}
+
+	/*
+	 * Naughty, using a different class will lead to undefined behavior!
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+	ww_ctx->acquired++;
+}
+
+/*
+ * after acquiring lock with fastpath or when we lost out in contested
+ * slowpath, set ctx and wake up any waiters so they can recheck.
+ *
+ * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
+ * as the fastpath and opportunistic spinning are disabled in that case.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock,
+			      struct ww_acquire_ctx *ctx)
+{
+	unsigned long flags;
+	struct mutex_waiter *cur;
+
+	ww_mutex_lock_acquired(lock, ctx);
+
+	lock->ctx = ctx;
+
+	/*
+	 * The lock->ctx update should be visible on all cores before
+	 * the atomic read is done, otherwise contended waiters might be
+	 * missed. The contended waiters will either see ww_ctx == NULL
+	 * and keep spinning, or it will acquire wait_lock, add itself
+	 * to waiter list and sleep.
+	 */
+	smp_mb(); /* ^^^ */
+
+	/*
+	 * Check if lock is contended, if not there is nobody to wake up
+	 */
+	if (likely(atomic_read(&lock->base.count) == 0))
+		return;
+
+	/*
+	 * Uh oh, we raced in fastpath, wake up everyone in this case,
+	 * so they can see the new lock->ctx.
+	 */
+	spin_lock_mutex(&lock->base.wait_lock, flags);
+	list_for_each_entry(cur, &lock->base.wait_list, list) {
+		debug_mutex_wake_waiter(&lock->base, cur);
+		wake_up_process(cur->task);
+	}
+	spin_unlock_mutex(&lock->base.wait_lock, flags);
+}
+
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
-static inline int __sched
+static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
-		    struct lockdep_map *nest_lock, unsigned long ip)
+		    struct lockdep_map *nest_lock, unsigned long ip,
+		    struct ww_acquire_ctx *ww_ctx)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
+	int ret;
 
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
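
For orientation, the hunk above backs the new wait/wound locking API. Below is a minimal usage sketch (not part of this diff) of how a caller is expected to take two ww_mutexes under one acquire context and back off on -EDEADLK. The struct buf, buf_ww_class and lock_both() names are made up for illustration; the ww_* calls are the interface declared on the header side of this series.

/* Illustration only -- not part of this patch. Assumes each struct buf
 * embeds a ww_mutex initialised with ww_mutex_init(&buf->lock, &buf_ww_class).
 */
static DEFINE_WW_CLASS(buf_ww_class);

struct buf {
	struct ww_mutex lock;
	/* ... payload ... */
};

static int lock_both(struct buf *a, struct buf *b)
{
	struct ww_acquire_ctx ctx;
	struct buf *first = a, *second = b;
	bool slow = false;
	int ret;

	ww_acquire_init(&ctx, &buf_ww_class);

	for (;;) {
		if (slow) {
			/* Retry pass: sleep for the lock we were wounded on. */
			ww_mutex_lock_slow(&first->lock, &ctx);
		} else {
			ret = ww_mutex_lock(&first->lock, &ctx);
			if (ret)	/* cannot be -EDEADLK: nothing is held yet */
				goto out_fini;
		}

		ret = ww_mutex_lock(&second->lock, &ctx);
		if (!ret)
			break;

		/*
		 * Wounded by an older context: drop everything we hold and
		 * retry, taking the contended lock first next time around.
		 */
		ww_mutex_unlock(&first->lock);
		if (ret != -EDEADLK)
			goto out_fini;
		swap(first, second);
		slow = true;
	}

	ww_acquire_done(&ctx);

	/* ... both a->lock and b->lock are held here ... */

	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}

swap() is the kernel's macro from <linux/kernel.h>; on the retry pass the contended lock is taken first with ww_mutex_lock_slow(), which cannot fail.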
@@ -298,6 +447,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct task_struct *owner;
 		struct mspin_node node;
 
+		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+			struct ww_mutex *ww;
+
+			ww = container_of(lock, struct ww_mutex, base);
+			/*
+			 * If ww->ctx is set the contents are undefined, only
+			 * by acquiring wait_lock there is a guarantee that
+			 * they are not invalid when reading.
+			 *
+			 * As such, when deadlock detection needs to be
+			 * performed the optimistic spinning cannot be done.
+			 */
+			if (ACCESS_ONCE(ww->ctx))
+				break;
+		}
+
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
@@ -312,6 +477,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
+			if (!__builtin_constant_p(ww_ctx == NULL)) {
+				struct ww_mutex *ww;
+				ww = container_of(lock, struct ww_mutex, base);
+
+				ww_mutex_set_context_fastpath(ww, ww_ctx);
+			}
+
 			mutex_set_owner(lock);
 			mspin_unlock(MLOCK(lock), &node);
 			preempt_enable();
@@ -371,15 +543,16 @@ slowpath:
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
 		if (unlikely(signal_pending_state(state, task))) {
-			mutex_remove_waiter(lock, &waiter,
-					    task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, ip);
-			spin_unlock_mutex(&lock->wait_lock, flags);
+			ret = -EINTR;
+			goto err;
+		}
 
-			debug_mutex_free_waiter(&waiter);
-			preempt_enable();
-			return -EINTR;
+		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+			if (ret)
+				goto err;
 		}
+
 		__set_task_state(task, state);
 
 		/* didn't get the lock, go to sleep: */
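
The __mutex_lock_check_stamp() call wired into the wait loop above is what decides who backs off: a context created later (a higher stamp) gets -EDEADLK when the lock is already held on behalf of an older context, with the context pointer breaking a theoretical stamp tie. A small sketch of why the unsigned subtraction against LONG_MAX is a wraparound-safe "was created after" test, assuming stamps are handed out by an ever-increasing per-class counter at ww_acquire_init() time; the helper name is made up.

/* Illustration only -- not part of this patch. */
static inline bool stamp_is_younger(unsigned long a, unsigned long b)
{
	/*
	 * 'a' was handed out after 'b' iff the unsigned difference did not
	 * wrap past LONG_MAX.  The test keeps working when the counter
	 * itself wraps around:
	 *
	 *   stamp_is_younger(5, 2)         -> true
	 *   stamp_is_younger(2, 5)         -> false
	 *   stamp_is_younger(1, ULONG_MAX) -> true  (1 was taken after the wrap)
	 */
	return a != b && a - b <= LONG_MAX;
}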
@@ -394,6 +567,30 @@ done:
 	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
+	if (!__builtin_constant_p(ww_ctx == NULL)) {
+		struct ww_mutex *ww = container_of(lock,
+						   struct ww_mutex,
+						   base);
+		struct mutex_waiter *cur;
+
+		/*
+		 * This branch gets optimized out for the common case,
+		 * and is only important for ww_mutex_lock.
+		 */
+
+		ww_mutex_lock_acquired(ww, ww_ctx);
+		ww->ctx = ww_ctx;
+
+		/*
+		 * Give any possible sleeping processes the chance to wake up,
+		 * so they can recheck if they have to back off.
+		 */
+		list_for_each_entry(cur, &lock->wait_list, list) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
+	}
+
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -404,6 +601,14 @@ done:
 	preempt_enable();
 
 	return 0;
+
+err:
+	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	spin_unlock_mutex(&lock->wait_lock, flags);
+	debug_mutex_free_waiter(&waiter);
+	mutex_release(&lock->dep_map, 1, ip);
+	preempt_enable();
+	return ret;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -411,7 +616,8 @@ void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+			    subclass, NULL, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -420,7 +626,8 @@ void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+			    0, nest, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -429,7 +636,8 @@ int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_KILLABLE,
+				   subclass, NULL, _RET_IP_, NULL);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -438,10 +646,68 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_);
+				   subclass, NULL, _RET_IP_, NULL);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
+				  0, &ctx->dep_map, _RET_IP_, ctx);
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
+				  0, &ctx->dep_map, _RET_IP_, ctx);
+
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+
 #endif
 
 /*
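
The double-underscore entry points added above are not meant to be called directly; the header side of this series is expected to wrap them in inline helpers that fall back to a plain mutex_lock() when no acquire context is supplied. A sketch of that wrapper, reconstructed as an assumption rather than taken from this file:

/* Assumed header-side wrapper -- not part of kernel/mutex.c. */
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	if (ctx)
		return __ww_mutex_lock(lock, ctx);

	mutex_lock(&lock->base);
	return 0;
}

Note that ww_mutex_deadlock_injection() only fires when ctx->acquired > 0, so the first lock taken in an acquire context never sees an injected -EDEADLK. This mirrors the real deadlock check in __mutex_lock_common(), which is likewise skipped until at least one lock is held.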
@@ -494,10 +760,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count);
+__mutex_lock_killable_slowpath(struct mutex *lock);
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(struct mutex *lock);
 
 /**
  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +781,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval
-			(&lock->count, __mutex_lock_interruptible_slowpath);
-	if (!ret)
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
 		mutex_set_owner(lock);
-
-	return ret;
+		return 0;
+	} else
+		return __mutex_lock_interruptible_slowpath(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +796,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_fastpath_lock_retval
-			(&lock->count, __mutex_lock_killable_slowpath);
-	if (!ret)
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
 		mutex_set_owner(lock);
-
-	return ret;
+		return 0;
+	} else
+		return __mutex_lock_killable_slowpath(lock);
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
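
Both conversions above rely on the reworked __mutex_fastpath_lock_retval() convention from earlier in this series: the helper no longer takes a failure callback, it merely reports whether the fastpath got the lock, and the caller picks the appropriate slowpath itself (which is what lets the ww_mutex paths reuse it below). A sketch of the generic form this assumes, in the style of asm-generic/mutex-dec.h; architectures provide their own variants:

/* Assumed generic helper -- per-architecture implementations differ. */
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		return -1;	/* contended: caller chooses a slowpath */
	return 0;		/* fastpath took the lock */
}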
@@ -544,24 +810,39 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
+			    NULL, _RET_IP_, NULL);
 }
 
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count)
+__mutex_lock_killable_slowpath(struct mutex *lock)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
+				   NULL, _RET_IP_, NULL);
+}
 
-	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
+static noinline int __sched
+__mutex_lock_interruptible_slowpath(struct mutex *lock)
+{
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
+				   NULL, _RET_IP_, NULL);
 }
 
 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
+				   NULL, _RET_IP_, ctx);
+}
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
+static noinline int __sched
+__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
+				       struct ww_acquire_ctx *ctx)
+{
+	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
+				   NULL, _RET_IP_, ctx);
 }
+
 #endif
 
 /*
@@ -617,6 +898,45 @@ int __sched mutex_trylock(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_trylock);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	ret = __mutex_fastpath_lock_retval(&lock->base.count);
+
+	if (likely(!ret)) {
+		ww_mutex_set_context_fastpath(lock, ctx);
+		mutex_set_owner(&lock->base);
+	} else
+		ret = __ww_mutex_lock_slowpath(lock, ctx);
+	return ret;
+}
+EXPORT_SYMBOL(__ww_mutex_lock);
+
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	ret = __mutex_fastpath_lock_retval(&lock->base.count);
+
+	if (likely(!ret)) {
+		ww_mutex_set_context_fastpath(lock, ctx);
+		mutex_set_owner(&lock->base);
+	} else
+		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
+	return ret;
+}
+EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
+
+#endif
+
 /**
  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
  * @cnt: the atomic which we are to dec