Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/locking/rtmutex-debug.h |   5
 -rw-r--r--  kernel/locking/rtmutex.c       | 243
 -rw-r--r--  kernel/locking/rtmutex.h       |   5
 -rw-r--r--  kernel/power/hibernate.c       |  37
 -rw-r--r--  kernel/power/main.c            |   6
 -rw-r--r--  kernel/power/user.c            |   3
 -rw-r--r--  kernel/sysctl.c                |   4

7 files changed, 259 insertions, 44 deletions
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index 14193d596d78..ab29b6a22669 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
 {
 	return (waiter != NULL);
 }
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+	debug_rt_mutex_print_deadlock(w);
+}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a620d4d08ca6..fc605941b9b8 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 		owner = *p;
 	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+	__releases(lock->wait_lock)
+{
+	struct task_struct *owner = rt_mutex_owner(lock);
+
+	clear_rt_mutex_waiters(lock);
+	raw_spin_unlock(&lock->wait_lock);
+	/*
+	 * If a new waiter comes in between the unlock and the cmpxchg
+	 * we have two situations:
+	 *
+	 * unlock(wait_lock);
+	 *					lock(wait_lock);
+	 * cmpxchg(p, owner, 0) == owner
+	 *					mark_rt_mutex_waiters(lock);
+	 *					acquire(lock);
+	 * or:
+	 *
+	 * unlock(wait_lock);
+	 *					lock(wait_lock);
+	 *					mark_rt_mutex_waiters(lock);
+	 *
+	 * cmpxchg(p, owner, 0) != owner
+	 *					enqueue_waiter();
+	 *					unlock(wait_lock);
+	 * lock(wait_lock);
+	 * wake waiter();
+	 * unlock(wait_lock);
+	 *					lock(wait_lock);
+	 *					acquire(lock);
+	 */
+	return rt_mutex_cmpxchg(lock, owner, NULL);
+}
+
 #else
 # define rt_mutex_cmpxchg(l,c,n)	(0)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 	lock->owner = (struct task_struct *)
 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
 }
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+	__releases(lock->wait_lock)
+{
+	lock->owner = NULL;
+	raw_spin_unlock(&lock->wait_lock);
+	return true;
+}
 #endif
 
 static inline int
@@ -260,27 +312,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
  */
 int max_lock_depth = 1024;
 
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
  *
- * @task:	the task owning the mutex (owner) for which a chain walk is probably
- *		needed
+ * @task:	the task owning the mutex (owner) for which a chain walk is
+ *		probably needed
  * @deadlock_detect: do we have to carry out deadlock detection?
  * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
  *		things for a task that has just got its priority adjusted, and
  *		is waiting on a mutex)
+ * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
+ *		we dropped its pi_lock. Is never dereferenced, only used for
+ *		comparison to detect lock chain changes.
  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
  *		its priority to the mutex owner (can be NULL in the case
  *		depicted above or if the top waiter is gone away and we are
  *		actually deboosting the owner)
  * @top_task:	the current top waiter
  *
  * Returns 0 or -EDEADLK.
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
+				      struct rt_mutex *next_lock,
 				      struct rt_mutex_waiter *orig_waiter,
 				      struct task_struct *top_task)
 {
@@ -314,7 +375,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		}
 		put_task_struct(task);
 
-		return deadlock_detect ? -EDEADLK : 0;
+		return -EDEADLK;
 	}
  retry:
 	/*
@@ -339,6 +400,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto out_unlock_pi;
 
 	/*
+	 * We dropped all locks after taking a refcount on @task, so
+	 * the task might have moved on in the lock chain or even left
+	 * the chain completely and blocks now on an unrelated lock or
+	 * on @orig_lock.
+	 *
+	 * We stored the lock on which @task was blocked in @next_lock,
+	 * so we can detect the chain change.
+	 */
+	if (next_lock != waiter->lock)
+		goto out_unlock_pi;
+
+	/*
 	 * Drop out, when the task has no waiters. Note,
 	 * top_waiter can be NULL, when we are in the deboosting
 	 * mode!
@@ -377,7 +450,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
-		ret = deadlock_detect ? -EDEADLK : 0;
+		ret = -EDEADLK;
 		goto out_unlock_pi;
 	}
 
@@ -422,11 +495,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		__rt_mutex_adjust_prio(task);
 	}
 
+	/*
+	 * Check whether the task which owns the current lock is pi
+	 * blocked itself. If yes we store a pointer to the lock for
+	 * the lock chain change detection above. After we dropped
+	 * task->pi_lock next_lock cannot be dereferenced anymore.
+	 */
+	next_lock = task_blocked_on_lock(task);
+
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	top_waiter = rt_mutex_top_waiter(lock);
 	raw_spin_unlock(&lock->wait_lock);
 
+	/*
+	 * We reached the end of the lock chain. Stop right here. No
+	 * point to go back just to figure that out.
+	 */
+	if (!next_lock)
+		goto out_put_task;
+
 	if (!detect_deadlock && waiter != top_waiter)
 		goto out_put_task;
 
@@ -536,8 +624,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
-	unsigned long flags;
+	struct rt_mutex *next_lock;
 	int chain_walk = 0, res;
+	unsigned long flags;
 
 	/*
 	 * Early deadlock detection. We really don't want the task to
@@ -548,7 +637,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	 * which is wrong, as the other waiter is not in a deadlock
 	 * situation.
 	 */
-	if (detect_deadlock && owner == task)
+	if (owner == task)
 		return -EDEADLK;
 
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -569,20 +658,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	if (!owner)
 		return 0;
 
+	raw_spin_lock_irqsave(&owner->pi_lock, flags);
 	if (waiter == rt_mutex_top_waiter(lock)) {
-		raw_spin_lock_irqsave(&owner->pi_lock, flags);
 		rt_mutex_dequeue_pi(owner, top_waiter);
 		rt_mutex_enqueue_pi(owner, waiter);
 
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
-		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-	}
-	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
 		chain_walk = 1;
+	}
 
-	if (!chain_walk)
+	/* Store the lock on which owner is blocked or NULL */
+	next_lock = task_blocked_on_lock(owner);
+
+	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	/*
+	 * Even if full deadlock detection is on, if the owner is not
+	 * blocked itself, we can avoid finding this out in the chain
+	 * walk.
+	 */
+	if (!chain_walk || !next_lock)
 		return 0;
 
 	/*
@@ -594,8 +691,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-					 task);
+	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+					 next_lock, waiter, task);
 
 	raw_spin_lock(&lock->wait_lock);
 
@@ -605,7 +702,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current tasks pi waiter list and
+ * wake it up.
  *
  * Called with lock->wait_lock held.
  */
@@ -626,10 +724,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 	 */
 	rt_mutex_dequeue_pi(current, waiter);
 
-	rt_mutex_set_owner(lock, NULL);
+	/*
+	 * As we are waking up the top waiter, and the waiter stays
+	 * queued on the lock until it gets the lock, this lock
+	 * obviously has waiters. Just set the bit here and this has
+	 * the added benefit of forcing all new tasks into the
+	 * slow path making sure no task of lower priority than
+	 * the top waiter can steal this lock.
+	 */
+	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
+	/*
+	 * It's safe to dereference waiter as it cannot go away as
+	 * long as we hold lock->wait_lock. The waiter task needs to
+	 * acquire it in order to dequeue the waiter.
+	 */
 	wake_up_process(waiter->task);
 }
 
@@ -644,8 +755,8 @@ static void remove_waiter(struct rt_mutex *lock,
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
+	struct rt_mutex *next_lock = NULL;
 	unsigned long flags;
-	int chain_walk = 0;
 
 	raw_spin_lock_irqsave(&current->pi_lock, flags);
 	rt_mutex_dequeue(lock, waiter);
@@ -669,13 +780,13 @@ static void remove_waiter(struct rt_mutex *lock,
 		}
 		__rt_mutex_adjust_prio(owner);
 
-		if (owner->pi_blocked_on)
-			chain_walk = 1;
+		/* Store the lock on which owner is blocked or NULL */
+		next_lock = task_blocked_on_lock(owner);
 
 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 
-	if (!chain_walk)
+	if (!next_lock)
 		return;
 
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -683,7 +794,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
 
 	raw_spin_lock(&lock->wait_lock);
 }
@@ -696,6 +807,7 @@ static void remove_waiter(struct rt_mutex *lock,
 void rt_mutex_adjust_pi(struct task_struct *task)
 {
 	struct rt_mutex_waiter *waiter;
+	struct rt_mutex *next_lock;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -706,12 +818,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
-
+	next_lock = waiter->lock;
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
 }
 
 /**
@@ -763,6 +876,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+				     struct rt_mutex_waiter *w)
+{
+	/*
+	 * If the result is not -EDEADLOCK or the caller requested
+	 * deadlock detection, nothing to do here.
+	 */
+	if (res != -EDEADLOCK || detect_deadlock)
+		return;
+
+	/*
+	 * Yell lowdly and stop the task right here.
+	 */
+	rt_mutex_print_deadlock(w);
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+}
+
 /*
  * Slow path lock function:
  */
@@ -802,8 +935,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	set_current_state(TASK_RUNNING);
 
-	if (unlikely(ret))
+	if (unlikely(ret)) {
 		remove_waiter(lock, &waiter);
+		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+	}
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -859,12 +994,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 
 	rt_mutex_deadlock_account_unlock(current);
 
-	if (!rt_mutex_has_waiters(lock)) {
-		lock->owner = NULL;
-		raw_spin_unlock(&lock->wait_lock);
-		return;
+	/*
+	 * We must be careful here if the fast path is enabled. If we
+	 * have no waiters queued we cannot set owner to NULL here
+	 * because of:
+	 *
+	 * foo->lock->owner = NULL;
+	 *			rtmutex_lock(foo->lock);   <- fast path
+	 *			free = atomic_dec_and_test(foo->refcnt);
+	 *			rtmutex_unlock(foo->lock); <- fast path
+	 *			if (free)
+	 *				kfree(foo);
+	 * raw_spin_unlock(foo->lock->wait_lock);
+	 *
+	 * So for the fastpath enabled kernel:
+	 *
+	 * Nothing can set the waiters bit as long as we hold
+	 * lock->wait_lock. So we do the following sequence:
+	 *
+	 *	owner = rt_mutex_owner(lock);
+	 *	clear_rt_mutex_waiters(lock);
+	 *	raw_spin_unlock(&lock->wait_lock);
+	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
+	 *		return;
+	 *	goto retry;
+	 *
+	 * The fastpath disabled variant is simple as all access to
+	 * lock->owner is serialized by lock->wait_lock:
+	 *
+	 *	lock->owner = NULL;
+	 *	raw_spin_unlock(&lock->wait_lock);
+	 */
+	while (!rt_mutex_has_waiters(lock)) {
+		/* Drops lock->wait_lock ! */
+		if (unlock_rt_mutex_safe(lock) == true)
+			return;
+		/* Relock the rtmutex and try again */
+		raw_spin_lock(&lock->wait_lock);
 	}
 
+	/*
+	 * The wakeup next waiter path does not suffer from the above
+	 * race. See the comments there.
+	 */
 	wakeup_next_waiter(lock);
 
 	raw_spin_unlock(&lock->wait_lock);
@@ -1112,7 +1284,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		return 1;
 	}
 
-	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+	/* We enforce deadlock detection for futexes */
+	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
 
 	if (ret && !rt_mutex_owner(lock)) {
 		/*
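
Note on the rtmutex.c hunks above: self-deadlock detection is now unconditional (task_blocks_on_rt_mutex() fails with -EDEADLK when owner == task even if the caller did not request detection), chain walks stop early via next_lock, and rt_mutex_start_proxy_lock() always enables detection for futex-backed locks. The following is a minimal illustrative sketch of what an in-kernel caller observes; it is not part of the patch, it assumes the pre-3.18 API in which rt_mutex_lock_interruptible() still takes a detect_deadlock argument, and the module and function names are made up.

#include <linux/module.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(demo_lock);

static int __init rtmutex_deadlock_demo_init(void)
{
	int ret;

	rt_mutex_lock(&demo_lock);

	/*
	 * Second acquisition by the owning task: task_blocks_on_rt_mutex()
	 * now sees owner == current and returns -EDEADLK instead of
	 * queueing the task behind itself.
	 */
	ret = rt_mutex_lock_interruptible(&demo_lock, 1);
	pr_info("relock returned %d (expected %d)\n", ret, -EDEADLK);

	rt_mutex_unlock(&demo_lock);
	return 0;
}

static void __exit rtmutex_deadlock_demo_exit(void)
{
}

module_init(rtmutex_deadlock_demo_init);
module_exit(rtmutex_deadlock_demo_exit);
MODULE_LICENSE("GPL");

With detect_deadlock = 0 the same situation is still caught, but rt_mutex_handle_deadlock() prints the deadlock and parks the offending task instead of returning an error to a caller that never asked for detection.
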
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index a1a1dd06421d..f6a1f3c133b1 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -24,3 +24,8 @@
 #define debug_rt_mutex_print_deadlock(w)		do { } while (0)
 #define debug_rt_mutex_detect_deadlock(w,d)		(d)
 #define debug_rt_mutex_reset_waiter(w)			do { } while (0)
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+	WARN(1, "rtmutex deadlock detected\n");
+}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 49e0a20fd010..fcc2611d3f14 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -35,6 +35,7 @@
 
 static int nocompress;
 static int noresume;
+static int nohibernate;
 static int resume_wait;
 static unsigned int resume_delay;
 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
@@ -62,6 +63,11 @@ bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
 
+bool hibernation_available(void)
+{
+	return (nohibernate == 0);
+}
+
 /**
  * hibernation_set_ops - Set the global hibernate operations.
  * @ops: Hibernation operations to use in subsequent hibernation transitions.
@@ -642,6 +648,11 @@ int hibernate(void)
 {
 	int error;
 
+	if (!hibernation_available()) {
+		pr_debug("PM: Hibernation not available.\n");
+		return -EPERM;
+	}
+
 	lock_system_sleep();
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
@@ -734,7 +745,7 @@ static int software_resume(void)
 	/*
 	 * If the user said "noresume".. bail out early.
 	 */
-	if (noresume)
+	if (noresume || !hibernation_available())
 		return 0;
 
 	/*
@@ -900,6 +911,9 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 	int i;
 	char *start = buf;
 
+	if (!hibernation_available())
+		return sprintf(buf, "[disabled]\n");
+
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
 		if (!hibernation_modes[i])
 			continue;
@@ -934,6 +948,9 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	char *p;
 	int mode = HIBERNATION_INVALID;
 
+	if (!hibernation_available())
+		return -EPERM;
+
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
@@ -1101,6 +1118,10 @@ static int __init hibernate_setup(char *str)
 		noresume = 1;
 	else if (!strncmp(str, "nocompress", 10))
 		nocompress = 1;
+	else if (!strncmp(str, "no", 2)) {
+		noresume = 1;
+		nohibernate = 1;
+	}
 	return 1;
 }
 
@@ -1125,9 +1146,23 @@ static int __init resumedelay_setup(char *str)
 	return 1;
 }
 
+static int __init nohibernate_setup(char *str)
+{
+	noresume = 1;
+	nohibernate = 1;
+	return 1;
+}
+
+static int __init kaslr_nohibernate_setup(char *str)
+{
+	return nohibernate_setup(str);
+}
+
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
 __setup("hibernate=", hibernate_setup);
 __setup("resumewait", resumewait_setup);
 __setup("resumedelay=", resumedelay_setup);
+__setup("nohibernate", nohibernate_setup);
+__setup("kaslr", kaslr_nohibernate_setup);
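
hibernation_available() gives the rest of the kernel a single predicate for "hibernation was disabled on the command line" (nohibernate, hibernate=no..., or kaslr). Below is a hypothetical caller sketch, mirroring the guards the patch adds to hibernate(), disk_show() and disk_store(); my_pm_entry() is illustrative only, and the declaration of hibernation_available() is assumed to live in include/linux/suspend.h, outside this kernel/-only diffstat.

#include <linux/suspend.h>
#include <linux/errno.h>

/* Hypothetical entry point that must not run once hibernation is disabled. */
static int my_pm_entry(void)
{
	if (!hibernation_available())
		return -EPERM;	/* same policy as hibernate() above */

	/* ... snapshot-related work ... */
	return 0;
}
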
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 573410d6647e..8e90f330f139 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -300,13 +300,11 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 		s += sprintf(s,"%s ", pm_states[i].label);
 
 #endif
-#ifdef CONFIG_HIBERNATION
-	s += sprintf(s, "%s\n", "disk");
-#else
+	if (hibernation_available())
+		s += sprintf(s, "disk ");
 	if (s != buf)
 		/* convert the last space to a newline */
 		*(s-1) = '\n';
-#endif
 	return (s - buf);
 }
 
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 98d357584cd6..526e8911460a 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -49,6 +49,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	struct snapshot_data *data;
 	int error;
 
+	if (!hibernation_available())
+		return -EPERM;
+
 	lock_system_sleep();
 
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
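
With the hibernate.c, main.c and user.c changes combined, booting with nohibernate (or with kaslr, which implies it) becomes visible to userspace: "disk" is no longer listed in /sys/power/state, /sys/power/disk reads "[disabled]" and rejects writes with EPERM, hibernate() itself fails with EPERM, and /dev/snapshot can no longer be opened. A small userspace probe, shown only as a sketch using the standard sysfs and device node paths:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[128] = "(unreadable)\n";
	ssize_t n;
	int fd;

	fd = open("/sys/power/state", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0)
			buf[n] = '\0';
		close(fd);
	}
	printf("/sys/power/state: %s", buf);
	printf("hibernation is %s\n",
	       strstr(buf, "disk") ? "advertised" : "not advertised");

	/* snapshot_open() now fails with EPERM when hibernation is off. */
	fd = open("/dev/snapshot", O_RDWR);
	if (fd < 0)
		printf("/dev/snapshot: %s\n", strerror(errno));
	else
		close(fd);
	return 0;
}
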
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ba9ed453c4ed..7de6555cfea0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -152,10 +152,6 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
 #ifdef CONFIG_SPARC
 #endif
 
-#ifdef CONFIG_SPARC64
-extern int sysctl_tsb_ratio;
-#endif
-
 #ifdef __hppa__
 extern int pwrsw_enabled;
 #endif