aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking/rtmutex_common.h
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2018-03-27 08:14:38 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-03-28 17:01:30 -0400
commitc28d62cf52d791ba5f6db7ce525ed06b86291c82 (patch)
tree683f0b9f5b8faa613a46da023b1b7a4bd204c33e /kernel/locking/rtmutex_common.h
parentac605bee0bfab40fd5d11964705e907d2d5a32de (diff)
locking/rtmutex: Handle non enqueued waiters gracefully in remove_waiter()
In -RT task_blocks_on_rt_mutex() may return with -EAGAIN due to (->pi_blocked_on == PI_WAKEUP_INPROGRESS) before it added itself as a waiter. In such a case remove_waiter() must not be called because without a waiter it will trigger the BUG_ON() statement. This was initially reported by Yimin Deng. Thomas Gleixner fixed it then with an explicit check for waiters before calling remove_waiter(). Instead of an explicit NULL check before calling rt_mutex_top_waiter() make the function return NULL if there are no waiters. With that fixed the now pointless NULL check is removed from rt_mutex_slowlock(). Reported-and-debugged-by: Yimin Deng <yimin11.deng@gmail.com> Suggested-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Link: https://lkml.kernel.org/r/CAAh1qt=DCL9aUXNxanP5BKtiPp3m+qj4yB+gDohhXPVFCxWwzg@mail.gmail.com Link: https://lkml.kernel.org/r/20180327121438.sss7hxg3crqy4ecd@linutronix.de
Diffstat (limited to 'kernel/locking/rtmutex_common.h')
-rw-r--r--  kernel/locking/rtmutex_common.h | 11 lines
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 68686b3ec3c1..d1d62f942be2 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -52,12 +52,13 @@ static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
 static inline struct rt_mutex_waiter *
 rt_mutex_top_waiter(struct rt_mutex *lock)
 {
-	struct rt_mutex_waiter *w;
-
-	w = rb_entry(lock->waiters.rb_leftmost,
-		     struct rt_mutex_waiter, tree_entry);
-	BUG_ON(w->lock != lock);
+	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
+	struct rt_mutex_waiter *w = NULL;
 
+	if (leftmost) {
+		w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
+		BUG_ON(w->lock != lock);
+	}
 	return w;
 }
 