| author | Thomas Gleixner <tglx@linutronix.de> | 2014-06-10 16:53:40 -0400 |
| --- | --- | --- |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2014-06-21 16:05:30 -0400 |
| commit | 88f2b4c15e561bb5c28709d666364f273bf54b98 (patch) | |
| tree | 7afc35714df46561b18b78bb39e70d7cbaafabe3 /kernel/locking | |
| parent | fddeca638ed428d447d60fc0942a094814073cc6 (diff) | |
rtmutex: Simplify rtmutex_slowtrylock()
Oleg noticed that rtmutex_slowtrylock() has a pointless check for
rt_mutex_owner(lock) != current.

To avoid calling try_to_take_rtmutex() we really want to check whether
the lock has an owner at all, or whether the trylock failed because the
owner is NULL but the RT_MUTEX_HAS_WAITERS bit is set. This covers the
"lock is owned by the caller" situation as well.

We can actually do this check locklessly: a trylock is taking a chance
anyway, whether or not we take lock->wait_lock to do the check.

Add comments to the function while at it.
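
For illustration, the shape of that lockless check can be sketched in plain user-space C: read the owner field once without any lock, fail the trylock immediately if it is non-NULL, and only take the internal lock when the mutex looks free. This is only a sketch of the pattern, not the kernel code; struct rtm, rtm_trylock() and the pthread/C11-atomics internals are invented for the example.

```c
/*
 * User-space sketch of the "lockless owner check first, wait_lock only
 * if the lock looks free" trylock pattern.  Invented names (struct rtm,
 * rtm_trylock); this is NOT the kernel rtmutex implementation.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct rtm {
	_Atomic(void *) owner;		/* NULL while the lock is free */
	pthread_mutex_t wait_lock;	/* protects the slow-path state */
};

static bool rtm_trylock(struct rtm *lock, void *self)
{
	/*
	 * Lockless pre-check: if the lock already has an owner, the
	 * trylock fails anyway, so taking wait_lock just to find that
	 * out buys nothing.  A trylock is a gamble either way, which is
	 * why a plain relaxed read is good enough here.
	 */
	if (atomic_load_explicit(&lock->owner, memory_order_relaxed))
		return false;

	/*
	 * The lock looked free: take wait_lock (the stand-in for the
	 * kernel's lock->wait_lock) and race for ownership.
	 */
	pthread_mutex_lock(&lock->wait_lock);
	void *expected = NULL;
	bool ret = atomic_compare_exchange_strong(&lock->owner, &expected, self);
	pthread_mutex_unlock(&lock->wait_lock);

	return ret;
}
```

In the patched kernel code the compare-and-exchange step is played by try_to_take_rt_mutex(), which has to run under wait_lock because it also deals with enqueued waiters.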
Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/locking')
-rw-r--r-- | kernel/locking/rtmutex.c | 31 |
1 file changed, 20 insertions(+), 11 deletions(-)
```diff
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index fc605941b9b8..50bc93b3552f 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -960,22 +960,31 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 /*
  * Slow path try-lock function:
  */
-static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
-	int ret = 0;
+	int ret;
+
+	/*
+	 * If the lock already has an owner we fail to get the lock.
+	 * This can be done without taking the @lock->wait_lock as
+	 * it is only being read, and this is a trylock anyway.
+	 */
+	if (rt_mutex_owner(lock))
+		return 0;
 
+	/*
+	 * The mutex has currently no owner. Lock the wait lock and
+	 * try to acquire the lock.
+	 */
 	raw_spin_lock(&lock->wait_lock);
 
-	if (likely(rt_mutex_owner(lock) != current)) {
+	ret = try_to_take_rt_mutex(lock, current, NULL);
 
-		ret = try_to_take_rt_mutex(lock, current, NULL);
-		/*
-		 * try_to_take_rt_mutex() sets the lock waiters
-		 * bit unconditionally. Clean this up.
-		 */
-		fixup_rt_mutex_waiters(lock);
-	}
+	/*
+	 * try_to_take_rt_mutex() sets the lock waiters bit
+	 * unconditionally. Clean this up.
+	 */
+	fixup_rt_mutex_waiters(lock);
 
 	raw_spin_unlock(&lock->wait_lock);
 
```