path: root/kernel/locking/rtmutex.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 17:54:22 -0400
commit     1bf7067c6e173dc10411704db48338ed69c05565
tree       06d731d9647c525fa598d03d7ec957ff9772ff40  /kernel/locking/rtmutex.c
parent     fc934d40178ad4e551a17e2733241d9f29fddd70
parent     68722101ec3a0e179408a13708dd020e04f54aab
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes are:

   - 'qspinlock' support, enabled on x86: queued spinlocks - these are
     now the spinlock variant used by x86 as they outperform ticket
     spinlocks in every category. (Waiman Long)

   - 'pvqspinlock' support on x86: paravirtualized variant of queued
     spinlocks. (Waiman Long, Peter Zijlstra)

   - 'qrwlock' support, enabled on x86: queued rwlocks. Similar to
     queued spinlocks, they are now the variant used by x86:

       CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
       CONFIG_QUEUED_SPINLOCKS=y
       CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
       CONFIG_QUEUED_RWLOCKS=y

   - various lockdep fixlets

   - various locking primitives cleanups, further WRITE_ONCE()
     propagation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/lockdep: Remove hard coded array size dependency
  locking/qrwlock: Don't contend with readers when setting _QW_WAITING
  lockdep: Do not break user-visible string
  locking/arch: Rename set_mb() to smp_store_mb()
  locking/arch: Add WRITE_ONCE() to set_mb()
  rtmutex: Warn if trylock is called from hard/softirq context
  arch: Remove __ARCH_HAVE_CMPXCHG
  locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
  locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
  locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
  locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
  locking/pvqspinlock, x86: Enable PV qspinlock for Xen
  locking/pvqspinlock, x86: Enable PV qspinlock for KVM
  locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
  locking/pvqspinlock: Implement simple paravirt support for the qspinlock
  locking/qspinlock: Revert to test-and-set on hypervisors
  locking/qspinlock: Use a simple write to grab the lock
  locking/qspinlock: Optimize for smaller NR_CPUS
  locking/qspinlock: Extract out code snippets for the next patch
  locking/qspinlock: Add pending bit
  ...
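One entry in the list above is small enough to illustrate concretely: the
set_mb() -> smp_store_mb() rename, which also gained WRITE_ONCE(). As a
hedged sketch of the generic shape only (architectures may provide their
own definition), the macro stores the value with WRITE_ONCE() so the
compiler cannot tear or elide the store, then issues a full memory barrier:

  /* Sketch of the generic smp_store_mb() semantics: store, then full barrier. */
  #define smp_store_mb(var, value) \
  	do { WRITE_ONCE(var, value); smp_mb(); } while (0)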
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c  13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b025295f4966..30ec5b46cd8c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -70,10 +70,10 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 }
 
 /*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
+ * We can speed up the acquire/release, if there's no debugging state to be
+ * set up.
  */
-#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+#ifndef CONFIG_DEBUG_RT_MUTEXES
 # define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
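For context on what that fast path buys, here is a minimal, illustrative
sketch (not verbatim from this file) of how rt_mutex_cmpxchg() is consumed
by the trylock fast path: an unlocked rt_mutex has lock->owner == NULL, so
a single cmpxchg from NULL to current claims the lock without taking the
internal wait_lock, and anything else falls back to the slow path. The
_sketch suffix marks the helper as hypothetical.

  static inline int rt_mutex_fasttrylock_sketch(struct rt_mutex *lock,
  						int (*slowfn)(struct rt_mutex *lock))
  {
  	/* Lock free: owner is NULL, claim it with one atomic cmpxchg. */
  	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
  		return 1;

  	/* Contended (or a debug build, where the macro is disabled): slow path. */
  	return slowfn(lock);
  }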
@@ -1443,10 +1443,17 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  *
  * @lock: the rt_mutex to be locked
  *
+ * This function can only be called in thread context. It's safe to
+ * call it from atomic regions, but not from hard interrupt or soft
+ * interrupt context.
+ *
  * Returns 1 on success and 0 on contention
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+	if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+		return 0;
+
 	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
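Finally, a hedged usage sketch of the new restriction (the lock and function
names below are made up for illustration): rt_mutex_trylock() remains legal
from thread context, including atomic regions, but after this change a call
from hard or soft interrupt context trips the WARN_ON() and simply returns 0.

  #include <linux/rtmutex.h>

  static DEFINE_RT_MUTEX(example_lock);	/* hypothetical lock, for illustration only */

  static void example_opportunistic_work(void)
  {
  	/* OK from thread/atomic context; never call this from irq or softirq. */
  	if (rt_mutex_trylock(&example_lock)) {
  		/* Acquired without sleeping: do the short critical section. */
  		rt_mutex_unlock(&example_lock);
  	} else {
  		/* Contended, or we were (incorrectly) in interrupt context. */
  	}
  }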