about summary refs log tree commit diff stats
path: root/kernel/locking/rtmutex.c
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-09-19 06:15:37 -0400
committerIngo Molnar <mingo@kernel.org>2017-06-08 04:35:49 -0400
commitf5694788ad8da5da41b501f3d6d2ae22379c4ef9 (patch)
tree660f642ba2afcd13ef71449526184480202e8d63 /kernel/locking/rtmutex.c
parent3942b77121986519ee52ab4dd4ae8f4383dfe765 (diff)
rt_mutex: Add lockdep annotations
Now that (PI) futexes have their own private RT-mutex interface and
implementation we can easily add lockdep annotations to the existing
RT-mutex interface.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--kernel/locking/rtmutex.c36
1 file changed, 29 insertions, 7 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 28cd09e635ed..43123533e9b1 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
1481{ 1481{
1482 might_sleep(); 1482 might_sleep();
1483 1483
1484 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1484 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); 1485 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
1485} 1486}
1486EXPORT_SYMBOL_GPL(rt_mutex_lock); 1487EXPORT_SYMBOL_GPL(rt_mutex_lock);
@@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
1496 */ 1497 */
1497int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) 1498int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
1498{ 1499{
1500 int ret;
1501
1499 might_sleep(); 1502 might_sleep();
1500 1503
1501 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); 1504 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1505 ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
1506 if (ret)
1507 mutex_release(&lock->dep_map, 1, _RET_IP_);
1508
1509 return ret;
1502} 1510}
1503EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); 1511EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1504 1512
@@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
1526int 1534int
1527rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) 1535rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
1528{ 1536{
1537 int ret;
1538
1529 might_sleep(); 1539 might_sleep();
1530 1540
1531 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 1541 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1542 ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1532 RT_MUTEX_MIN_CHAINWALK, 1543 RT_MUTEX_MIN_CHAINWALK,
1533 rt_mutex_slowlock); 1544 rt_mutex_slowlock);
1545 if (ret)
1546 mutex_release(&lock->dep_map, 1, _RET_IP_);
1547
1548 return ret;
1534} 1549}
1535EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); 1550EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
1536 1551
@@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
1547 */ 1562 */
1548int __sched rt_mutex_trylock(struct rt_mutex *lock) 1563int __sched rt_mutex_trylock(struct rt_mutex *lock)
1549{ 1564{
1565 int ret;
1566
1550 if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) 1567 if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
1551 return 0; 1568 return 0;
1552 1569
1553 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); 1570 ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1571 if (ret)
1572 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1573
1574 return ret;
1554} 1575}
1555EXPORT_SYMBOL_GPL(rt_mutex_trylock); 1576EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1556 1577
@@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1561 */ 1582 */
1562void __sched rt_mutex_unlock(struct rt_mutex *lock) 1583void __sched rt_mutex_unlock(struct rt_mutex *lock)
1563{ 1584{
1585 mutex_release(&lock->dep_map, 1, _RET_IP_);
1564 rt_mutex_fastunlock(lock, rt_mutex_slowunlock); 1586 rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1565} 1587}
1566EXPORT_SYMBOL_GPL(rt_mutex_unlock); 1588EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
1620 lock->magic = NULL; 1642 lock->magic = NULL;
1621#endif 1643#endif
1622} 1644}
1623
1624EXPORT_SYMBOL_GPL(rt_mutex_destroy); 1645EXPORT_SYMBOL_GPL(rt_mutex_destroy);
1625 1646
1626/** 1647/**
@@ -1632,14 +1653,15 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
1632 * 1653 *
1633 * Initializing of a locked rt lock is not allowed 1654 * Initializing of a locked rt lock is not allowed
1634 */ 1655 */
1635void __rt_mutex_init(struct rt_mutex *lock, const char *name) 1656void __rt_mutex_init(struct rt_mutex *lock, const char *name,
1657 struct lock_class_key *key)
1636{ 1658{
1637 lock->owner = NULL; 1659 lock->owner = NULL;
1638 raw_spin_lock_init(&lock->wait_lock); 1660 raw_spin_lock_init(&lock->wait_lock);
1639 lock->waiters = RB_ROOT; 1661 lock->waiters = RB_ROOT;
1640 lock->waiters_leftmost = NULL; 1662 lock->waiters_leftmost = NULL;
1641 1663
1642 debug_rt_mutex_init(lock, name); 1664 debug_rt_mutex_init(lock, name, key);
1643} 1665}
1644EXPORT_SYMBOL_GPL(__rt_mutex_init); 1666EXPORT_SYMBOL_GPL(__rt_mutex_init);
1645 1667
@@ -1660,7 +1682,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
1660void rt_mutex_init_proxy_locked(struct rt_mutex *lock, 1682void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1661 struct task_struct *proxy_owner) 1683 struct task_struct *proxy_owner)
1662{ 1684{
1663 __rt_mutex_init(lock, NULL); 1685 __rt_mutex_init(lock, NULL, NULL);
1664 debug_rt_mutex_proxy_lock(lock, proxy_owner); 1686 debug_rt_mutex_proxy_lock(lock, proxy_owner);
1665 rt_mutex_set_owner(lock, proxy_owner); 1687 rt_mutex_set_owner(lock, proxy_owner);
1666} 1688}