diff options
| author | James Morris <james.l.morris@oracle.com> | 2017-07-24 20:44:18 -0400 |
|---|---|---|
| committer | James Morris <james.l.morris@oracle.com> | 2017-07-24 20:44:18 -0400 |
| commit | 53a2ebaaabc1eb8458796fec3bc1e0e80746b642 (patch) | |
| tree | 9d1f9227b49392cdd2edcc01057517da4f4b09c2 /kernel/locking/rtmutex.c | |
| parent | 3cf29931453215536916d0c4da953fce1911ced3 (diff) | |
| parent | 520eccdfe187591a51ea9ab4c1a024ae4d0f68d9 (diff) | |
sync to Linus v4.13-rc2 for subsystem developers to work against
Diffstat (limited to 'kernel/locking/rtmutex.c')
| -rw-r--r-- | kernel/locking/rtmutex.c | 62 |
1 file changed, 48 insertions, 14 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b95509416909..649dc9d3951a 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
| @@ -963,7 +963,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, | |||
| 963 | return -EDEADLK; | 963 | return -EDEADLK; |
| 964 | 964 | ||
| 965 | raw_spin_lock(&task->pi_lock); | 965 | raw_spin_lock(&task->pi_lock); |
| 966 | rt_mutex_adjust_prio(task); | ||
| 967 | waiter->task = task; | 966 | waiter->task = task; |
| 968 | waiter->lock = lock; | 967 | waiter->lock = lock; |
| 969 | waiter->prio = task->prio; | 968 | waiter->prio = task->prio; |
| @@ -1481,6 +1480,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock) | |||
| 1481 | { | 1480 | { |
| 1482 | might_sleep(); | 1481 | might_sleep(); |
| 1483 | 1482 | ||
| 1483 | mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
| 1484 | rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); | 1484 | rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); |
| 1485 | } | 1485 | } |
| 1486 | EXPORT_SYMBOL_GPL(rt_mutex_lock); | 1486 | EXPORT_SYMBOL_GPL(rt_mutex_lock); |
| @@ -1496,9 +1496,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); | |||
| 1496 | */ | 1496 | */ |
| 1497 | int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) | 1497 | int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) |
| 1498 | { | 1498 | { |
| 1499 | int ret; | ||
| 1500 | |||
| 1499 | might_sleep(); | 1501 | might_sleep(); |
| 1500 | 1502 | ||
| 1501 | return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); | 1503 | mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| 1504 | ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); | ||
| 1505 | if (ret) | ||
| 1506 | mutex_release(&lock->dep_map, 1, _RET_IP_); | ||
| 1507 | |||
| 1508 | return ret; | ||
| 1502 | } | 1509 | } |
| 1503 | EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); | 1510 | EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); |
| 1504 | 1511 | ||
| @@ -1526,11 +1533,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) | |||
| 1526 | int | 1533 | int |
| 1527 | rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) | 1534 | rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) |
| 1528 | { | 1535 | { |
| 1536 | int ret; | ||
| 1537 | |||
| 1529 | might_sleep(); | 1538 | might_sleep(); |
| 1530 | 1539 | ||
| 1531 | return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, | 1540 | mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| 1541 | ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, | ||
| 1532 | RT_MUTEX_MIN_CHAINWALK, | 1542 | RT_MUTEX_MIN_CHAINWALK, |
| 1533 | rt_mutex_slowlock); | 1543 | rt_mutex_slowlock); |
| 1544 | if (ret) | ||
| 1545 | mutex_release(&lock->dep_map, 1, _RET_IP_); | ||
| 1546 | |||
| 1547 | return ret; | ||
| 1534 | } | 1548 | } |
| 1535 | EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); | 1549 | EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); |
| 1536 | 1550 | ||
| @@ -1547,10 +1561,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); | |||
| 1547 | */ | 1561 | */ |
| 1548 | int __sched rt_mutex_trylock(struct rt_mutex *lock) | 1562 | int __sched rt_mutex_trylock(struct rt_mutex *lock) |
| 1549 | { | 1563 | { |
| 1564 | int ret; | ||
| 1565 | |||
| 1550 | if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) | 1566 | if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) |
| 1551 | return 0; | 1567 | return 0; |
| 1552 | 1568 | ||
| 1553 | return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); | 1569 | ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); |
| 1570 | if (ret) | ||
| 1571 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
| 1572 | |||
| 1573 | return ret; | ||
| 1554 | } | 1574 | } |
| 1555 | EXPORT_SYMBOL_GPL(rt_mutex_trylock); | 1575 | EXPORT_SYMBOL_GPL(rt_mutex_trylock); |
| 1556 | 1576 | ||
| @@ -1561,6 +1581,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); | |||
| 1561 | */ | 1581 | */ |
| 1562 | void __sched rt_mutex_unlock(struct rt_mutex *lock) | 1582 | void __sched rt_mutex_unlock(struct rt_mutex *lock) |
| 1563 | { | 1583 | { |
| 1584 | mutex_release(&lock->dep_map, 1, _RET_IP_); | ||
| 1564 | rt_mutex_fastunlock(lock, rt_mutex_slowunlock); | 1585 | rt_mutex_fastunlock(lock, rt_mutex_slowunlock); |
| 1565 | } | 1586 | } |
| 1566 | EXPORT_SYMBOL_GPL(rt_mutex_unlock); | 1587 | EXPORT_SYMBOL_GPL(rt_mutex_unlock); |
| @@ -1620,7 +1641,6 @@ void rt_mutex_destroy(struct rt_mutex *lock) | |||
| 1620 | lock->magic = NULL; | 1641 | lock->magic = NULL; |
| 1621 | #endif | 1642 | #endif |
| 1622 | } | 1643 | } |
| 1623 | |||
| 1624 | EXPORT_SYMBOL_GPL(rt_mutex_destroy); | 1644 | EXPORT_SYMBOL_GPL(rt_mutex_destroy); |
| 1625 | 1645 | ||
| 1626 | /** | 1646 | /** |
| @@ -1632,14 +1652,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); | |||
| 1632 | * | 1652 | * |
| 1633 | * Initializing of a locked rt lock is not allowed | 1653 | * Initializing of a locked rt lock is not allowed |
| 1634 | */ | 1654 | */ |
| 1635 | void __rt_mutex_init(struct rt_mutex *lock, const char *name) | 1655 | void __rt_mutex_init(struct rt_mutex *lock, const char *name, |
| 1656 | struct lock_class_key *key) | ||
| 1636 | { | 1657 | { |
| 1637 | lock->owner = NULL; | 1658 | lock->owner = NULL; |
| 1638 | raw_spin_lock_init(&lock->wait_lock); | 1659 | raw_spin_lock_init(&lock->wait_lock); |
| 1639 | lock->waiters = RB_ROOT; | 1660 | lock->waiters = RB_ROOT; |
| 1640 | lock->waiters_leftmost = NULL; | 1661 | lock->waiters_leftmost = NULL; |
| 1641 | 1662 | ||
| 1642 | debug_rt_mutex_init(lock, name); | 1663 | if (name && key) |
| 1664 | debug_rt_mutex_init(lock, name, key); | ||
| 1643 | } | 1665 | } |
| 1644 | EXPORT_SYMBOL_GPL(__rt_mutex_init); | 1666 | EXPORT_SYMBOL_GPL(__rt_mutex_init); |
| 1645 | 1667 | ||
| @@ -1660,7 +1682,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); | |||
| 1660 | void rt_mutex_init_proxy_locked(struct rt_mutex *lock, | 1682 | void rt_mutex_init_proxy_locked(struct rt_mutex *lock, |
| 1661 | struct task_struct *proxy_owner) | 1683 | struct task_struct *proxy_owner) |
| 1662 | { | 1684 | { |
| 1663 | __rt_mutex_init(lock, NULL); | 1685 | __rt_mutex_init(lock, NULL, NULL); |
| 1664 | debug_rt_mutex_proxy_lock(lock, proxy_owner); | 1686 | debug_rt_mutex_proxy_lock(lock, proxy_owner); |
| 1665 | rt_mutex_set_owner(lock, proxy_owner); | 1687 | rt_mutex_set_owner(lock, proxy_owner); |
| 1666 | } | 1688 | } |
| @@ -1785,12 +1807,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, | |||
| 1785 | int ret; | 1807 | int ret; |
| 1786 | 1808 | ||
| 1787 | raw_spin_lock_irq(&lock->wait_lock); | 1809 | raw_spin_lock_irq(&lock->wait_lock); |
| 1788 | |||
| 1789 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1790 | |||
| 1791 | /* sleep on the mutex */ | 1810 | /* sleep on the mutex */ |
| 1811 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1792 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); | 1812 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); |
| 1793 | 1813 | /* | |
| 1814 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | ||
| 1815 | * have to fix that up. | ||
| 1816 | */ | ||
| 1817 | fixup_rt_mutex_waiters(lock); | ||
| 1794 | raw_spin_unlock_irq(&lock->wait_lock); | 1818 | raw_spin_unlock_irq(&lock->wait_lock); |
| 1795 | 1819 | ||
| 1796 | return ret; | 1820 | return ret; |
| @@ -1822,15 +1846,25 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, | |||
| 1822 | 1846 | ||
| 1823 | raw_spin_lock_irq(&lock->wait_lock); | 1847 | raw_spin_lock_irq(&lock->wait_lock); |
| 1824 | /* | 1848 | /* |
| 1849 | * Do an unconditional try-lock, this deals with the lock stealing | ||
| 1850 | * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() | ||
| 1851 | * sets a NULL owner. | ||
| 1852 | * | ||
| 1853 | * We're not interested in the return value, because the subsequent | ||
| 1854 | * test on rt_mutex_owner() will infer that. If the trylock succeeded, | ||
| 1855 | * we will own the lock and it will have removed the waiter. If we | ||
| 1856 | * failed the trylock, we're still not owner and we need to remove | ||
| 1857 | * ourselves. | ||
| 1858 | */ | ||
| 1859 | try_to_take_rt_mutex(lock, current, waiter); | ||
| 1860 | /* | ||
| 1825 | * Unless we're the owner; we're still enqueued on the wait_list. | 1861 | * Unless we're the owner; we're still enqueued on the wait_list. |
| 1826 | * So check if we became owner, if not, take us off the wait_list. | 1862 | * So check if we became owner, if not, take us off the wait_list. |
| 1827 | */ | 1863 | */ |
| 1828 | if (rt_mutex_owner(lock) != current) { | 1864 | if (rt_mutex_owner(lock) != current) { |
| 1829 | remove_waiter(lock, waiter); | 1865 | remove_waiter(lock, waiter); |
| 1830 | fixup_rt_mutex_waiters(lock); | ||
| 1831 | cleanup = true; | 1866 | cleanup = true; |
| 1832 | } | 1867 | } |
| 1833 | |||
| 1834 | /* | 1868 | /* |
| 1835 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | 1869 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might |
| 1836 | * have to fix that up. | 1870 | * have to fix that up. |
