diff options
-rw-r--r-- | kernel/printk/printk.c | 181 |
1 file changed, 166 insertions, 15 deletions
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index bf2e6741ec12..63416bb2712a 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1548,6 +1548,146 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) | |||
1548 | } | 1548 | } |
1549 | 1549 | ||
1550 | /* | 1550 | /* |
1551 | * Special console_lock variants that help to reduce the risk of soft-lockups. | ||
1552 | * They allow passing console_lock to another printk() call using a busy wait. | ||
1553 | */ | ||
1554 | |||
1555 | #ifdef CONFIG_LOCKDEP | ||
1556 | static struct lockdep_map console_owner_dep_map = { | ||
1557 | .name = "console_owner" | ||
1558 | }; | ||
1559 | #endif | ||
1560 | |||
1561 | static DEFINE_RAW_SPINLOCK(console_owner_lock); | ||
1562 | static struct task_struct *console_owner; | ||
1563 | static bool console_waiter; | ||
1564 | |||
1565 | /** | ||
1566 | * console_lock_spinning_enable - mark beginning of code where another | ||
1567 | * thread might safely busy wait | ||
1568 | * | ||
1569 | * This basically converts console_lock into a spinlock. This marks | ||
1570 | * the section where the console_lock owner can not sleep, because | ||
1571 | * there may be a waiter spinning (like a spinlock). Also it must be | ||
1572 | * ready to hand over the lock at the end of the section. | ||
1573 | */ | ||
1574 | static void console_lock_spinning_enable(void) | ||
1575 | { | ||
1576 | raw_spin_lock(&console_owner_lock); | ||
1577 | console_owner = current; | ||
1578 | raw_spin_unlock(&console_owner_lock); | ||
1579 | |||
1580 | /* The waiter may spin on us after setting console_owner */ | ||
1581 | spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); | ||
1582 | } | ||
1583 | |||
1584 | /** | ||
1585 | * console_lock_spinning_disable_and_check - mark end of code where another | ||
1586 | * thread was able to busy wait and check if there is a waiter | ||
1587 | * | ||
1588 | * This is called at the end of the section where spinning is allowed. | ||
1589 | * It has two functions. First, it is a signal that it is no longer | ||
1590 | * safe to start busy waiting for the lock. Second, it checks if | ||
1591 | * there is a busy waiter and passes the lock rights to her. | ||
1592 | * | ||
1593 | * Important: Callers lose the lock if there was a busy waiter. | ||
1594 | * They must not touch items synchronized by console_lock | ||
1595 | * in this case. | ||
1596 | * | ||
1597 | * Return: 1 if the lock rights were passed, 0 otherwise. | ||
1598 | */ | ||
1599 | static int console_lock_spinning_disable_and_check(void) | ||
1600 | { | ||
1601 | int waiter; | ||
1602 | |||
1603 | raw_spin_lock(&console_owner_lock); | ||
1604 | waiter = READ_ONCE(console_waiter); | ||
1605 | console_owner = NULL; | ||
1606 | raw_spin_unlock(&console_owner_lock); | ||
1607 | |||
1608 | if (!waiter) { | ||
1609 | spin_release(&console_owner_dep_map, 1, _THIS_IP_); | ||
1610 | return 0; | ||
1611 | } | ||
1612 | |||
1613 | /* The waiter is now free to continue */ | ||
1614 | WRITE_ONCE(console_waiter, false); | ||
1615 | |||
1616 | spin_release(&console_owner_dep_map, 1, _THIS_IP_); | ||
1617 | |||
1618 | /* | ||
1619 | * Hand off console_lock to waiter. The waiter will perform | ||
1620 | * the up(). After this, the waiter is the console_lock owner. | ||
1621 | */ | ||
1622 | mutex_release(&console_lock_dep_map, 1, _THIS_IP_); | ||
1623 | return 1; | ||
1624 | } | ||
1625 | |||
1626 | /** | ||
1627 | * console_trylock_spinning - try to get console_lock by busy waiting | ||
1628 | * | ||
1629 | * This allows to busy wait for the console_lock when the current | ||
1630 | * owner is running in specially marked sections. It means that | ||
1631 | * the current owner is running and cannot reschedule until it | ||
1632 | * is ready to lose the lock. | ||
1633 | * | ||
1634 | * Return: 1 if we got the lock, 0 otherwise | ||
1635 | */ | ||
1636 | static int console_trylock_spinning(void) | ||
1637 | { | ||
1638 | struct task_struct *owner = NULL; | ||
1639 | bool waiter; | ||
1640 | bool spin = false; | ||
1641 | unsigned long flags; | ||
1642 | |||
1643 | if (console_trylock()) | ||
1644 | return 1; | ||
1645 | |||
1646 | printk_safe_enter_irqsave(flags); | ||
1647 | |||
1648 | raw_spin_lock(&console_owner_lock); | ||
1649 | owner = READ_ONCE(console_owner); | ||
1650 | waiter = READ_ONCE(console_waiter); | ||
1651 | if (!waiter && owner && owner != current) { | ||
1652 | WRITE_ONCE(console_waiter, true); | ||
1653 | spin = true; | ||
1654 | } | ||
1655 | raw_spin_unlock(&console_owner_lock); | ||
1656 | |||
1657 | /* | ||
1658 | * If there is an active printk() writing to the | ||
1659 | * consoles, instead of having it write our data too, | ||
1660 | * see if we can offload that load from the active | ||
1661 | * printer, and do some printing ourselves. | ||
1662 | * Go into a spin only if there isn't already a waiter | ||
1663 | * spinning, and there is an active printer, and | ||
1664 | * that active printer isn't us (recursive printk?). | ||
1665 | */ | ||
1666 | if (!spin) { | ||
1667 | printk_safe_exit_irqrestore(flags); | ||
1668 | return 0; | ||
1669 | } | ||
1670 | |||
1671 | /* We spin waiting for the owner to release us */ | ||
1672 | spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); | ||
1673 | /* Owner will clear console_waiter on hand off */ | ||
1674 | while (READ_ONCE(console_waiter)) | ||
1675 | cpu_relax(); | ||
1676 | spin_release(&console_owner_dep_map, 1, _THIS_IP_); | ||
1677 | |||
1678 | printk_safe_exit_irqrestore(flags); | ||
1679 | /* | ||
1680 | * The owner passed the console lock to us. | ||
1681 | * Since we did not spin on console lock, annotate | ||
1682 | * this as a trylock. Otherwise lockdep will | ||
1683 | * complain. | ||
1684 | */ | ||
1685 | mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); | ||
1686 | |||
1687 | return 1; | ||
1688 | } | ||
1689 | |||
1690 | /* | ||
1551 | * Call the console drivers, asking them to write out | 1691 | * Call the console drivers, asking them to write out |
1552 | * log_buf[start] to log_buf[end - 1]. | 1692 | * log_buf[start] to log_buf[end - 1]. |
1553 | * The console_lock must be held. | 1693 | * The console_lock must be held. |
@@ -1753,12 +1893,19 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
1753 | /* If called from the scheduler, we can not call up(). */ | 1893 | /* If called from the scheduler, we can not call up(). */ |
1754 | if (!in_sched) { | 1894 | if (!in_sched) { |
1755 | /* | 1895 | /* |
1896 | * Disable preemption to avoid being preempted while holding | ||
1897 | * console_sem which would prevent anyone from printing to | ||
1898 | * console | ||
1899 | */ | ||
1900 | preempt_disable(); | ||
1901 | /* | ||
1756 | * Try to acquire and then immediately release the console | 1902 | * Try to acquire and then immediately release the console |
1757 | * semaphore. The release will print out buffers and wake up | 1903 | * semaphore. The release will print out buffers and wake up |
1758 | * /dev/kmsg and syslog() users. | 1904 | * /dev/kmsg and syslog() users. |
1759 | */ | 1905 | */ |
1760 | if (console_trylock()) | 1906 | if (console_trylock_spinning()) |
1761 | console_unlock(); | 1907 | console_unlock(); |
1908 | preempt_enable(); | ||
1762 | } | 1909 | } |
1763 | 1910 | ||
1764 | return printed_len; | 1911 | return printed_len; |
@@ -1859,6 +2006,8 @@ static ssize_t msg_print_ext_header(char *buf, size_t size, | |||
1859 | static ssize_t msg_print_ext_body(char *buf, size_t size, | 2006 | static ssize_t msg_print_ext_body(char *buf, size_t size, |
1860 | char *dict, size_t dict_len, | 2007 | char *dict, size_t dict_len, |
1861 | char *text, size_t text_len) { return 0; } | 2008 | char *text, size_t text_len) { return 0; } |
2009 | static void console_lock_spinning_enable(void) { } | ||
2010 | static int console_lock_spinning_disable_and_check(void) { return 0; } | ||
1862 | static void call_console_drivers(const char *ext_text, size_t ext_len, | 2011 | static void call_console_drivers(const char *ext_text, size_t ext_len, |
1863 | const char *text, size_t len) {} | 2012 | const char *text, size_t len) {} |
1864 | static size_t msg_print_text(const struct printk_log *msg, | 2013 | static size_t msg_print_text(const struct printk_log *msg, |
@@ -2084,20 +2233,7 @@ int console_trylock(void) | |||
2084 | return 0; | 2233 | return 0; |
2085 | } | 2234 | } |
2086 | console_locked = 1; | 2235 | console_locked = 1; |
2087 | /* | 2236 | console_may_schedule = 0; |
2088 | * When PREEMPT_COUNT disabled we can't reliably detect if it's | ||
2089 | * safe to schedule (e.g. calling printk while holding a spin_lock), | ||
2090 | * because preempt_disable()/preempt_enable() are just barriers there | ||
2091 | * and preempt_count() is always 0. | ||
2092 | * | ||
2093 | * RCU read sections have a separate preemption counter when | ||
2094 | * PREEMPT_RCU enabled thus we must take extra care and check | ||
2095 | * rcu_preempt_depth(), otherwise RCU read sections modify | ||
2096 | * preempt_count(). | ||
2097 | */ | ||
2098 | console_may_schedule = !oops_in_progress && | ||
2099 | preemptible() && | ||
2100 | !rcu_preempt_depth(); | ||
2101 | return 1; | 2237 | return 1; |
2102 | } | 2238 | } |
2103 | EXPORT_SYMBOL(console_trylock); | 2239 | EXPORT_SYMBOL(console_trylock); |
@@ -2247,14 +2383,29 @@ skip: | |||
2247 | console_seq++; | 2383 | console_seq++; |
2248 | raw_spin_unlock(&logbuf_lock); | 2384 | raw_spin_unlock(&logbuf_lock); |
2249 | 2385 | ||
2386 | /* | ||
2387 | * While actively printing out messages, if another printk() | ||
2388 | * were to occur on another CPU, it may wait for this one to | ||
2389 | * finish. This task can not be preempted if there is a | ||
2390 | * waiter waiting to take over. | ||
2391 | */ | ||
2392 | console_lock_spinning_enable(); | ||
2393 | |||
2250 | stop_critical_timings(); /* don't trace print latency */ | 2394 | stop_critical_timings(); /* don't trace print latency */ |
2251 | call_console_drivers(ext_text, ext_len, text, len); | 2395 | call_console_drivers(ext_text, ext_len, text, len); |
2252 | start_critical_timings(); | 2396 | start_critical_timings(); |
2397 | |||
2398 | if (console_lock_spinning_disable_and_check()) { | ||
2399 | printk_safe_exit_irqrestore(flags); | ||
2400 | return; | ||
2401 | } | ||
2402 | |||
2253 | printk_safe_exit_irqrestore(flags); | 2403 | printk_safe_exit_irqrestore(flags); |
2254 | 2404 | ||
2255 | if (do_cond_resched) | 2405 | if (do_cond_resched) |
2256 | cond_resched(); | 2406 | cond_resched(); |
2257 | } | 2407 | } |
2408 | |||
2258 | console_locked = 0; | 2409 | console_locked = 0; |
2259 | 2410 | ||
2260 | /* Release the exclusive_console once it is used */ | 2411 | /* Release the exclusive_console once it is used */ |