author	Petr Mladek <pmladek@suse.com>	2018-01-12 11:08:37 -0500
committer	Petr Mladek <pmladek@suse.com>	2018-01-16 11:21:16 -0500
commit	c162d5b4338d72deed61aa65ed0f2f4ba2bbc8ab (patch)
tree	fef8496a1822da0090a54f8c0747cb75fcdf6457
parent	dbdda842fe96f8932bae554f0adf463c27c42bc7 (diff)
printk: Hide console waiter logic into helpers
The commit ("printk: Add console owner and waiter logic to load balance console writes") made vprintk_emit() and console_unlock() even more complicated. This patch extracts the new code into 3 helper functions. They should help to keep it rather self-contained. It will be easier to use and maintain. This patch just shuffles the existing code. It does not change the functionality. Link: http://lkml.kernel.org/r/20180112160837.GD24497@linux.suse Cc: akpm@linux-foundation.org Cc: linux-mm@kvack.org Cc: Cong Wang <xiyou.wangcong@gmail.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Michal Hocko <mhocko@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Jan Kara <jack@suse.cz> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Cc: rostedt@home.goodmis.org Cc: Byungchul Park <byungchul.park@lge.com> Cc: Tejun Heo <tj@kernel.org> Cc: Pavel Machek <pavel@ucw.cz> Cc: linux-kernel@vger.kernel.org Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org> Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Signed-off-by: Petr Mladek <pmladek@suse.com>
-rw-r--r--	kernel/printk/printk.c	245
1 file changed, 148 insertions(+), 97 deletions(-)
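
For readers who want to see the hand-off scheme in isolation, the sketch below is a minimal userspace model of what the three new helpers do, assuming a POSIX semaphore in place of console_sem, a pthread mutex in place of console_owner_lock, and plain integer thread ids. The file name and function names (handoff_model.c, spinning_enable(), trylock_spinning(), printer()) are illustrative stand-ins; the lockdep annotations and the printk_safe/IRQ handling are omitted. This is not the kernel code, which follows in the hunks below.

/* handoff_model.c - hypothetical userspace model of the console_lock hand-off.
 * Build: cc -std=c11 -pthread handoff_model.c
 */
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static sem_t console_sem;			/* stands in for console_sem, initialized to 1 */
static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
static long console_owner;			/* 0 == nobody is printing */
static atomic_bool console_waiter;

/* console_lock_spinning_enable(): from here on the owner may be spun on. */
static void spinning_enable(long self)
{
	pthread_mutex_lock(&owner_lock);
	console_owner = self;
	pthread_mutex_unlock(&owner_lock);
}

/* console_lock_spinning_disable_and_check(): true means the semaphore was
 * handed to a busy waiter, so the caller must NOT sem_post() it. */
static bool spinning_disable_and_check(void)
{
	pthread_mutex_lock(&owner_lock);
	bool waiter = atomic_load(&console_waiter);
	console_owner = 0;
	pthread_mutex_unlock(&owner_lock);

	if (!waiter)
		return false;
	atomic_store(&console_waiter, false);	/* releases the spinning thread */
	return true;
}

/* console_trylock_spinning(): trylock, or busy wait for a hand-off. */
static bool trylock_spinning(long self)
{
	if (sem_trywait(&console_sem) == 0)
		return true;

	bool spin = false;
	pthread_mutex_lock(&owner_lock);
	if (!atomic_load(&console_waiter) && console_owner && console_owner != self) {
		atomic_store(&console_waiter, true);
		spin = true;
	}
	pthread_mutex_unlock(&owner_lock);
	if (!spin)
		return false;			/* nobody to spin on (or recursion) */

	while (atomic_load(&console_waiter))
		sched_yield();			/* cpu_relax() in the kernel */
	return true;				/* we own console_sem without sem_wait() */
}

/* A "printer": mirrors the vprintk_emit()/console_unlock() call pattern. */
static void *printer(void *arg)
{
	long self = (long)(intptr_t)arg;

	if (!trylock_spinning(self))
		return NULL;			/* the current owner flushes for us */

	spinning_enable(self);
	printf("thread %ld writes the pending records\n", self);
	if (spinning_disable_and_check())
		return NULL;			/* lock handed over, do not release */

	sem_post(&console_sem);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	sem_init(&console_sem, 0, 1);
	pthread_create(&a, NULL, printer, (void *)(intptr_t)1);
	pthread_create(&b, NULL, printer, (void *)(intptr_t)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

The real helpers follow the same shape: console_unlock() brackets each call_console_drivers() call with console_lock_spinning_enable() and console_lock_spinning_disable_and_check() and returns early on a hand-off, while vprintk_emit() switches from console_trylock() to console_trylock_spinning().
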
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 040fb948924e..3a475f58b749 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -86,15 +86,8 @@ EXPORT_SYMBOL_GPL(console_drivers);
 static struct lockdep_map console_lock_dep_map = {
 	.name = "console_lock"
 };
-static struct lockdep_map console_owner_dep_map = {
-	.name = "console_owner"
-};
 #endif
 
-static DEFINE_RAW_SPINLOCK(console_owner_lock);
-static struct task_struct *console_owner;
-static bool console_waiter;
-
 enum devkmsg_log_bits {
 	__DEVKMSG_LOG_BIT_ON = 0,
 	__DEVKMSG_LOG_BIT_OFF,
@@ -1551,6 +1544,146 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
 }
 
 /*
+ * Special console_lock variants that help to reduce the risk of soft-lockups.
+ * They allow to pass console_lock to another printk() call using a busy wait.
+ */
+
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map console_owner_dep_map = {
+	.name = "console_owner"
+};
+#endif
+
+static DEFINE_RAW_SPINLOCK(console_owner_lock);
+static struct task_struct *console_owner;
+static bool console_waiter;
+
+/**
+ * console_lock_spinning_enable - mark beginning of code where another
+ *	thread might safely busy wait
+ *
+ * This basically converts console_lock into a spinlock. This marks
+ * the section where the console_lock owner can not sleep, because
+ * there may be a waiter spinning (like a spinlock). Also it must be
+ * ready to hand over the lock at the end of the section.
+ */
+static void console_lock_spinning_enable(void)
+{
+	raw_spin_lock(&console_owner_lock);
+	console_owner = current;
+	raw_spin_unlock(&console_owner_lock);
+
+	/* The waiter may spin on us after setting console_owner */
+	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+}
+
+/**
+ * console_lock_spinning_disable_and_check - mark end of code where another
+ *	thread was able to busy wait and check if there is a waiter
+ *
+ * This is called at the end of the section where spinning is allowed.
+ * It has two functions. First, it is a signal that it is no longer
+ * safe to start busy waiting for the lock. Second, it checks if
+ * there is a busy waiter and passes the lock rights to her.
+ *
+ * Important: Callers lose the lock if there was a busy waiter.
+ *	They must not touch items synchronized by console_lock
+ *	in this case.
+ *
+ * Return: 1 if the lock rights were passed, 0 otherwise.
+ */
+static int console_lock_spinning_disable_and_check(void)
+{
+	int waiter;
+
+	raw_spin_lock(&console_owner_lock);
+	waiter = READ_ONCE(console_waiter);
+	console_owner = NULL;
+	raw_spin_unlock(&console_owner_lock);
+
+	if (!waiter) {
+		spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+		return 0;
+	}
+
+	/* The waiter is now free to continue */
+	WRITE_ONCE(console_waiter, false);
+
+	spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+
+	/*
+	 * Hand off console_lock to waiter. The waiter will perform
+	 * the up(). After this, the waiter is the console_lock owner.
+	 */
+	mutex_release(&console_lock_dep_map, 1, _THIS_IP_);
+	return 1;
+}
+
+/**
+ * console_trylock_spinning - try to get console_lock by busy waiting
+ *
+ * This allows to busy wait for the console_lock when the current
+ * owner is running in specially marked sections. It means that
+ * the current owner is running and cannot reschedule until it
+ * is ready to lose the lock.
+ *
+ * Return: 1 if we got the lock, 0 otherwise
+ */
+static int console_trylock_spinning(void)
+{
+	struct task_struct *owner = NULL;
+	bool waiter;
+	bool spin = false;
+	unsigned long flags;
+
+	if (console_trylock())
+		return 1;
+
+	printk_safe_enter_irqsave(flags);
+
+	raw_spin_lock(&console_owner_lock);
+	owner = READ_ONCE(console_owner);
+	waiter = READ_ONCE(console_waiter);
+	if (!waiter && owner && owner != current) {
+		WRITE_ONCE(console_waiter, true);
+		spin = true;
+	}
+	raw_spin_unlock(&console_owner_lock);
+
+	/*
+	 * If there is an active printk() writing to the
+	 * consoles, instead of having it write our data too,
+	 * see if we can offload that load from the active
+	 * printer, and do some printing ourselves.
+	 * Go into a spin only if there isn't already a waiter
+	 * spinning, and there is an active printer, and
+	 * that active printer isn't us (recursive printk?).
+	 */
+	if (!spin) {
+		printk_safe_exit_irqrestore(flags);
+		return 0;
+	}
+
+	/* We spin waiting for the owner to release us */
+	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+	/* Owner will clear console_waiter on hand off */
+	while (READ_ONCE(console_waiter))
+		cpu_relax();
+	spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+
+	printk_safe_exit_irqrestore(flags);
+	/*
+	 * The owner passed the console lock to us.
+	 * Since we did not spin on console lock, annotate
+	 * this as a trylock. Otherwise lockdep will
+	 * complain.
+	 */
+	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+
+	return 1;
+}
+
+/*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
  * The console_lock must be held.
@@ -1760,56 +1893,8 @@ asmlinkage int vprintk_emit(int facility, int level,
 		 * semaphore. The release will print out buffers and wake up
 		 * /dev/kmsg and syslog() users.
 		 */
-		if (console_trylock()) {
+		if (console_trylock_spinning())
 			console_unlock();
-		} else {
-			struct task_struct *owner = NULL;
-			bool waiter;
-			bool spin = false;
-
-			printk_safe_enter_irqsave(flags);
-
-			raw_spin_lock(&console_owner_lock);
-			owner = READ_ONCE(console_owner);
-			waiter = READ_ONCE(console_waiter);
-			if (!waiter && owner && owner != current) {
-				WRITE_ONCE(console_waiter, true);
-				spin = true;
-			}
-			raw_spin_unlock(&console_owner_lock);
-
-			/*
-			 * If there is an active printk() writing to the
-			 * consoles, instead of having it write our data too,
-			 * see if we can offload that load from the active
-			 * printer, and do some printing ourselves.
-			 * Go into a spin only if there isn't already a waiter
-			 * spinning, and there is an active printer, and
-			 * that active printer isn't us (recursive printk?).
-			 */
-			if (spin) {
-				/* We spin waiting for the owner to release us */
-				spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
-				/* Owner will clear console_waiter on hand off */
-				while (READ_ONCE(console_waiter))
-					cpu_relax();
-
-				spin_release(&console_owner_dep_map, 1, _THIS_IP_);
-				printk_safe_exit_irqrestore(flags);
-
-				/*
-				 * The owner passed the console lock to us.
-				 * Since we did not spin on console lock, annotate
-				 * this as a trylock. Otherwise lockdep will
-				 * complain.
-				 */
-				mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
-				console_unlock();
-				printk_safe_enter_irqsave(flags);
-			}
-			printk_safe_exit_irqrestore(flags);
-
-		}
 	}
 
 	return printed_len;
@@ -1910,6 +1995,8 @@ static ssize_t msg_print_ext_header(char *buf, size_t size,
 static ssize_t msg_print_ext_body(char *buf, size_t size,
 				  char *dict, size_t dict_len,
 				  char *text, size_t text_len) { return 0; }
+static void console_lock_spinning_enable(void) { }
+static int console_lock_spinning_disable_and_check(void) { return 0; }
 static void call_console_drivers(const char *ext_text, size_t ext_len,
 				 const char *text, size_t len) {}
 static size_t msg_print_text(const struct printk_log *msg,
@@ -2196,7 +2283,6 @@ void console_unlock(void)
 	static u64 seen_seq;
 	unsigned long flags;
 	bool wake_klogd = false;
-	bool waiter = false;
 	bool do_cond_resched, retry;
 
 	if (console_suspended) {
@@ -2291,31 +2377,16 @@ skip:
 		 * finish. This task can not be preempted if there is a
 		 * waiter waiting to take over.
 		 */
-		raw_spin_lock(&console_owner_lock);
-		console_owner = current;
-		raw_spin_unlock(&console_owner_lock);
-
-		/* The waiter may spin on us after setting console_owner */
-		spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+		console_lock_spinning_enable();
 
 		stop_critical_timings();	/* don't trace print latency */
 		call_console_drivers(ext_text, ext_len, text, len);
 		start_critical_timings();
 
-		raw_spin_lock(&console_owner_lock);
-		waiter = READ_ONCE(console_waiter);
-		console_owner = NULL;
-		raw_spin_unlock(&console_owner_lock);
-
-		/*
-		 * If there is a waiter waiting for us, then pass the
-		 * rest of the work load over to that waiter.
-		 */
-		if (waiter)
-			break;
-
-		/* There was no waiter, and nothing will spin on us here */
-		spin_release(&console_owner_dep_map, 1, _THIS_IP_);
+		if (console_lock_spinning_disable_and_check()) {
+			printk_safe_exit_irqrestore(flags);
+			return;
+		}
 
 		printk_safe_exit_irqrestore(flags);
 
@@ -2323,26 +2394,6 @@ skip:
 			cond_resched();
 	}
 
-	/*
-	 * If there is an active waiter waiting on the console_lock.
-	 * Pass off the printing to the waiter, and the waiter
-	 * will continue printing on its CPU, and when all writing
-	 * has finished, the last printer will wake up klogd.
-	 */
-	if (waiter) {
-		WRITE_ONCE(console_waiter, false);
-		/* The waiter is now free to continue */
-		spin_release(&console_owner_dep_map, 1, _THIS_IP_);
-		/*
-		 * Hand off console_lock to waiter. The waiter will perform
-		 * the up(). After this, the waiter is the console_lock owner.
-		 */
-		mutex_release(&console_lock_dep_map, 1, _THIS_IP_);
-		printk_safe_exit_irqrestore(flags);
-		/* Note, if waiter is set, logbuf_lock is not held */
-		return;
-	}
-
 	console_locked = 0;
 
 	/* Release the exclusive_console once it is used */