Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h  238
1 file changed, 140 insertions(+), 98 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 908b309d60d7..55bde94b9572 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -180,6 +180,8 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         struct task_struct *t = current;
 
         lockdep_assert_held(&rnp->lock);
+        WARN_ON_ONCE(rdp->mynode != rnp);
+        WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 
         /*
          * Decide where to queue the newly blocked task.  In theory,
@@ -261,6 +263,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
                 rnp->gp_tasks = &t->rcu_node_entry;
         if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                 rnp->exp_tasks = &t->rcu_node_entry;
+        WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
+                     !(rnp->qsmask & rdp->grpmask));
+        WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
+                     !(rnp->expmask & rdp->grpmask));
         raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
         /*
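A note on the new WARN_ON_ONCE() conditions: "!x != !y" is the usual C idiom for a logical XOR of truth values. The "!" collapses an arbitrary nonzero mask to 1, so the warning fires exactly when the blocked-state bit and the corresponding rcu_node mask disagree. A minimal standalone demo (plain userspace C, not kernel code):

#include <assert.h>

/* 1 iff exactly one of x, y is nonzero: a logical XOR of truth values. */
static int truth_mismatch(unsigned long x, unsigned long y)
{
        return !x != !y;
}

int main(void)
{
        assert(truth_mismatch(0x0, 0x8));   /* disagree: would WARN */
        assert(!truth_mismatch(0x4, 0x8));  /* both nonzero: consistent */
        assert(!truth_mismatch(0x0, 0x0));  /* both zero: consistent */
        return 0;
}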
@@ -482,6 +488,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                 rnp = t->rcu_blocked_node;
                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
+                WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
                 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
                 empty_exp = sync_rcu_preempt_exp_done(rnp);
                 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -495,10 +502,10 @@ void rcu_read_unlock_special(struct task_struct *t)
                 if (&t->rcu_node_entry == rnp->exp_tasks)
                         rnp->exp_tasks = np;
                 if (IS_ENABLED(CONFIG_RCU_BOOST)) {
-                        if (&t->rcu_node_entry == rnp->boost_tasks)
-                                rnp->boost_tasks = np;
                         /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
                         drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+                        if (&t->rcu_node_entry == rnp->boost_tasks)
+                                rnp->boost_tasks = np;
                 }
 
                 /*
@@ -636,10 +643,17 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
+        struct task_struct *t;
+
         RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
         WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-        if (rcu_preempt_has_tasks(rnp))
+        if (rcu_preempt_has_tasks(rnp)) {
                 rnp->gp_tasks = rnp->blkd_tasks.next;
+                t = container_of(rnp->gp_tasks, struct task_struct,
+                                 rcu_node_entry);
+                trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
+                                                rnp->gpnum, t->pid);
+        }
         WARN_ON_ONCE(rnp->qsmask);
 }
 
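For readers unfamiliar with the container_of() step above: rnp->gp_tasks points at a list node embedded inside a task, and container_of() recovers the enclosing structure by subtracting the member's offset. A standalone sketch with hypothetical stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next, *prev; };

struct task {                           /* stand-in for struct task_struct */
        int pid;
        struct list_node rcu_node_entry;
};

int main(void)
{
        struct task t = { .pid = 42 };
        struct list_node *entry = &t.rcu_node_entry;  /* like rnp->gp_tasks */
        struct task *owner = container_of(entry, struct task, rcu_node_entry);

        printf("pid=%d\n", owner->pid); /* prints 42 */
        return 0;
}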
@@ -1788,23 +1802,62 @@ bool rcu_is_nocb_cpu(int cpu)
 }
 
 /*
- * Kick the leader kthread for this NOCB group.
+ * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
+ * and this function releases it.
  */
-static void wake_nocb_leader(struct rcu_data *rdp, bool force)
+static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
+                               unsigned long flags)
+        __releases(rdp->nocb_lock)
 {
         struct rcu_data *rdp_leader = rdp->nocb_leader;
 
-        if (!READ_ONCE(rdp_leader->nocb_kthread))
+        lockdep_assert_held(&rdp->nocb_lock);
+        if (!READ_ONCE(rdp_leader->nocb_kthread)) {
+                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 return;
-        if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+        }
+        if (rdp_leader->nocb_leader_sleep || force) {
                 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
                 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+                del_timer(&rdp->nocb_timer);
+                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
                 swake_up(&rdp_leader->nocb_wq);
+        } else {
+                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
         }
 }
 
 /*
+ * Kick the leader kthread for this NOCB group, but caller has not
+ * acquired locks.
+ */
+static void wake_nocb_leader(struct rcu_data *rdp, bool force)
+{
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+        __wake_nocb_leader(rdp, force, flags);
+}
+
+/*
+ * Arrange to wake the leader kthread for this NOCB group at some
+ * future time when it is safe to do so.
+ */
+static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
+                                   const char *reason)
+{
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+        if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
+                mod_timer(&rdp->nocb_timer, jiffies + 1);
+        WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
+        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
+        raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+}
+
+/*
  * Does the specified CPU need an RCU callback for the specified flavor
  * of rcu_barrier()?
  */
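The split above follows a common kernel convention: the double-underscore helper is entered with ->nocb_lock held and releases it on every return path (hence the __releases() annotation), while the plain-named wrapper acquires the lock itself. Note also that the READ_ONCE() on ->nocb_leader_sleep can be dropped in the helper because the field is now read under the lock. A userspace pthreads sketch of the same shape, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct group {
        pthread_mutex_t lock;
        bool leader_sleeping;
};

/* Entered with grp->lock held; releases it on every path. */
static void __wake_leader(struct group *grp, bool force)
{
        if (grp->leader_sleeping || force) {
                grp->leader_sleeping = false;   /* plain write: under lock */
                pthread_mutex_unlock(&grp->lock);
                printf("wake leader\n");        /* swake_up() stand-in */
                return;
        }
        pthread_mutex_unlock(&grp->lock);       /* nothing to do */
}

/* Lockless-caller wrapper, like wake_nocb_leader(). */
static void wake_leader(struct group *grp, bool force)
{
        pthread_mutex_lock(&grp->lock);
        __wake_leader(grp, force);
}

int main(void)
{
        struct group g = { PTHREAD_MUTEX_INITIALIZER, true };

        wake_leader(&g, false);
        return 0;
}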
@@ -1891,11 +1944,8 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
                                             TPS("WakeEmpty"));
                 } else {
-                        WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE);
-                        /* Store ->nocb_defer_wakeup before ->rcu_urgent_qs. */
-                        smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
-                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                            TPS("WakeEmptyIsDeferred"));
+                        wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
+                                               TPS("WakeEmptyIsDeferred"));
                 }
                 rdp->qlen_last_fqs_check = 0;
         } else if (len > rdp->qlen_last_fqs_check + qhimark) {
@@ -1905,11 +1955,8 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
                         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
                                             TPS("WakeOvf"));
                 } else {
-                        WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_FORCE);
-                        /* Store ->nocb_defer_wakeup before ->rcu_urgent_qs. */
-                        smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
-                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                            TPS("WakeOvfIsDeferred"));
+                        wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
+                                               TPS("WakeOvfIsDeferred"));
                 }
                 rdp->qlen_last_fqs_check = LONG_MAX / 2;
         } else {
@@ -1961,30 +2008,19 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
  * not a no-CBs CPU.
  */
-static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
+static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
                                                      struct rcu_data *rdp,
                                                      unsigned long flags)
 {
-        long ql = rsp->orphan_done.len;
-        long qll = rsp->orphan_done.len_lazy;
-
-        /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
+        RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_nocb_adopt_orphan_cbs() invoked with irqs enabled!!!");
         if (!rcu_is_nocb_cpu(smp_processor_id()))
-                return false;
-
-        /* First, enqueue the donelist, if any.  This preserves CB ordering. */
-        if (rsp->orphan_done.head) {
-                __call_rcu_nocb_enqueue(rdp, rcu_cblist_head(&rsp->orphan_done),
-                                        rcu_cblist_tail(&rsp->orphan_done),
-                                        ql, qll, flags);
-        }
-        if (rsp->orphan_pend.head) {
-                __call_rcu_nocb_enqueue(rdp, rcu_cblist_head(&rsp->orphan_pend),
-                                        rcu_cblist_tail(&rsp->orphan_pend),
-                                        ql, qll, flags);
-        }
-        rcu_cblist_init(&rsp->orphan_done);
-        rcu_cblist_init(&rsp->orphan_pend);
+                return false; /* Not NOCBs CPU, caller must migrate CBs. */
+        __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
+                                rcu_segcblist_tail(&rdp->cblist),
+                                rcu_segcblist_n_cbs(&rdp->cblist),
+                                rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags);
+        rcu_segcblist_init(&rdp->cblist);
+        rcu_segcblist_disable(&rdp->cblist);
         return true;
 }
 
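The rewritten adoption path grafts the outgoing CPU's entire callback list onto the NOCB queue in a single enqueue and then reinitializes the source list. The underlying head/tail-pointer splice looks like this standalone sketch (hypothetical types; the real rcu_segcblist additionally tracks per-segment tails and counts):

#include <assert.h>
#include <stddef.h>

struct cb { struct cb *next; };

struct cblist {
        struct cb *head;
        struct cb **tail;       /* points at head when the list is empty */
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

/* Move every callback from src onto the end of dst; src ends up empty. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
        if (!src->head)
                return;
        *dst->tail = src->head;
        dst->tail = src->tail;
        cblist_init(src);
}

int main(void)
{
        struct cblist a, b;
        struct cb c1, c2;

        cblist_init(&a);
        cblist_init(&b);
        b.head = &c1; c1.next = &c2; c2.next = NULL; b.tail = &c2.next;
        cblist_splice(&a, &b);
        assert(a.head == &c1 && !b.head);
        return 0;
}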
@@ -2031,6 +2067,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 static void nocb_leader_wait(struct rcu_data *my_rdp)
 {
         bool firsttime = true;
+        unsigned long flags;
         bool gotcbs;
         struct rcu_data *rdp;
         struct rcu_head **tail;
@@ -2039,13 +2076,17 @@ wait_again:
 
         /* Wait for callbacks to appear. */
         if (!rcu_nocb_poll) {
-                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
+                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
                 swait_event_interruptible(my_rdp->nocb_wq,
                                 !READ_ONCE(my_rdp->nocb_leader_sleep));
-                /* Memory barrier handled by smp_mb() calls below and repoll. */
+                raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
+                my_rdp->nocb_leader_sleep = true;
+                WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+                del_timer(&my_rdp->nocb_timer);
+                raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
         } else if (firsttime) {
                 firsttime = false; /* Don't drown trace log with "Poll"! */
-                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
+                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
         }
 
         /*
@@ -2054,7 +2095,7 @@ wait_again:
          * nocb_gp_head, where they await a grace period.
          */
         gotcbs = false;
-        smp_mb(); /* wakeup before ->nocb_head reads. */
+        smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */
         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
                 rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
                 if (!rdp->nocb_gp_head)
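The updated barrier comment reflects that the leader now sets ->nocb_leader_sleep before this scan, so the full fence orders that store against the ->nocb_head reads. This is the classic store-fence-load pairing: with a full fence on both sides, at least one party observes the other's store, so an enqueued callback cannot be missed by a leader that goes to sleep. A minimal C11 sketch of the pairing (hypothetical variable names, not kernel code):

#include <stdatomic.h>
#include <stdint.h>

static atomic_bool leader_sleep;
static atomic_uintptr_t cb_head;

/* Leader: announce intent to sleep, then scan for work. */
static int leader_may_sleep(void)
{
        atomic_store_explicit(&leader_sleep, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
        return atomic_load_explicit(&cb_head, memory_order_relaxed) == 0;
}

/* Enqueuer: publish work, then check whether the leader needs a wake. */
static int enqueue_needs_wake(uintptr_t cb)
{
        atomic_store_explicit(&cb_head, cb, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
        return atomic_load_explicit(&leader_sleep, memory_order_relaxed);
}

int main(void)
{
        int ok = leader_may_sleep();            /* no work: may sleep */

        ok = ok && enqueue_needs_wake((uintptr_t)&ok); /* sees sleeper */
        return ok ? 0 : 1;
}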
@@ -2066,56 +2107,41 @@ wait_again:
                 gotcbs = true;
         }
 
-        /*
-         * If there were no callbacks, sleep a bit, rescan after a
-         * memory barrier, and go retry.
-         */
+        /* No callbacks?  Sleep a bit if polling, and go retry. */
         if (unlikely(!gotcbs)) {
-                if (!rcu_nocb_poll)
-                        trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
-                                            "WokeEmpty");
                 WARN_ON(signal_pending(current));
-                schedule_timeout_interruptible(1);
-
-                /* Rescan in case we were a victim of memory ordering. */
-                my_rdp->nocb_leader_sleep = true;
-                smp_mb();  /* Ensure _sleep true before scan. */
-                for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
-                        if (READ_ONCE(rdp->nocb_head)) {
-                                /* Found CB, so short-circuit next wait. */
-                                my_rdp->nocb_leader_sleep = false;
-                                break;
-                        }
+                if (rcu_nocb_poll) {
+                        schedule_timeout_interruptible(1);
+                } else {
+                        trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
+                                            TPS("WokeEmpty"));
+                }
                 goto wait_again;
         }
 
         /* Wait for one grace period. */
         rcu_nocb_wait_gp(my_rdp);
 
-        /*
-         * We left ->nocb_leader_sleep unset to reduce cache thrashing.
-         * We set it now, but recheck for new callbacks while
-         * traversing our follower list.
-         */
-        my_rdp->nocb_leader_sleep = true;
-        smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
-
         /* Each pass through the following loop wakes a follower, if needed. */
         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
-                if (READ_ONCE(rdp->nocb_head))
+                if (!rcu_nocb_poll &&
+                    READ_ONCE(rdp->nocb_head) &&
+                    READ_ONCE(my_rdp->nocb_leader_sleep)) {
+                        raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
                         my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
+                        raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
+                }
                 if (!rdp->nocb_gp_head)
                         continue; /* No CBs, so no need to wake follower. */
 
                 /* Append callbacks to follower's "done" list. */
-                tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
+                raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+                tail = rdp->nocb_follower_tail;
+                rdp->nocb_follower_tail = rdp->nocb_gp_tail;
                 *tail = rdp->nocb_gp_head;
-                smp_mb__after_atomic(); /* Store *tail before wakeup. */
+                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
-                        /*
-                         * List was empty, wake up the follower.
-                         * Memory barriers supplied by atomic_long_add().
-                         */
+                        /* List was empty, so wake up the follower.  */
                         swake_up(&rdp->nocb_wq);
                 }
         }
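The splice now runs under ->nocb_lock instead of relying on xchg() plus smp_mb__after_atomic(), but the emptiness test is unchanged: the pre-splice tail pointer still aims at ->nocb_follower_head exactly when the follower's list was empty, which is what gates the wakeup. A standalone sketch of that test (hypothetical types):

#include <assert.h>
#include <stddef.h>

struct cb { struct cb *next; };

struct queue {
        struct cb *head;
        struct cb **tail;       /* == &head while the queue is empty */
};

/* Append a chain; returns nonzero if the queue was empty beforehand,
 * i.e. the consumer may be asleep and needs a wakeup. */
static int append_was_empty(struct queue *q, struct cb *chain_head,
                            struct cb **chain_tail)
{
        struct cb **tail = q->tail;     /* snapshot, like "tail" above */

        q->tail = chain_tail;
        *tail = chain_head;
        return tail == &q->head;
}

int main(void)
{
        struct queue q = { NULL, &q.head };
        struct cb c1 = { NULL }, c2 = { NULL };

        assert(append_was_empty(&q, &c1, &c1.next));    /* was empty: wake */
        assert(!append_was_empty(&q, &c2, &c2.next));   /* not empty: skip */
        return 0;
}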
@@ -2131,28 +2157,16 @@ wait_again:
  */
 static void nocb_follower_wait(struct rcu_data *rdp)
 {
-        bool firsttime = true;
-
         for (;;) {
-                if (!rcu_nocb_poll) {
-                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                            "FollowerSleep");
-                        swait_event_interruptible(rdp->nocb_wq,
-                                                 READ_ONCE(rdp->nocb_follower_head));
-                } else if (firsttime) {
-                        /* Don't drown trace log with "Poll"! */
-                        firsttime = false;
-                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
-                }
+                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
+                swait_event_interruptible(rdp->nocb_wq,
+                                         READ_ONCE(rdp->nocb_follower_head));
                 if (smp_load_acquire(&rdp->nocb_follower_head)) {
                         /* ^^^ Ensure CB invocation follows _head test. */
                         return;
                 }
-                if (!rcu_nocb_poll)
-                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                            "WokeEmpty");
                 WARN_ON(signal_pending(current));
-                schedule_timeout_interruptible(1);
+                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
         }
 }
 
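The retained smp_load_acquire() is what lets the follower dereference the list without holding ->nocb_lock: the leader fully publishes the spliced callbacks before the head becomes visible, and the acquire load pairs with that publication. A minimal C11 sketch of the publish/consume pairing (hypothetical names, not kernel code):

#include <stdatomic.h>
#include <stddef.h>

struct cb { int data; struct cb *next; };

static _Atomic(struct cb *) follower_head;

/* Producer: initialize the node fully, then publish with release. */
static void publish(struct cb *c)
{
        c->next = NULL;
        c->data = 42;
        atomic_store_explicit(&follower_head, c, memory_order_release);
}

/* Consumer: acquire-load the head; safe to dereference if non-NULL. */
static struct cb *take(void)
{
        return atomic_load_explicit(&follower_head, memory_order_acquire);
}

int main(void)
{
        struct cb c;

        publish(&c);
        return take()->data == 42 ? 0 : 1;
}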
@@ -2165,6 +2179,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 static int rcu_nocb_kthread(void *arg)
 {
         int c, cl;
+        unsigned long flags;
         struct rcu_head *list;
         struct rcu_head *next;
         struct rcu_head **tail;
@@ -2179,11 +2194,14 @@ static int rcu_nocb_kthread(void *arg)
                 nocb_follower_wait(rdp);
 
                 /* Pull the ready-to-invoke callbacks onto local list. */
-                list = READ_ONCE(rdp->nocb_follower_head);
+                raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+                list = rdp->nocb_follower_head;
+                rdp->nocb_follower_head = NULL;
+                tail = rdp->nocb_follower_tail;
+                rdp->nocb_follower_tail = &rdp->nocb_follower_head;
+                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 BUG_ON(!list);
-                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
-                WRITE_ONCE(rdp->nocb_follower_head, NULL);
-                tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
+                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
 
                 /* Each pass through the following loop invokes a callback. */
                 trace_rcu_batch_start(rdp->rsp->name,
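Pulling the whole follower list onto a local list under a brief lock hold, then dropping the lock before invoking callbacks, is the standard "detach and drain" pattern; it replaces the WRITE_ONCE()/xchg() dance and keeps lock hold times short. A pthreads sketch (hypothetical types, not kernel code):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct cb { void (*func)(struct cb *); struct cb *next; };

struct worker {
        pthread_mutex_t lock;
        struct cb *head;
        struct cb **tail;
};

static void drain(struct worker *w)
{
        struct cb *list, *next;

        /* Detach the entire pending list under the lock... */
        pthread_mutex_lock(&w->lock);
        list = w->head;
        w->head = NULL;
        w->tail = &w->head;
        pthread_mutex_unlock(&w->lock);

        /* ...then invoke callbacks with the lock dropped. */
        for (; list; list = next) {
                next = list->next;
                list->func(list);
        }
}

static void hello(struct cb *unused)
{
        (void)unused;
        printf("cb invoked\n");
}

int main(void)
{
        struct worker w = { PTHREAD_MUTEX_INITIALIZER, NULL, &w.head };
        struct cb c = { hello, NULL };

        pthread_mutex_lock(&w.lock);
        *w.tail = &c;
        w.tail = &c.next;
        pthread_mutex_unlock(&w.lock);
        drain(&w);
        return 0;
}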
@@ -2226,18 +2244,39 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
 }
 
 /* Do a deferred wakeup of rcu_nocb_kthread(). */
-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
 {
+        unsigned long flags;
         int ndw;
 
-        if (!rcu_nocb_need_deferred_wakeup(rdp))
+        raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+        if (!rcu_nocb_need_deferred_wakeup(rdp)) {
+                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 return;
+        }
         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
         WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-        wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE);
+        __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
         trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 }
 
+/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+static void do_nocb_deferred_wakeup_timer(unsigned long x)
+{
+        do_nocb_deferred_wakeup_common((struct rcu_data *)x);
+}
+
+/*
+ * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
+ * This means we do an inexact common-case check.  Note that if
+ * we miss, ->nocb_timer will eventually clean things up.
+ */
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+        if (rcu_nocb_need_deferred_wakeup(rdp))
+                do_nocb_deferred_wakeup_common(rdp);
+}
+
 void __init rcu_init_nohz(void)
 {
         int cpu;
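The fastpath/slowpath split here is a deliberately racy optimization: do_nocb_deferred_wakeup() performs a lockless check to skip lock acquisition in the common case, and do_nocb_deferred_wakeup_common() re-checks under ->nocb_lock; if the racy check misses a just-set request, the ->nocb_timer armed by wake_nocb_leader_defer() fires and cleans up, as the comment says. A userspace sketch of that double-checked shape (hypothetical names):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int defer_wakeup;         /* 0 means no wakeup requested */

static void deferred_wakeup_slow(void)
{
        pthread_mutex_lock(&lock);
        if (atomic_load(&defer_wakeup)) {       /* re-check under the lock */
                atomic_store(&defer_wakeup, 0);
                /* ...do the actual wakeup here... */
        }
        pthread_mutex_unlock(&lock);
}

/* Fastpath: racy check avoids the lock when no wakeup is pending.
 * A miss is tolerable because a timer backstop calls the slowpath. */
static void deferred_wakeup_fast(void)
{
        if (atomic_load(&defer_wakeup))
                deferred_wakeup_slow();
}

int main(void)
{
        atomic_store(&defer_wakeup, 1);
        deferred_wakeup_fast();
        return atomic_load(&defer_wakeup);      /* 0 on success */
}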
@@ -2287,6 +2326,9 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
         rdp->nocb_tail = &rdp->nocb_head;
         init_swait_queue_head(&rdp->nocb_wq);
         rdp->nocb_follower_tail = &rdp->nocb_follower_head;
+        raw_spin_lock_init(&rdp->nocb_lock);
+        setup_timer(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer,
+                    (unsigned long)rdp);
 }
 
 /*
@@ -2459,7 +2501,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
         return false;
 }
 
-static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
+static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
                                                      struct rcu_data *rdp,
                                                      unsigned long flags)
 {