Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  437
1 file changed, 311 insertions(+), 126 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 48d640ca1a05..233165da782f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -91,8 +91,10 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var
 
 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
 DEFINE_RCU_TPS(sname) \
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
 struct rcu_state sname##_state = { \
         .level = { &sname##_state.node[0] }, \
+        .rda = &sname##_data, \
         .call = cr, \
         .fqs_state = RCU_GP_IDLE, \
         .gpnum = 0UL - 300UL, \
@@ -101,11 +103,9 @@ struct rcu_state sname##_state = { \
         .orphan_nxttail = &sname##_state.orphan_nxtlist, \
         .orphan_donetail = &sname##_state.orphan_donelist, \
         .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
-        .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
         .name = RCU_STATE_NAME(sname), \
         .abbr = sabbr, \
-}; \
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
+}
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
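For readers tracing the macro change above, this is roughly what RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched) now expands to. This is a hand-expanded sketch for illustration, not compiler output:

/* Hand-expanded sketch of the reworked macro (illustrative only). */
DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_sched_data);
struct rcu_state rcu_sched_state = {
        .level = { &rcu_sched_state.node[0] },
        .rda = &rcu_sched_data,         /* Now initialized at compile time. */
        .call = call_rcu_sched,
        /* ...remaining fields exactly as in the initializer above... */
};

Defining the per-CPU data ahead of the rcu_state structure is what lets .rda be filled in statically; the run-time assignment rsp->rda = rda is correspondingly deleted from rcu_init_one() near the end of this patch.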
@@ -152,6 +152,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -160,6 +162,12 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
 module_param(kthread_prio, int, 0644);
 
+/* Delay in jiffies for grace-period initialization. */
+static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT)
+                                ? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY
+                                : 0;
+module_param(gp_init_delay, int, 0644);
+
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test. The rcutorture_testseq is incremented
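Because gp_init_delay is a plain module parameter, it can also be set on the kernel command line. A plausible invocation, assuming the usual rcutree. prefix that tree.c's parameters carry, would be:

        rcutree.gp_init_delay=3

which, together with CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y, makes the grace-period kthread sleep three jiffies at each slow-init point (see the rcu_gp_init() hunk further down).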
@@ -173,6 +181,17 @@ unsigned long rcutorture_testseq;
 unsigned long rcutorture_vernum;
 
 /*
+ * Compute the mask of online CPUs for the specified rcu_node structure.
+ * This will not be stable unless the rcu_node structure's ->lock is
+ * held, but the bit corresponding to the current CPU will be stable
+ * in most contexts.
+ */
+unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
+{
+        return ACCESS_ONCE(rnp->qsmaskinitnext);
+}
+
+/*
  * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
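As a usage sketch, a caller wanting the racy-but-self-stable online check described in the comment above might look like the following (hypothetical helper, named here for illustration only):

/* Hypothetical caller of rcu_rnp_online_cpus(), for illustration. */
static bool my_cpu_looks_online(struct rcu_data *rdp)
{
        struct rcu_node *rnp = rdp->mynode;

        /* Only the current CPU's bit is dependable without rnp->lock. */
        return !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
}

This is exactly the pattern that rcu_lockdep_current_cpu_online() switches to in a later hunk of this patch.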
@@ -292,10 +311,10 @@ void rcu_note_context_switch(void)
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 /*
- * Register a quiesecent state for all RCU flavors. If there is an
+ * Register a quiescent state for all RCU flavors. If there is an
  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
  * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desparate need of a quiescent state, which will normally
+ * RCU flavors in desperate need of a quiescent state, which will normally
  * be none of them). Either way, do a lightweight quiescent state for
  * all RCU flavors.
  */
@@ -410,6 +429,15 @@ void rcu_bh_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
 /*
+ * Force a quiescent state for RCU-sched.
+ */
+void rcu_sched_force_quiescent_state(void)
+{
+        force_quiescent_state(&rcu_sched_state);
+}
+EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
+
+/*
  * Show the state of the grace-period kthreads.
  */
 void show_rcu_gp_kthreads(void)
@@ -483,15 +511,6 @@ void rcutorture_record_progress(unsigned long vernum)
 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
 
 /*
- * Force a quiescent state for RCU-sched.
- */
-void rcu_sched_force_quiescent_state(void)
-{
-        force_quiescent_state(&rcu_sched_state);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
-
-/*
  * Does the CPU have callbacks ready to be invoked?
  */
 static int
@@ -954,7 +973,7 @@ bool rcu_lockdep_current_cpu_online(void)
         preempt_disable();
         rdp = this_cpu_ptr(&rcu_sched_data);
         rnp = rdp->mynode;
-        ret = (rdp->grpmask & rnp->qsmaskinit) ||
+        ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
               !rcu_scheduler_fully_active;
         preempt_enable();
         return ret;
@@ -1196,9 +1215,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
         } else {
                 j = jiffies;
                 gpa = ACCESS_ONCE(rsp->gp_activity);
-                pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+                pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                        rsp->name, j - gpa, j, gpa,
-                       jiffies_till_next_fqs);
+                       jiffies_till_next_fqs,
+                       rcu_get_root(rsp)->qsmask);
                 /* In this case, the current CPU might be at fault. */
                 sched_show_task(current);
         }
@@ -1328,20 +1348,30 @@ void rcu_cpu_stall_reset(void)
 }
 
 /*
- * Initialize the specified rcu_data structure's callback list to empty.
+ * Initialize the specified rcu_data structure's default callback list
+ * to empty. The default callback list is the one that is not used by
+ * no-callbacks CPUs.
  */
-static void init_callback_list(struct rcu_data *rdp)
+static void init_default_callback_list(struct rcu_data *rdp)
 {
         int i;
 
-        if (init_nocb_callback_list(rdp))
-                return;
         rdp->nxtlist = NULL;
         for (i = 0; i < RCU_NEXT_SIZE; i++)
                 rdp->nxttail[i] = &rdp->nxtlist;
 }
 
 /*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+        if (init_nocb_callback_list(rdp))
+                return;
+        init_default_callback_list(rdp);
+}
+
+/*
  * Determine the value that ->completed will have at the end of the
  * next subsequent grace period. This is used to tag callbacks so that
  * a CPU can invoke callbacks in a timely fashion even if that CPU has
@@ -1703,11 +1733,11 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static int rcu_gp_init(struct rcu_state *rsp)
 {
+        unsigned long oldmask;
         struct rcu_data *rdp;
         struct rcu_node *rnp = rcu_get_root(rsp);
 
         ACCESS_ONCE(rsp->gp_activity) = jiffies;
-        rcu_bind_gp_kthread();
         raw_spin_lock_irq(&rnp->lock);
         smp_mb__after_unlock_lock();
         if (!ACCESS_ONCE(rsp->gp_flags)) {
@@ -1733,9 +1763,54 @@ static int rcu_gp_init(struct rcu_state *rsp)
         trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
         raw_spin_unlock_irq(&rnp->lock);
 
-        /* Exclude any concurrent CPU-hotplug operations. */
-        mutex_lock(&rsp->onoff_mutex);
-        smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
+        /*
+         * Apply per-leaf buffered online and offline operations to the
+         * rcu_node tree. Note that this new grace period need not wait
+         * for subsequent online CPUs, and that quiescent-state forcing
+         * will handle subsequent offline CPUs.
+         */
+        rcu_for_each_leaf_node(rsp, rnp) {
+                raw_spin_lock_irq(&rnp->lock);
+                smp_mb__after_unlock_lock();
+                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
+                    !rnp->wait_blkd_tasks) {
+                        /* Nothing to do on this leaf rcu_node structure. */
+                        raw_spin_unlock_irq(&rnp->lock);
+                        continue;
+                }
+
+                /* Record old state, apply changes to ->qsmaskinit field. */
+                oldmask = rnp->qsmaskinit;
+                rnp->qsmaskinit = rnp->qsmaskinitnext;
+
+                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
+                if (!oldmask != !rnp->qsmaskinit) {
+                        if (!oldmask) /* First online CPU for this rcu_node. */
+                                rcu_init_new_rnp(rnp);
+                        else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
+                                rnp->wait_blkd_tasks = true;
+                        else /* Last offline CPU and can propagate. */
+                                rcu_cleanup_dead_rnp(rnp);
+                }
+
+                /*
+                 * If all waited-on tasks from prior grace period are
+                 * done, and if all this rcu_node structure's CPUs are
+                 * still offline, propagate up the rcu_node tree and
+                 * clear ->wait_blkd_tasks. Otherwise, if one of this
+                 * rcu_node structure's CPUs has since come back online,
+                 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
+                 * checks for this, so just call it unconditionally).
+                 */
+                if (rnp->wait_blkd_tasks &&
+                    (!rcu_preempt_has_tasks(rnp) ||
+                     rnp->qsmaskinit)) {
+                        rnp->wait_blkd_tasks = false;
+                        rcu_cleanup_dead_rnp(rnp);
+                }
+
+                raw_spin_unlock_irq(&rnp->lock);
+        }
 
         /*
          * Set the quiescent-state-needed bits in all the rcu_node
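A note on the zero-ness test above: !oldmask != !rnp->qsmaskinit is true exactly when the mask crossed between zero and nonzero, that is, when this leaf gained its first online CPU or lost its last one. An equivalent, more verbose formulation (illustrative only, not part of the patch):

/* Illustrative restatement of the zero-ness test in rcu_gp_init(). */
static bool qsmaskinit_crossed_zero(unsigned long oldmask, unsigned long newmask)
{
        bool was_empty = (oldmask == 0);        /* No CPUs before the update. */
        bool is_empty = (newmask == 0);         /* No CPUs after the update.  */

        /* Same truth value as !oldmask != !newmask. */
        return was_empty != is_empty;
}

Transitions that stay on the same side of zero, for example one more CPU coming online in a leaf that already had some, need no propagation, which is what keeps this pass cheap.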
@@ -1757,8 +1832,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
                 rcu_preempt_check_blocked_tasks(rnp);
                 rnp->qsmask = rnp->qsmaskinit;
                 ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
-                WARN_ON_ONCE(rnp->completed != rsp->completed);
-                ACCESS_ONCE(rnp->completed) = rsp->completed;
+                if (WARN_ON_ONCE(rnp->completed != rsp->completed))
+                        ACCESS_ONCE(rnp->completed) = rsp->completed;
                 if (rnp == rdp->mynode)
                         (void)__note_gp_changes(rsp, rnp, rdp);
                 rcu_preempt_boost_start_gp(rnp);
@@ -1768,9 +1843,12 @@ static int rcu_gp_init(struct rcu_state *rsp)
                 raw_spin_unlock_irq(&rnp->lock);
                 cond_resched_rcu_qs();
                 ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) &&
+                    gp_init_delay > 0 &&
+                    !(rsp->gpnum % (rcu_num_nodes * 10)))
+                        schedule_timeout_uninterruptible(gp_init_delay);
         }
 
-        mutex_unlock(&rsp->onoff_mutex);
         return 1;
 }
 
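The gpnum test above throttles the debugging delay so that only one grace period in every rcu_num_nodes * 10 takes the slow path, and during that grace period the kthread sleeps gp_init_delay jiffies after each initialized node. A standalone check of the arithmetic, with an assumed rcu_num_nodes of 65:

#include <stdio.h>

/* Standalone demonstration of the slow-init throttle (assumed values). */
int main(void)
{
        const unsigned long rcu_num_nodes = 65; /* Example tree size. */
        unsigned long gpnum;

        for (gpnum = 1; gpnum <= 1300; gpnum++)
                if (!(gpnum % (rcu_num_nodes * 10)))
                        printf("GP %lu takes the slow path\n", gpnum);
        return 0;       /* Prints only GPs 650 and 1300. */
}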
@@ -1798,7 +1876,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
                 fqs_state = RCU_FORCE_QS;
         } else {
                 /* Handle dyntick-idle and offline CPUs. */
-                isidle = false;
+                isidle = true;
                 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
         }
         /* Clear flag to prevent immediate re-entry. */
@@ -1852,6 +1930,8 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         rcu_for_each_node_breadth_first(rsp, rnp) {
                 raw_spin_lock_irq(&rnp->lock);
                 smp_mb__after_unlock_lock();
+                WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+                WARN_ON_ONCE(rnp->qsmask);
                 ACCESS_ONCE(rnp->completed) = rsp->gpnum;
                 rdp = this_cpu_ptr(rsp->rda);
                 if (rnp == rdp->mynode)
@@ -1895,6 +1975,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
         struct rcu_state *rsp = arg;
         struct rcu_node *rnp = rcu_get_root(rsp);
 
+        rcu_bind_gp_kthread();
         for (;;) {
 
                 /* Handle grace-period start. */
@@ -2062,25 +2143,32 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
  * Allows quiescent states for a group of CPUs to be reported at one go
  * to the specified rcu_node structure, though all the CPUs in the group
- * must be represented by the same rcu_node structure (which need not be
- * a leaf rcu_node structure, though it often will be). That structure's
- * lock must be held upon entry, and it is released before return.
+ * must be represented by the same rcu_node structure (which need not be a
+ * leaf rcu_node structure, though it often will be). The gps parameter
+ * is the grace-period snapshot, which means that the quiescent states
+ * are valid only if rnp->gpnum is equal to gps. That structure's lock
+ * must be held upon entry, and it is released before return.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-                  struct rcu_node *rnp, unsigned long flags)
+                  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
         __releases(rnp->lock)
 {
+        unsigned long oldmask = 0;
         struct rcu_node *rnp_c;
 
         /* Walk up the rcu_node hierarchy. */
         for (;;) {
-                if (!(rnp->qsmask & mask)) {
+                if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
 
-                        /* Our bit has already been cleared, so done. */
+                        /*
+                         * Our bit has already been cleared, or the
+                         * relevant grace period is already over, so done.
+                         */
                         raw_spin_unlock_irqrestore(&rnp->lock, flags);
                         return;
                 }
+                WARN_ON_ONCE(oldmask);  /* Any child must be all zeroed! */
                 rnp->qsmask &= ~mask;
                 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
                                                  mask, rnp->qsmask, rnp->level,
@@ -2104,7 +2192,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                 rnp = rnp->parent;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 smp_mb__after_unlock_lock();
-                WARN_ON_ONCE(rnp_c->qsmask);
+                oldmask = rnp_c->qsmask;
         }
 
         /*
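The gps snapshot added above closes a race with grace-period initialization: a quiescent state collected during one grace period must not clear ->qsmask bits after the rcu_node structure has advanced to a newer one. The guard reduces to a one-line staleness check, sketched here for emphasis and not as new kernel code:

/* Illustrative: drop a report whose grace period has already ended. */
static bool qs_report_is_stale(struct rcu_node *rnp, unsigned long gps)
{
        /* gps was snapshotted from rnp->gpnum when the QS was collected. */
        return rnp->gpnum != gps;
}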
@@ -2116,6 +2204,46 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 }
 
 /*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period. The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                                      struct rcu_node *rnp, unsigned long flags)
+        __releases(rnp->lock)
+{
+        unsigned long gps;
+        unsigned long mask;
+        struct rcu_node *rnp_p;
+
+        if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
+            rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                return;  /* Still need more quiescent states! */
+        }
+
+        rnp_p = rnp->parent;
+        if (rnp_p == NULL) {
+                /*
+                 * Only one rcu_node structure in the tree, so don't
+                 * try to report up to its nonexistent parent!
+                 */
+                rcu_report_qs_rsp(rsp, flags);
+                return;
+        }
+
+        /* Report up the rest of the hierarchy, tracking current ->gpnum. */
+        gps = rnp->gpnum;
+        mask = rnp->grpmask;
+        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
+        smp_mb__after_unlock_lock();
+        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+}
+
+/*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
  * structure. This must be either called from the specified CPU, or
  * called when the specified CPU is known to be offline (and when it is
@@ -2163,7 +2291,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                  */
                 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-                rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
+                rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+                /* ^^^ Released rnp->lock */
                 if (needwake)
                         rcu_gp_kthread_wake(rsp);
         }
@@ -2256,8 +2385,12 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
         }
 
-        /* Finally, initialize the rcu_data structure's list to empty. */
+        /*
+         * Finally, initialize the rcu_data structure's list to empty and
+         * disallow further callbacks on this CPU.
+         */
         init_callback_list(rdp);
+        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
 }
 
 /*
@@ -2355,6 +2488,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                 smp_mb__after_unlock_lock(); /* GP memory ordering. */
                 rnp->qsmaskinit &= ~mask;
+                rnp->qsmask &= ~mask;
                 if (rnp->qsmaskinit) {
                         raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                         return;
@@ -2364,6 +2498,26 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 }
 
 /*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+        unsigned long flags;
+        unsigned long mask;
+        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+
+        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+        mask = rdp->grpmask;
+        raw_spin_lock_irqsave(&rnp->lock, flags);
+        smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
+        rnp->qsmaskinitnext &= ~mask;
+        raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context. Do the remainder of the cleanup,
  * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2379,29 +2533,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
         /* Adjust any no-longer-needed kthreads. */
         rcu_boost_kthread_setaffinity(rnp, -1);
 
-        /* Exclude any attempts to start a new grace period. */
-        mutex_lock(&rsp->onoff_mutex);
-        raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
-
         /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
+        raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
         rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
         rcu_adopt_orphan_cbs(rsp, flags);
         raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
 
-        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
-        raw_spin_lock_irqsave(&rnp->lock, flags);
-        smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
-        rnp->qsmaskinit &= ~rdp->grpmask;
-        if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
-                rcu_cleanup_dead_rnp(rnp);
-        rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
         WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
                   "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
                   cpu, rdp->qlen, rdp->nxtlist);
-        init_callback_list(rdp);
-        /* Disallow further callbacks on this CPU. */
-        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
-        mutex_unlock(&rsp->onoff_mutex);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -2414,6 +2554,10 @@ static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 {
 }
 
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+}
+
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 }
@@ -2589,26 +2733,47 @@ static void force_qs_rnp(struct rcu_state *rsp,
                         return;
                 }
                 if (rnp->qsmask == 0) {
-                        rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
-                        continue;
+                        if (rcu_state_p == &rcu_sched_state ||
+                            rsp != rcu_state_p ||
+                            rcu_preempt_blocked_readers_cgp(rnp)) {
+                                /*
+                                 * No point in scanning bits because they
+                                 * are all zero. But we might need to
+                                 * priority-boost blocked readers.
+                                 */
+                                rcu_initiate_boost(rnp, flags);
+                                /* rcu_initiate_boost() releases rnp->lock */
+                                continue;
+                        }
+                        if (rnp->parent &&
+                            (rnp->parent->qsmask & rnp->grpmask)) {
+                                /*
+                                 * Race between grace-period
+                                 * initialization and task exiting RCU
+                                 * read-side critical section: Report.
+                                 */
+                                rcu_report_unblock_qs_rnp(rsp, rnp, flags);
+                                /* rcu_report_unblock_qs_rnp() releases ->lock */
+                                continue;
+                        }
                 }
                 cpu = rnp->grplo;
                 bit = 1;
                 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
                         if ((rnp->qsmask & bit) != 0) {
-                                if ((rnp->qsmaskinit & bit) != 0)
-                                        *isidle = false;
+                                if ((rnp->qsmaskinit & bit) == 0)
+                                        *isidle = false; /* Pending hotplug. */
                                 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
                                         mask |= bit;
                         }
                 }
                 if (mask != 0) {
-
-                        /* rcu_report_qs_rnp() releases rnp->lock. */
-                        rcu_report_qs_rnp(mask, rsp, rnp, flags);
-                        continue;
+                        /* Idle/offline CPUs, report (releases rnp->lock). */
+                        rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+                } else {
+                        /* Nothing to do here, so just drop the lock. */
+                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 }
-                raw_spin_unlock_irqrestore(&rnp->lock, flags);
         }
 }
 
@@ -2741,7 +2906,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
          * If called from an extended quiescent state, invoke the RCU
          * core in order to force a re-evaluation of RCU's idleness.
          */
-        if (!rcu_is_watching() && cpu_online(smp_processor_id()))
+        if (!rcu_is_watching())
                 invoke_rcu_core();
 
         /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
@@ -2827,11 +2992,22 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
                 if (cpu != -1)
                         rdp = per_cpu_ptr(rsp->rda, cpu);
-                offline = !__call_rcu_nocb(rdp, head, lazy, flags);
-                WARN_ON_ONCE(offline);
-                /* _call_rcu() is illegal on offline CPU; leak the callback. */
-                local_irq_restore(flags);
-                return;
+                if (likely(rdp->mynode)) {
+                        /* Post-boot, so this should be for a no-CBs CPU. */
+                        offline = !__call_rcu_nocb(rdp, head, lazy, flags);
+                        WARN_ON_ONCE(offline);
+                        /* Offline CPU, _call_rcu() illegal, leak callback. */
+                        local_irq_restore(flags);
+                        return;
+                }
+                /*
+                 * Very early boot, before rcu_init(). Initialize if needed
+                 * and then drop through to queue the callback.
+                 */
+                BUG_ON(cpu != -1);
+                WARN_ON_ONCE(!rcu_is_watching());
+                if (!likely(rdp->nxtlist))
+                        init_default_callback_list(rdp);
         }
         ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
         if (lazy)
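The rdp->mynode test above splits __call_rcu()'s slow path into two cases: post-boot no-CBs handling versus very early boot, before the rcu_node tree exists. A hedged sketch of the early-boot branch in isolation (hypothetical helper name, for illustration):

/* Illustration only: __call_rcu()'s pre-rcu_init() fallback. */
static void sketch_early_boot_enqueue(struct rcu_data *rdp, int cpu)
{
        BUG_ON(cpu != -1);              /* Only call_rcu(), never a target CPU. */

        /* rdp->mynode == NULL: give this CPU a usable callback list... */
        if (!rdp->nxtlist)
                init_default_callback_list(rdp);
        /* ...then fall through to the normal enqueue in __call_rcu(). */
}

Note that init_default_callback_list() is called directly rather than init_callback_list(), presumably because the no-CBs machinery that the latter consults is not yet set up this early in boot.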
@@ -2954,7 +3130,7 @@ void synchronize_sched(void)
                          "Illegal synchronize_sched() in RCU-sched read-side critical section");
         if (rcu_blocking_is_gp())
                 return;
-        if (rcu_expedited)
+        if (rcu_gp_is_expedited())
                 synchronize_sched_expedited();
         else
                 wait_rcu_gp(call_rcu_sched);
@@ -2981,7 +3157,7 @@ void synchronize_rcu_bh(void)
                          "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
         if (rcu_blocking_is_gp())
                 return;
-        if (rcu_expedited)
+        if (rcu_gp_is_expedited())
                 synchronize_rcu_bh_expedited();
         else
                 wait_rcu_gp(call_rcu_bh);
@@ -3518,6 +3694,28 @@ void rcu_barrier_sched(void)
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
 /*
+ * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
+ * first CPU in a given leaf rcu_node structure coming online. The caller
+ * must hold the corresponding leaf rcu_node ->lock with interrupts
+ * disabled.
+ */
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
+{
+        long mask;
+        struct rcu_node *rnp = rnp_leaf;
+
+        for (;;) {
+                mask = rnp->grpmask;
+                rnp = rnp->parent;
+                if (rnp == NULL)
+                        return;
+                raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
+                rnp->qsmaskinit |= mask;
+                raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+        }
+}
+
+/*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
 static void __init
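To make the upward propagation concrete, here is a self-contained toy model of the loop above, with a single root and one leaf whose ->grpmask is assumed to be 0x4 (all names and values hypothetical):

#include <stdio.h>

/* Toy model of rcu_init_new_rnp()'s upward ->qsmaskinit propagation. */
struct toy_rnp {
        unsigned long qsmaskinit;
        unsigned long grpmask;          /* This node's bit in its parent. */
        struct toy_rnp *parent;
};

static void toy_init_new_rnp(struct toy_rnp *rnp_leaf)
{
        struct toy_rnp *rnp = rnp_leaf;
        unsigned long mask;

        for (;;) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                if (rnp == NULL)
                        return;         /* Reached the top of the tree. */
                rnp->qsmaskinit |= mask;
        }
}

int main(void)
{
        struct toy_rnp root = { 0, 0, NULL };
        struct toy_rnp leaf = { 0x1, 0x4, &root };      /* CPU bit set by caller. */

        toy_init_new_rnp(&leaf);
        printf("root.qsmaskinit = %#lx\n", root.qsmaskinit);    /* 0x4 */
        return 0;
}

The leaf's own ->qsmaskinit bit is set by the caller before this helper runs (rcu_gp_init() copies ->qsmaskinitnext into it), so the loop only has to inform the ancestors.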
@@ -3553,49 +3751,37 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
         struct rcu_node *rnp = rcu_get_root(rsp);
 
-        /* Exclude new grace periods. */
-        mutex_lock(&rsp->onoff_mutex);
-
         /* Set up local state, ensuring consistent view of global state. */
         raw_spin_lock_irqsave(&rnp->lock, flags);
         rdp->beenonline = 1;     /* We have now been online. */
         rdp->qlen_last_fqs_check = 0;
         rdp->n_force_qs_snap = rsp->n_force_qs;
         rdp->blimit = blimit;
-        init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
+        if (!rdp->nxtlist)
+                init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
         rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
         rcu_sysidle_init_percpu_data(rdp->dynticks);
         atomic_set(&rdp->dynticks->dynticks,
                    (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
         raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
 
-        /* Add CPU to rcu_node bitmasks. */
+        /*
+         * Add CPU to leaf rcu_node pending-online bitmask. Any needed
+         * propagation up the rcu_node tree will happen at the beginning
+         * of the next grace period.
+         */
         rnp = rdp->mynode;
         mask = rdp->grpmask;
-        do {
-                /* Exclude any attempts to start a new GP on small systems. */
-                raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
-                rnp->qsmaskinit |= mask;
-                mask = rnp->grpmask;
-                if (rnp == rdp->mynode) {
-                        /*
-                         * If there is a grace period in progress, we will
-                         * set up to wait for it next time we run the
-                         * RCU core code.
-                         */
-                        rdp->gpnum = rnp->completed;
-                        rdp->completed = rnp->completed;
-                        rdp->passed_quiesce = 0;
-                        rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-                        rdp->qs_pending = 0;
-                        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-                }
-                raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
-                rnp = rnp->parent;
-        } while (rnp != NULL && !(rnp->qsmaskinit & mask));
-        local_irq_restore(flags);
-
-        mutex_unlock(&rsp->onoff_mutex);
+        raw_spin_lock(&rnp->lock);              /* irqs already disabled. */
+        smp_mb__after_unlock_lock();
+        rnp->qsmaskinitnext |= mask;
+        rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
+        rdp->completed = rnp->completed;
+        rdp->passed_quiesce = false;
+        rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+        rdp->qs_pending = false;
+        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -3609,15 +3795,14 @@ static void rcu_prepare_cpu(int cpu)
 /*
  * Handle CPU online/offline notification events.
  */
-static int rcu_cpu_notify(struct notifier_block *self,
-                          unsigned long action, void *hcpu)
+int rcu_cpu_notify(struct notifier_block *self,
+                   unsigned long action, void *hcpu)
 {
         long cpu = (long)hcpu;
         struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
         struct rcu_node *rnp = rdp->mynode;
         struct rcu_state *rsp;
 
-        trace_rcu_utilization(TPS("Start CPU hotplug"));
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
@@ -3637,6 +3822,11 @@ static int rcu_cpu_notify(struct notifier_block *self,
                 for_each_rcu_flavor(rsp)
                         rcu_cleanup_dying_cpu(rsp);
                 break;
+        case CPU_DYING_IDLE:
+                for_each_rcu_flavor(rsp) {
+                        rcu_cleanup_dying_idle_cpu(cpu, rsp);
+                }
+                break;
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
         case CPU_UP_CANCELED:
@@ -3649,7 +3839,6 @@ static int rcu_cpu_notify(struct notifier_block *self,
         default:
                 break;
         }
-        trace_rcu_utilization(TPS("End CPU hotplug"));
         return NOTIFY_OK;
 }
 
@@ -3660,11 +3849,12 @@ static int rcu_pm_notify(struct notifier_block *self,
         case PM_HIBERNATION_PREPARE:
         case PM_SUSPEND_PREPARE:
                 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
-                        rcu_expedited = 1;
+                        rcu_expedite_gp();
                 break;
         case PM_POST_HIBERNATION:
         case PM_POST_SUSPEND:
-                rcu_expedited = 0;
+                if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
+                        rcu_unexpedite_gp();
                 break;
         default:
                 break;
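rcu_expedite_gp(), rcu_unexpedite_gp(), and the rcu_gp_is_expedited() test used by synchronize_sched() and synchronize_rcu_bh() above live outside this file (in kernel/rcu/update.c in this series). A hedged sketch of the idea, assuming a simple nesting counter layered over the old rcu_expedited boot flag:

/* Sketch only: a nesting counter lets independent expedite requests overlap. */
static atomic_t rcu_expedited_nesting;

void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}

void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}

bool rcu_gp_is_expedited(void)
{
        /* Expedited if the boot/sysfs knob or any live request says so. */
        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}

With counting semantics, the PM notifier no longer stomps on other expedite users the way the bare rcu_expedited = 0 assignment could.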
@@ -3734,30 +3924,26 @@ void rcu_scheduler_starting(void)
  * Compute the per-level fanout, either using the exact fanout specified
  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
  */
-#ifdef CONFIG_RCU_FANOUT_EXACT
-static void __init rcu_init_levelspread(struct rcu_state *rsp)
-{
-        int i;
-
-        rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
-        for (i = rcu_num_lvls - 2; i >= 0; i--)
-                rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-}
-#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
-        int ccur;
-        int cprv;
         int i;
 
-        cprv = nr_cpu_ids;
-        for (i = rcu_num_lvls - 1; i >= 0; i--) {
-                ccur = rsp->levelcnt[i];
-                rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
-                cprv = ccur;
+        if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
+                rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+                for (i = rcu_num_lvls - 2; i >= 0; i--)
+                        rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+        } else {
+                int ccur;
+                int cprv;
+
+                cprv = nr_cpu_ids;
+                for (i = rcu_num_lvls - 1; i >= 0; i--) {
+                        ccur = rsp->levelcnt[i];
+                        rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+                        cprv = ccur;
+                }
         }
 }
-#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
 
 /*
  * Helper function for rcu_init() that initializes one rcu_state structure.
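The balanced branch computes each level's spread as a ceiling division of the population of the level below. A standalone rerun of the arithmetic for an assumed 96-CPU machine with levelcnt = {1, 6}, one root over six leaves:

#include <stdio.h>

/* Standalone rerun of the balanced-tree arithmetic (assumed values). */
int main(void)
{
        int levelcnt[2] = { 1, 6 };     /* Nodes per level: root, leaves. */
        int levelspread[2];
        int cprv = 96;                  /* Stands in for nr_cpu_ids. */
        int i, ccur;

        for (i = 1; i >= 0; i--) {
                ccur = levelcnt[i];
                levelspread[i] = (cprv + ccur - 1) / ccur;      /* Ceiling divide. */
                cprv = ccur;
        }
        printf("leaf spread %d, root spread %d\n",
               levelspread[1], levelspread[0]);                 /* 16 and 6. */
        return 0;
}

So each leaf is asked to fan out to at most 16 CPUs and the root to 6 children, matching the (cprv + ccur - 1) / ccur rounding above.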
@@ -3833,7 +4019,6 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                 }
         }
 
-        rsp->rda = rda;
         init_waitqueue_head(&rsp->gp_wq);
         rnp = rsp->level[rcu_num_lvls - 1];
         for_each_possible_cpu(i) {
@@ -3926,6 +4111,8 @@ void __init rcu_init(void)
 {
         int cpu;
 
+        rcu_early_boot_tests();
+
         rcu_bootup_announce();
         rcu_init_geometry();
         rcu_init_one(&rcu_bh_state, &rcu_bh_data);
@@ -3942,8 +4129,6 @@ void __init rcu_init(void)
         pm_notifier(rcu_pm_notify, 0);
         for_each_online_cpu(cpu)
                 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-
-        rcu_early_boot_tests();
 }
 
 #include "tree_plugin.h"