Diffstat (limited to 'kernel/rcu/tree.c')
 -rw-r--r--   kernel/rcu/tree.c | 97
 1 file changed, 82 insertions(+), 15 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index dd081987a8ec..b3d116cd072d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -369,6 +369,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
                                  bool user)
 {
+        struct rcu_state *rsp;
+        struct rcu_data *rdp;
+
         trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
         if (!user && !is_idle_task(current)) {
                 struct task_struct *idle __maybe_unused =
@@ -380,6 +383,10 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
                           current->pid, current->comm,
                           idle->pid, idle->comm); /* must be idle task! */
         }
+        for_each_rcu_flavor(rsp) {
+                rdp = this_cpu_ptr(rsp->rda);
+                do_nocb_deferred_wakeup(rdp);
+        }
         rcu_prepare_for_idle(smp_processor_id());
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
         smp_mb__before_atomic_inc(); /* See above. */
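The hunk above makes idle entry flush, for every RCU flavor, any rcuo-kthread wakeups that were deferred from contexts where waking was unsafe; once the CPU is idle there may be no later interrupt to issue them. Below is a minimal user-space sketch of the defer-then-flush pattern, with illustrative names only (none of them are the kernel's):

/*
 * Sketch: a flag is set where a wakeup cannot safely be issued, and a
 * later, safe context checks the flag and performs the real wakeup.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_rdp {
        bool nocb_defer_wakeup;         /* wakeup postponed? */
};

static void defer_wakeup(struct fake_rdp *rdp)
{
        rdp->nocb_defer_wakeup = true;  /* too risky to wake right now */
}

static void do_deferred_wakeup(struct fake_rdp *rdp)
{
        if (!rdp->nocb_defer_wakeup)
                return;                 /* nothing pending */
        rdp->nocb_defer_wakeup = false;
        printf("waking callback-offload kthread\n");
}

int main(void)
{
        struct fake_rdp rdp = { .nocb_defer_wakeup = false };

        defer_wakeup(&rdp);       /* e.g. from call_rcu() with irqs disabled */
        do_deferred_wakeup(&rdp); /* e.g. from idle entry, as in the hunk above */
        return 0;
}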
@@ -411,11 +418,12 @@ static void rcu_eqs_enter(bool user)
         rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
-        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
                 rdtp->dynticks_nesting = 0;
-        else
+                rcu_eqs_enter_common(rdtp, oldval, user);
+        } else {
                 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-        rcu_eqs_enter_common(rdtp, oldval, user);
+        }
 }
 
 /**
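The restructuring above makes rcu_eqs_enter_common() run only on the outermost entry, when the task-nesting field actually drops to zero; nested entries now do pure bookkeeping. A runnable sketch of the nesting arithmetic, using made-up constants in place of the real DYNTICK_TASK_* values (which rcupdate.h derives differently):

#include <assert.h>

#define NEST_VALUE 0x100        /* illustrative, not the kernel's value */
#define NEST_MASK  0xf00

static long long nesting;       /* stands in for rdtp->dynticks_nesting */

static int eqs_enter(void)
{
        long long oldval = nesting;

        if ((oldval & NEST_MASK) == NEST_VALUE) {
                nesting = 0;    /* outermost: really enter the extended QS */
                return 1;       /* caller runs the eqs_enter_common() work */
        }
        nesting -= NEST_VALUE;  /* nested: just decrement, no state change */
        return 0;
}

int main(void)
{
        nesting = 2 * NEST_VALUE;       /* pretend two nested levels */
        assert(eqs_enter() == 0);       /* inner exit: bookkeeping only */
        assert(eqs_enter() == 1);       /* outermost: enters dyntick-idle */
        return 0;
}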
@@ -533,11 +541,12 @@ static void rcu_eqs_exit(bool user)
         rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         WARN_ON_ONCE(oldval < 0);
-        if (oldval & DYNTICK_TASK_NEST_MASK)
+        if (oldval & DYNTICK_TASK_NEST_MASK) {
                 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-        else
+        } else {
                 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-        rcu_eqs_exit_common(rdtp, oldval, user);
+                rcu_eqs_exit_common(rdtp, oldval, user);
+        }
 }
 
 /**
@@ -716,7 +725,7 @@ bool rcu_lockdep_current_cpu_online(void)
         bool ret;
 
         if (in_nmi())
-                return 1;
+                return true;
         preempt_disable();
         rdp = this_cpu_ptr(&rcu_sched_data);
         rnp = rdp->mynode;
@@ -755,6 +764,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 }
 
 /*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
+/*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through an dynticks
  * idle state since the last call to dyntick_save_progress_counter()
@@ -812,16 +827,34 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
          */
         rcu_kick_nohz_cpu(rdp->cpu);
 
+        /*
+         * Alternatively, the CPU might be running in the kernel
+         * for an extended period of time without a quiescent state.
+         * Attempt to force the CPU through the scheduler to gain the
+         * needed quiescent state, but only if the grace period has gone
+         * on for an uncommonly long time.  If there are many stuck CPUs,
+         * we will beat on the first one until it gets unstuck, then move
+         * to the next.  Only do this for the primary flavor of RCU.
+         */
+        if (rdp->rsp == rcu_state &&
+            ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
+                rdp->rsp->jiffies_resched += 5;
+                resched_cpu(rdp->cpu);
+        }
+
         return 0;
 }
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
         unsigned long j = ACCESS_ONCE(jiffies);
+        unsigned long j1;
 
         rsp->gp_start = j;
         smp_wmb(); /* Record start time before stall time. */
-        rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
+        j1 = rcu_jiffies_till_stall_check();
+        rsp->jiffies_stall = j + j1;
+        rsp->jiffies_resched = j + j1 / 2;
 }
 
 /*
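With the hunk above, record_gp_stall_check_time() arms ->jiffies_resched at half the stall-warning timeout, so the resched_cpu() nudge in rcu_implicit_dynticks_qs() fires well before a stall warning would, then retries every 5 jiffies. A worked example of the arithmetic, assuming HZ=1000 and a 21-second stall timeout (both assumptions, not taken from this diff):

#include <stdio.h>

int main(void)
{
        unsigned long hz = 1000;        /* assumed HZ */
        unsigned long j = 500000;       /* current jiffies, arbitrary */
        unsigned long j1 = 21 * hz;     /* rcu_jiffies_till_stall_check() */

        unsigned long jiffies_stall = j + j1;           /* warn at +21 s */
        unsigned long jiffies_resched = j + j1 / 2;     /* kick at +10.5 s */

        printf("stall warning at %lu, first resched_cpu() at %lu\n",
               jiffies_stall, jiffies_resched);
        /* After each kick, jiffies_resched += 5, i.e. retry every 5 ticks. */
        return 0;
}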
@@ -1133,8 +1166,10 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
          * hold it, acquire the root rcu_node structure's lock in order to
          * start one (if needed).
          */
-        if (rnp != rnp_root)
+        if (rnp != rnp_root) {
                 raw_spin_lock(&rnp_root->lock);
+                smp_mb__after_unlock_lock();
+        }
 
         /*
          * Get a new grace-period number.  If there really is no grace
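This is the first of many hunks in this patch that add smp_mb__after_unlock_lock() directly after acquiring an rcu_node ->lock. The point is to promote the previous holder's unlock plus this lock into a full memory barrier; on most architectures the macro expands to nothing because unlock+lock is already globally ordered, and it expands to smp_mb() where it is not. A runnable C11 analogue of the pattern, with pthreads standing in for raw spinlocks (a sketch of the idea, not the kernel's implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int gp_state;    /* stands in for rcu_node grace-period fields */

static void update_under_lock(int val)
{
        pthread_mutex_lock(&lock);
        /* plays the role of smp_mb__after_unlock_lock(): everything before
         * the previous holder's unlock is ordered before everything after
         * this lock, even on weakly ordered hardware. */
        atomic_thread_fence(memory_order_seq_cst);
        gp_state = val;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        update_under_lock(1);
        printf("gp_state = %d\n", gp_state);
        return 0;
}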
@@ -1354,6 +1389,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
                 local_irq_restore(flags);
                 return;
         }
+        smp_mb__after_unlock_lock();
         __note_gp_changes(rsp, rnp, rdp);
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1368,6 +1404,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
         rcu_bind_gp_kthread();
         raw_spin_lock_irq(&rnp->lock);
+        smp_mb__after_unlock_lock();
         if (rsp->gp_flags == 0) {
                 /* Spurious wakeup, tell caller to go back to sleep.  */
                 raw_spin_unlock_irq(&rnp->lock);
@@ -1409,6 +1446,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
          */
         rcu_for_each_node_breadth_first(rsp, rnp) {
                 raw_spin_lock_irq(&rnp->lock);
+                smp_mb__after_unlock_lock();
                 rdp = this_cpu_ptr(rsp->rda);
                 rcu_preempt_check_blocked_tasks(rnp);
                 rnp->qsmask = rnp->qsmaskinit;
@@ -1463,6 +1501,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
         /* Clear flag to prevent immediate re-entry. */
         if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                 raw_spin_lock_irq(&rnp->lock);
+                smp_mb__after_unlock_lock();
                 rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
                 raw_spin_unlock_irq(&rnp->lock);
         }
@@ -1480,6 +1519,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         struct rcu_node *rnp = rcu_get_root(rsp);
 
         raw_spin_lock_irq(&rnp->lock);
+        smp_mb__after_unlock_lock();
         gp_duration = jiffies - rsp->gp_start;
         if (gp_duration > rsp->gp_max)
                 rsp->gp_max = gp_duration;
@@ -1505,16 +1545,19 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
          */
         rcu_for_each_node_breadth_first(rsp, rnp) {
                 raw_spin_lock_irq(&rnp->lock);
+                smp_mb__after_unlock_lock();
                 ACCESS_ONCE(rnp->completed) = rsp->gpnum;
                 rdp = this_cpu_ptr(rsp->rda);
                 if (rnp == rdp->mynode)
                         __note_gp_changes(rsp, rnp, rdp);
+                /* smp_mb() provided by prior unlock-lock pair. */
                 nocb += rcu_future_gp_cleanup(rsp, rnp);
                 raw_spin_unlock_irq(&rnp->lock);
                 cond_resched();
         }
         rnp = rcu_get_root(rsp);
         raw_spin_lock_irq(&rnp->lock);
+        smp_mb__after_unlock_lock();
         rcu_nocb_gp_set(rnp, nocb);
 
         rsp->completed = rsp->gpnum; /* Declare grace period done. */
@@ -1553,6 +1596,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                         wait_event_interruptible(rsp->gp_wq,
                                                  ACCESS_ONCE(rsp->gp_flags) &
                                                  RCU_GP_FLAG_INIT);
+                        /* Locking provides needed memory barrier. */
                         if (rcu_gp_init(rsp))
                                 break;
                         cond_resched();
@@ -1582,6 +1626,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                         (!ACCESS_ONCE(rnp->qsmask) &&
                                          !rcu_preempt_blocked_readers_cgp(rnp)),
                                         j);
+                        /* Locking provides needed memory barriers. */
                         /* If grace period done, leave loop. */
                         if (!ACCESS_ONCE(rnp->qsmask) &&
                             !rcu_preempt_blocked_readers_cgp(rnp))
@@ -1749,6 +1794,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                 rnp_c = rnp;
                 rnp = rnp->parent;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
+                smp_mb__after_unlock_lock();
                 WARN_ON_ONCE(rnp_c->qsmask);
         }
 
@@ -1778,6 +1824,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
         rnp = rdp->mynode;
         raw_spin_lock_irqsave(&rnp->lock, flags);
+        smp_mb__after_unlock_lock();
         if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
             rnp->completed == rnp->gpnum) {
 
@@ -1901,13 +1948,13 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  * Adopt the RCU callbacks from the specified rcu_state structure's
  * orphanage.  The caller must hold the ->orphan_lock.
  */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 {
         int i;
         struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
         /* No-CBs CPUs are handled specially. */
-        if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+        if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
                 return;
 
         /* Do the accounting first. */
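The new flags argument threads the caller's saved interrupt state down through rcu_adopt_orphan_cbs() into rcu_nocb_adopt_orphan_cbs(), whose definition is not part of this diff; presumably the callee uses it to decide whether a wakeup may be issued immediately or must be deferred. A minimal sketch of that propagation pattern, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

static void leaf(bool irqs_were_disabled)
{
        if (irqs_were_disabled)
                printf("defer wakeup to a safe context\n");
        else
                printf("wake kthread immediately\n");
}

static void mid(bool irqs_were_disabled)
{
        leaf(irqs_were_disabled);       /* propagate, don't re-sample */
}

int main(void)
{
        mid(true);      /* caller had interrupts disabled */
        return 0;
}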
@@ -1986,12 +2033,13 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
         /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
         rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-        rcu_adopt_orphan_cbs(rsp);
+        rcu_adopt_orphan_cbs(rsp, flags);
 
         /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
         mask = rdp->grpmask;    /* rnp->grplo is constant. */
         do {
                 raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
+                smp_mb__after_unlock_lock();
                 rnp->qsmaskinit &= ~mask;
                 if (rnp->qsmaskinit != 0) {
                         if (rnp != rdp->mynode)
@@ -2202,6 +2250,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
                 cond_resched();
                 mask = 0;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
+                smp_mb__after_unlock_lock();
                 if (!rcu_gp_in_progress(rsp)) {
                         raw_spin_unlock_irqrestore(&rnp->lock, flags);
                         return;
@@ -2231,6 +2280,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
         rnp = rcu_get_root(rsp);
         if (rnp->qsmask == 0) {
                 raw_spin_lock_irqsave(&rnp->lock, flags);
+                smp_mb__after_unlock_lock();
                 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
         }
 }
@@ -2263,6 +2313,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 
         /* Reached the root of the rcu_node tree, acquire lock. */
         raw_spin_lock_irqsave(&rnp_old->lock, flags);
+        smp_mb__after_unlock_lock();
         raw_spin_unlock(&rnp_old->fqslock);
         if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                 rsp->n_force_qs_lh++;
@@ -2303,6 +2354,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
         /* If there are callbacks ready, invoke them. */
         if (cpu_has_callbacks_ready_to_invoke(rdp))
                 invoke_rcu_callbacks(rsp, rdp);
+
+        /* Do any needed deferred wakeups of rcuo kthreads. */
+        do_nocb_deferred_wakeup(rdp);
 }
 
 /*
@@ -2378,6 +2432,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
                         struct rcu_node *rnp_root = rcu_get_root(rsp);
 
                         raw_spin_lock(&rnp_root->lock);
+                        smp_mb__after_unlock_lock();
                         rcu_start_gp(rsp);
                         raw_spin_unlock(&rnp_root->lock);
                 } else {
@@ -2437,7 +2492,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
                 if (cpu != -1)
                         rdp = per_cpu_ptr(rsp->rda, cpu);
-                offline = !__call_rcu_nocb(rdp, head, lazy);
+                offline = !__call_rcu_nocb(rdp, head, lazy, flags);
                 WARN_ON_ONCE(offline);
                 /* _call_rcu() is illegal on offline CPU; leak the callback. */
                 local_irq_restore(flags);
@@ -2757,6 +2812,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
         /* Check for CPU stalls, if enabled. */
         check_cpu_stall(rsp, rdp);
 
+        /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
+        if (rcu_nohz_full_cpu(rsp))
+                return 0;
+
         /* Is the RCU core waiting for a quiescent state from this CPU? */
         if (rcu_scheduler_fully_active &&
             rdp->qs_pending && !rdp->passed_quiesce) {
@@ -2790,6 +2849,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
                 return 1;
         }
 
+        /* Does this CPU need a deferred NOCB wakeup? */
+        if (rcu_nocb_need_deferred_wakeup(rdp)) {
+                rdp->n_rp_nocb_defer_wakeup++;
+                return 1;
+        }
+
         /* nothing to do */
         rdp->n_rp_need_nothing++;
         return 0;
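The checks added to __rcu_pending() keep its shape: a ladder of cheap tests, each bumping a per-CPU statistics counter and returning early so the caller can decide whether to kick the RCU core. A runnable sketch of the new deferred-wakeup rung, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

struct stats {
        unsigned long n_rp_nocb_defer_wakeup;
        unsigned long n_rp_need_nothing;
};

static bool nocb_need_deferred_wakeup;  /* stands in for per-CPU rdp state */

static int pending(struct stats *st)
{
        /* Does this CPU need a deferred NOCB wakeup? */
        if (nocb_need_deferred_wakeup) {
                st->n_rp_nocb_defer_wakeup++;
                return 1;       /* core should run and flush the wakeup */
        }

        /* nothing to do */
        st->n_rp_need_nothing++;
        return 0;
}

int main(void)
{
        struct stats st = { 0, 0 };

        nocb_need_deferred_wakeup = true;
        printf("pending: %d\n", pending(&st));
        return 0;
}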
@@ -3214,9 +3279,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
         int i;
 
-        for (i = rcu_num_lvls - 1; i > 0; i--)
+        rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+        for (i = rcu_num_lvls - 2; i >= 0; i--)
                 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-        rsp->levelspread[0] = rcu_fanout_leaf;
 }
 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
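The rcu_init_levelspread() change above moves rcu_fanout_leaf from levelspread[0] to levelspread[rcu_num_lvls - 1], so the leaf fanout lands on the leaf level of the tree rather than the root. A worked example for an assumed three-level tree (the fanout values here are illustrative, not required by this diff):

#include <stdio.h>

#define RCU_NUM_LVLS    3
#define FANOUT          64      /* stands in for CONFIG_RCU_FANOUT */
#define FANOUT_LEAF     16      /* stands in for rcu_fanout_leaf */

int main(void)
{
        int old_spread[RCU_NUM_LVLS], new_spread[RCU_NUM_LVLS], i;

        /* Old (buggy) computation: leaf fanout ends up at the root. */
        for (i = RCU_NUM_LVLS - 1; i > 0; i--)
                old_spread[i] = FANOUT;
        old_spread[0] = FANOUT_LEAF;

        /* New computation: leaf fanout at the leaf level. */
        new_spread[RCU_NUM_LVLS - 1] = FANOUT_LEAF;
        for (i = RCU_NUM_LVLS - 2; i >= 0; i--)
                new_spread[i] = FANOUT;

        for (i = 0; i < RCU_NUM_LVLS; i++)
                printf("level %d: old=%d new=%d\n",
                       i, old_spread[i], new_spread[i]);
        return 0;
}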
@@ -3346,6 +3411,8 @@ static void __init rcu_init_geometry(void)
         if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
             nr_cpu_ids == NR_CPUS)
                 return;
+        pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
+                rcu_fanout_leaf, nr_cpu_ids);
 
         /*
          * Compute number of nodes that can be handled an rcu_node tree