Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	267
1 files changed, 132 insertions, 135 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0a571e9a0f1d..8c0ec0f5a027 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -58,38 +58,33 @@ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */
  */
 static void __init rcu_bootup_announce_oddness(void)
 {
-#ifdef CONFIG_RCU_TRACE
-	pr_info("\tRCU debugfs-based tracing is enabled.\n");
-#endif
-#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
-	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
-	       CONFIG_RCU_FANOUT);
-#endif
-#ifdef CONFIG_RCU_FANOUT_EXACT
-	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
-#endif
-#ifdef CONFIG_RCU_FAST_NO_HZ
-	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
-#endif
-#ifdef CONFIG_PROVE_RCU
-	pr_info("\tRCU lockdep checking is enabled.\n");
-#endif
-#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
-	pr_info("\tRCU torture testing starts during boot.\n");
-#endif
-#if defined(CONFIG_RCU_CPU_STALL_INFO)
-	pr_info("\tAdditional per-CPU info printed with stalls.\n");
-#endif
-#if NUM_RCU_LVL_4 != 0
-	pr_info("\tFour-level hierarchy is enabled.\n");
-#endif
+	if (IS_ENABLED(CONFIG_RCU_TRACE))
+		pr_info("\tRCU debugfs-based tracing is enabled.\n");
+	if ((IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) ||
+	    (!IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32))
+		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
+			CONFIG_RCU_FANOUT);
+	if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT))
+		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
+	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
+		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
+	if (IS_ENABLED(CONFIG_PROVE_RCU))
+		pr_info("\tRCU lockdep checking is enabled.\n");
+	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
+		pr_info("\tRCU torture testing starts during boot.\n");
+	if (IS_ENABLED(CONFIG_RCU_CPU_STALL_INFO))
+		pr_info("\tAdditional per-CPU info printed with stalls.\n");
+	if (NUM_RCU_LVL_4 != 0)
+		pr_info("\tFour-level hierarchy is enabled.\n");
+	if (CONFIG_RCU_FANOUT_LEAF != 16)
+		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
+			CONFIG_RCU_FANOUT_LEAF);
 	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_BOOST
-	pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
-#endif
+	if (IS_ENABLED(CONFIG_RCU_BOOST))
+		pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
 }
 
 #ifdef CONFIG_PREEMPT_RCU
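The hunk above replaces preprocessor conditionals with C-level "if (IS_ENABLED(CONFIG_...))" tests. The stand-alone sketch below is only meant to illustrate why that conversion is attractive: the guarded branch is always parsed and type-checked, and a constant-false condition is removed by the optimizer just as effectively as an #ifdef would remove it. ENABLE_TRACING is a made-up stand-in for a Kconfig option, not part of the kernel patch.

/* Hypothetical config switch; build with -DENABLE_TRACING=1 to enable it. */
#include <stdio.h>

#ifndef ENABLE_TRACING
#define ENABLE_TRACING 0
#endif

static void announce_oddness(void)
{
	/* Old style: when the option is off, this code is never even parsed. */
#ifdef ENABLE_TRACING_OLD_STYLE
	printf("tracing enabled (old style)\n");
#endif
	/* New style: always compiled, folded away when the constant is 0. */
	if (ENABLE_TRACING)
		printf("tracing enabled (new style)\n");
}

int main(void)
{
	announce_oddness();
	return 0;
}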
@@ -180,7 +175,7 @@ static void rcu_preempt_note_context_switch(void)
 		 * But first, note that the current CPU must still be
 		 * on line!
 		 */
-		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
+		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
 		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
 			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
@@ -233,43 +228,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 }
 
 /*
- * Record a quiescent state for all tasks that were previously queued
- * on the specified rcu_node structure and that were blocking the current
- * RCU grace period. The caller must hold the specified rnp->lock with
- * irqs disabled, and this lock is released upon return, but irqs remain
- * disabled.
- */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-	__releases(rnp->lock)
-{
-	unsigned long mask;
-	struct rcu_node *rnp_p;
-
-	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		return; /* Still need more quiescent states! */
-	}
-
-	rnp_p = rnp->parent;
-	if (rnp_p == NULL) {
-		/*
-		 * Either there is only one rcu_node in the tree,
-		 * or tasks were kicked up to root rcu_node due to
-		 * CPUs going offline.
-		 */
-		rcu_report_qs_rsp(&rcu_preempt_state, flags);
-		return;
-	}
-
-	/* Report up the rest of the hierarchy. */
-	mask = rnp->grpmask;
-	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
-	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
-	smp_mb__after_unlock_lock();
-	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
-}
-
-/*
  * Advance a ->blkd_tasks-list pointer to the next entry, instead
  * returning NULL if at the end of the list.
  */
@@ -300,7 +258,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  */
 void rcu_read_unlock_special(struct task_struct *t)
 {
-	bool empty;
 	bool empty_exp;
 	bool empty_norm;
 	bool empty_exp_now;
@@ -334,7 +291,13 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block, complain if they get here. */
-	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
+	if (in_irq() || in_serving_softirq()) {
+		lockdep_rcu_suspicious(__FILE__, __LINE__,
+				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
+		pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
+			 t->rcu_read_unlock_special.s,
+			 t->rcu_read_unlock_special.b.blocked,
+			 t->rcu_read_unlock_special.b.need_qs);
 		local_irq_restore(flags);
 		return;
 	}
@@ -356,7 +319,6 @@ void rcu_read_unlock_special(struct task_struct *t)
 				break;
 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		}
-		empty = !rcu_preempt_has_tasks(rnp);
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		empty_exp = !rcu_preempted_readers_exp(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -377,14 +339,6 @@ void rcu_read_unlock_special(struct task_struct *t)
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
-		 * If this was the last task on the list, go see if we
-		 * need to propagate ->qsmaskinit bit clearing up the
-		 * rcu_node tree.
-		 */
-		if (!empty && !rcu_preempt_has_tasks(rnp))
-			rcu_cleanup_dead_rnp(rnp);
-
-		/*
 		 * If this was the last task on the current list, and if
 		 * we aren't waiting on any CPUs, report the quiescent state.
 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
@@ -399,7 +353,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 							 rnp->grplo,
 							 rnp->grphi,
 							 !!rnp->gp_tasks);
-			rcu_report_unblock_qs_rnp(rnp, flags);
+			rcu_report_unblock_qs_rnp(&rcu_preempt_state,
+						  rnp, flags);
 		} else {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		}
@@ -520,10 +475,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Check for a quiescent state from the current CPU. When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
@@ -585,7 +536,7 @@ void synchronize_rcu(void)
 			   "Illegal synchronize_rcu() in RCU read-side critical section");
 	if (!rcu_scheduler_active)
 		return;
-	if (rcu_expedited)
+	if (rcu_gp_is_expedited())
 		synchronize_rcu_expedited();
 	else
 		wait_rcu_gp(call_rcu);
@@ -630,9 +581,6 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree. (Calm down, calm down, we do the recursion
  * iteratively!)
  *
- * Most callers will set the "wake" flag, but the task initiating the
- * expedited grace period need not wake itself.
- *
  * Caller must hold sync_rcu_preempt_exp_mutex.
  */
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
@@ -667,29 +615,85 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 
 /*
  * Snapshot the tasks blocking the newly started preemptible-RCU expedited
- * grace period for the specified rcu_node structure. If there are no such
- * tasks, report it up the rcu_node hierarchy.
+ * grace period for the specified rcu_node structure, phase 1. If there
+ * are such tasks, set the ->expmask bits up the rcu_node tree and also
+ * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
+ * that work is needed here.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
- * CPU hotplug operations.
+ * Caller must hold sync_rcu_preempt_exp_mutex.
  */
 static void
-sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
+sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	unsigned long flags;
-	int must_wait = 0;
+	unsigned long mask;
+	struct rcu_node *rnp_up;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	smp_mb__after_unlock_lock();
+	WARN_ON_ONCE(rnp->expmask);
+	WARN_ON_ONCE(rnp->exp_tasks);
 	if (!rcu_preempt_has_tasks(rnp)) {
+		/* No blocked tasks, nothing to do. */
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	} else {
+		return;
+	}
+	/* Call for Phase 2 and propagate ->expmask bits up the tree. */
+	rnp->expmask = 1;
+	rnp_up = rnp;
+	while (rnp_up->parent) {
+		mask = rnp_up->grpmask;
+		rnp_up = rnp_up->parent;
+		if (rnp_up->expmask & mask)
+			break;
+		raw_spin_lock(&rnp_up->lock); /* irqs already off */
+		smp_mb__after_unlock_lock();
+		rnp_up->expmask |= mask;
+		raw_spin_unlock(&rnp_up->lock); /* irqs still off */
+	}
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Snapshot the tasks blocking the newly started preemptible-RCU expedited
+ * grace period for the specified rcu_node structure, phase 2. If the
+ * leaf rcu_node structure has its ->expmask field set, check for tasks.
+ * If there are some, clear ->expmask and set ->exp_tasks accordingly,
+ * then initiate RCU priority boosting. Otherwise, clear ->expmask and
+ * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
+ * enabling rcu_read_unlock_special() to do the bit-clearing.
+ *
+ * Caller must hold sync_rcu_preempt_exp_mutex.
+ */
+static void
+sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
+	if (!rnp->expmask) {
+		/* Phase 1 didn't do anything, so Phase 2 doesn't either. */
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		return;
+	}
+
+	/* Phase 1 is over. */
+	rnp->expmask = 0;
+
+	/*
+	 * If there are still blocked tasks, set up ->exp_tasks so that
+	 * rcu_read_unlock_special() will wake us and then boost them.
+	 */
+	if (rcu_preempt_has_tasks(rnp)) {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
 		rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
-		must_wait = 1;
+		return;
 	}
-	if (!must_wait)
-		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
+
+	/* No longer any blocked tasks, so undo bit setting. */
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	rcu_report_exp_rnp(rsp, rnp, false);
 }
 
 /**
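For readers tracing the new two-phase setup, here is a minimal sketch of the upward bit-propagation loop that sync_rcu_preempt_exp_init1() performs: walk toward the root, OR this node's bit into each ancestor, and stop early once an ancestor already has the bit set. struct node is a simplified stand-in for struct rcu_node, and the locking and memory barriers of the real code are deliberately omitted, so treat it as an illustration only.

/* Simplified stand-in for the rcu_node tree; no locking, no barriers. */
struct node {
	struct node *parent;
	unsigned long grpmask;	/* this node's bit in parent->expmask */
	unsigned long expmask;
};

static void propagate_exp_bit(struct node *np)
{
	np->expmask = 1;	/* mark this leaf for phase 2 */
	while (np->parent) {
		unsigned long mask = np->grpmask;

		np = np->parent;
		if (np->expmask & mask)
			break;		/* rest of the path already marked */
		np->expmask |= mask;
	}
}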
@@ -706,7 +710,6 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
-	unsigned long flags;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_preempt_state;
 	unsigned long snap;
@@ -757,19 +760,16 @@ void synchronize_rcu_expedited(void)
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
 
-	/* Initialize ->expmask for all non-leaf rcu_node structures. */
-	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
-		rnp->expmask = rnp->qsmaskinit;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	}
-
-	/* Snapshot current state of ->blkd_tasks lists. */
+	/*
+	 * Snapshot current state of ->blkd_tasks lists into ->expmask.
+	 * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
+	 * to start clearing them. Doing this in one phase leads to
+	 * strange races between setting and clearing bits, so just say "no"!
+	 */
+	rcu_for_each_leaf_node(rsp, rnp)
+		sync_rcu_preempt_exp_init1(rsp, rnp);
 	rcu_for_each_leaf_node(rsp, rnp)
-		sync_rcu_preempt_exp_init(rsp, rnp);
-	if (NUM_RCU_NODES > 1)
-		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
+		sync_rcu_preempt_exp_init2(rsp, rnp);
 
 	put_online_cpus();
 
@@ -859,8 +859,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 /*
  * Because there is no preemptible RCU, there can be no readers blocked.
  */
@@ -869,8 +867,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
 	return false;
 }
 
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
@@ -1170,7 +1166,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * Returns zero if all is well, a negated errno otherwise.
  */
 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp)
+				       struct rcu_node *rnp)
 {
 	int rnp_index = rnp - &rsp->node[0];
 	unsigned long flags;
@@ -1180,7 +1176,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (&rcu_preempt_state != rsp)
 		return 0;
 
-	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
 		return 0;
 
 	rsp->boost = 1;
@@ -1273,7 +1269,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 	struct task_struct *t = rnp->boost_kthread_task;
-	unsigned long mask = rnp->qsmaskinit;
+	unsigned long mask = rcu_rnp_online_cpus(rnp);
 	cpumask_var_t cm;
 	int cpu;
 
@@ -1945,7 +1941,8 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 		rhp = ACCESS_ONCE(rdp->nocb_follower_head);
 
 	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
-	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
+	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp &&
+	    rcu_scheduler_fully_active) {
 		/* RCU callback enqueued before CPU first came online??? */
 		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
 		       cpu, rhp->func);
@@ -2392,18 +2389,8 @@ void __init rcu_init_nohz(void)
 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 
 	for_each_rcu_flavor(rsp) {
-		for_each_cpu(cpu, rcu_nocb_mask) {
-			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-
-			/*
-			 * If there are early callbacks, they will need
-			 * to be moved to the nocb lists.
-			 */
-			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
-				     &rdp->nxtlist &&
-				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
-			init_nocb_callback_list(rdp);
-		}
+		for_each_cpu(cpu, rcu_nocb_mask)
+			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
 		rcu_organize_nocb_kthreads(rsp);
 	}
 }
@@ -2540,6 +2527,16 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 	if (!rcu_is_nocb_cpu(rdp->cpu))
 		return false;
 
+	/* If there are early-boot callbacks, move them to nocb lists. */
+	if (rdp->nxtlist) {
+		rdp->nocb_head = rdp->nxtlist;
+		rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
+		atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
+		atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
+		rdp->nxtlist = NULL;
+		rdp->qlen = 0;
+		rdp->qlen_lazy = 0;
+	}
 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
 	return true;
 }
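The init_nocb_callback_list() change above splices any early-boot callbacks from the ->nxtlist queue onto the no-CBs lists. The sketch below shows that head/tail-pointer hand-off pattern in isolation; struct queue, its fields, and splice_all() are simplified stand-ins rather than the kernel's rcu_data layout, and no locking is shown.

#include <stddef.h>

struct cb {
	struct cb *next;
};

struct queue {
	struct cb *head;	/* first callback, or NULL if empty */
	struct cb **tail;	/* &last->next, or &head when empty */
	long qlen;
};

/* Move every callback from src to dst (dst assumed empty), then reset src. */
static void splice_all(struct queue *dst, struct queue *src)
{
	if (!src->head)
		return;			/* nothing queued */
	dst->head = src->head;
	dst->tail = src->tail;
	dst->qlen = src->qlen;
	src->head = NULL;
	src->tail = &src->head;
	src->qlen = 0;
}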
@@ -2763,7 +2760,8 @@ static void rcu_sysidle_exit(int irq)
 
 /*
  * Check to see if the current CPU is idle. Note that usermode execution
- * does not count as idle. The caller must have disabled interrupts.
+ * does not count as idle. The caller must have disabled interrupts,
+ * and must be running on tick_do_timer_cpu.
  */
 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 				  unsigned long *maxj)
@@ -2784,8 +2782,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 	if (!*isidle || rdp->rsp != rcu_state_p ||
 	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
 		return;
-	if (rcu_gp_in_progress(rdp->rsp))
-		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+	/* Verify affinity of current kthread. */
+	WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
 	/* Pick up current idle and NMI-nesting counter and check. */
 	cur = atomic_read(&rdtp->dynticks_idle);
@@ -3068,11 +3066,10 @@ static void rcu_bind_gp_kthread(void)
 		return;
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	cpu = tick_do_timer_cpu;
-	if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
+	if (cpu >= 0 && cpu < nr_cpu_ids)
 		set_cpus_allowed_ptr(current, cpumask_of(cpu));
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-	if (!is_housekeeping_cpu(raw_smp_processor_id()))
-		housekeeping_affine(current);
+	housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
 
