Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r-- | kernel/rcu/tree.c | 174
1 file changed, 57 insertions(+), 117 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9bb5dff50815..84fe96641b2e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -97,9 +97,6 @@ struct rcu_state sname##_state = { \
 	.gp_state = RCU_GP_IDLE, \
 	.gpnum = 0UL - 300UL, \
 	.completed = 0UL - 300UL, \
-	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
-	.orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
-	.orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 	.name = RCU_STATE_NAME(sname), \
 	.abbr = sabbr, \
@@ -843,13 +840,9 @@ static void rcu_eqs_enter(bool user)
  */
 void rcu_idle_enter(void)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
 	rcu_eqs_enter(false);
-	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
 #ifdef CONFIG_NO_HZ_FULL
 /**
@@ -862,7 +855,8 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_user_enter(void)
 {
-	rcu_eqs_enter(1);
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
+	rcu_eqs_enter(true);
 }
 #endif /* CONFIG_NO_HZ_FULL */
 
@@ -955,8 +949,10 @@ static void rcu_eqs_exit(bool user)
 	if (oldval & DYNTICK_TASK_NEST_MASK) {
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	} else {
+		__this_cpu_inc(disable_rcu_irq_enter);
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 		rcu_eqs_exit_common(oldval, user);
+		__this_cpu_dec(disable_rcu_irq_enter);
 	}
 }
 
@@ -979,7 +975,6 @@ void rcu_idle_exit(void)
 	rcu_eqs_exit(false);
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
 #ifdef CONFIG_NO_HZ_FULL
 /**
@@ -1358,12 +1353,13 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
 	if (j - gpa > 2 * HZ) {
-		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
+		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
 		       rsp->name, j - gpa,
 		       rsp->gpnum, rsp->completed,
 		       rsp->gp_flags,
 		       gp_state_getname(rsp->gp_state), rsp->gp_state,
-		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
+		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
+		       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
 		if (rsp->gp_kthread) {
 			sched_show_task(rsp->gp_kthread);
 			wake_up_process(rsp->gp_kthread);
@@ -2067,8 +2063,8 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for wait_event_interruptible_timeout() wakeup
- * at force-quiescent-state time.
+ * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
 {
@@ -2206,9 +2202,8 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("reqwait"));
 			rsp->gp_state = RCU_GP_WAIT_GPS;
-			swait_event_interruptible(rsp->gp_wq,
-						 READ_ONCE(rsp->gp_flags) &
-						 RCU_GP_FLAG_INIT);
+			swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+					 RCU_GP_FLAG_INIT);
 			rsp->gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
@@ -2239,7 +2234,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = swait_event_interruptible_timeout(rsp->gp_wq,
+			ret = swait_event_idle_timeout(rsp->gp_wq,
 					rcu_gp_fqs_check_wake(rsp, &gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
@@ -2409,6 +2404,8 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			return;
 		}
 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
+		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
+			     rcu_preempt_blocked_readers_cgp(rnp));
 		rnp->qsmask &= ~mask;
 		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
 						 mask, rnp->qsmask, rnp->level,
@@ -3476,10 +3473,11 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 	struct rcu_state *rsp = rdp->rsp;
 
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(rsp, TPS("LastCB"), -1,
+				   rsp->barrier_sequence);
 		complete(&rsp->barrier_completion);
 	} else {
-		_rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
 	}
 }
 
@@ -3491,14 +3489,15 @@ static void rcu_barrier_func(void *type)
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
-	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
 	debug_rcu_head_queue(&rdp->barrier_head);
 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
 		atomic_inc(&rsp->barrier_cpu_count);
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
-		_rcu_barrier_trace(rsp, "IRQNQ", -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
+				   rsp->barrier_sequence);
 	}
 }
 
@@ -3512,14 +3511,15 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
 
-	_rcu_barrier_trace(rsp, "Begin", -1, s);
+	_rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rsp->barrier_mutex);
 
 	/* Did someone else do our work for us? */
 	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
-		_rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
+				   rsp->barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rsp->barrier_mutex);
 		return;
@@ -3527,7 +3527,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Mark the start of the barrier operation. */
 	rcu_seq_start(&rsp->barrier_sequence);
-	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
 
 	/*
 	 * Initialize the count to one rather than to zero in order to
@@ -3550,10 +3550,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 		rdp = per_cpu_ptr(rsp->rda, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
 			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
-				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+				_rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
 						   rsp->barrier_sequence);
 			} else {
-				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+				_rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
 						   rsp->barrier_sequence);
 				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
@@ -3561,11 +3561,11 @@ static void _rcu_barrier(struct rcu_state *rsp)
 					       rcu_barrier_callback, rsp, cpu, 0);
 			}
 		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
-			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
+			_rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
 					   rsp->barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
 		} else {
-			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+			_rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
 					   rsp->barrier_sequence);
 		}
 	}
@@ -3582,7 +3582,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	wait_for_completion(&rsp->barrier_completion);
 
 	/* Mark the end of the barrier operation. */
-	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
 	rcu_seq_end(&rsp->barrier_sequence);
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
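Note: the "Did someone else do our work for us?" early exit in the _rcu_barrier() hunks above rests on the ->barrier_sequence counter manipulated by rcu_seq_snap(), rcu_seq_done(), rcu_seq_start(), and rcu_seq_end(). The following user-space sketch illustrates that snapshot/done idiom; the helper names and the exact arithmetic are assumptions for illustration, not the kernel's rcu_seq_*() implementation.

#include <assert.h>
#include <stdbool.h>

static unsigned long seq;	/* low bit set => an operation is in flight */

static unsigned long seq_snap(void)
{
	/* Smallest even value implying a full operation that starts after now. */
	return (seq + 3) & ~1UL;
}

static bool seq_done(unsigned long snap)
{
	return (long)(seq - snap) >= 0;	/* has the counter reached the snapshot? */
}

static void seq_start(void) { seq++; assert(seq & 1); }	/* like "Inc1" */
static void seq_end(void)   { seq++; assert(!(seq & 1)); }	/* like "Inc2" */

int main(void)
{
	unsigned long s = seq_snap();	/* like rcu_seq_snap(&rsp->barrier_sequence) */

	if (seq_done(s))		/* someone else already did the work */
		return 0;
	seq_start();
	/* ... do the barrier work ... */
	seq_end();
	assert(seq_done(s));		/* later requests with this snapshot exit early */
	return 0;
}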
@@ -3684,8 +3684,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	 */
 	rnp = rdp->mynode;
 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
-	if (!rdp->beenonline)
-		WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
 	rdp->beenonline = true;	 /* We have now been online. */
 	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
 	rdp->completed = rnp->completed;
@@ -3789,6 +3787,8 @@ void rcu_cpu_starting(unsigned int cpu)
 {
 	unsigned long flags;
 	unsigned long mask;
+	int nbits;
+	unsigned long oldmask;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
@@ -3799,9 +3799,15 @@ void rcu_cpu_starting(unsigned int cpu)
 		mask = rdp->grpmask;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rnp->qsmaskinitnext |= mask;
+		oldmask = rnp->expmaskinitnext;
 		rnp->expmaskinitnext |= mask;
+		oldmask ^= rnp->expmaskinitnext;
+		nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
+		/* Allow lockless access for expedited grace periods. */
+		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
+	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
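Note: the rcu_cpu_starting() hunk above counts how many CPUs an ->expmaskinitnext update actually added by XORing the old and new masks and taking the bit weight, then publishes the ->ncpus total with a release store so the expedited grace-period code can read it without holding the lock. Below is a minimal user-space sketch of that idiom; the names and C11 atomics stand in for the kernel's bitmap_weight()/smp_store_release() and are assumptions, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

static unsigned long expmaskinitnext;	/* updated under a lock in the kernel */
static atomic_ulong ncpus;		/* read locklessly elsewhere */

static void cpu_starting(unsigned long mask)
{
	unsigned long oldmask = expmaskinitnext;
	int nbits;

	expmaskinitnext |= mask;
	oldmask ^= expmaskinitnext;		/* bits that were newly set */
	nbits = __builtin_popcountl(oldmask);	/* like bitmap_weight() */

	/* Publish the new total; readers pair this with an acquire load. */
	atomic_store_explicit(&ncpus,
			      atomic_load_explicit(&ncpus, memory_order_relaxed) + nbits,
			      memory_order_release);
}

int main(void)
{
	cpu_starting(1UL << 3);
	cpu_starting(1UL << 3);	/* bit already set: adds nothing */
	cpu_starting(1UL << 5);
	printf("ncpus = %lu\n",
	       (unsigned long)atomic_load_explicit(&ncpus, memory_order_acquire)); /* 2 */
	return 0;
}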
@@ -3845,96 +3851,30 @@ void rcu_report_dead(unsigned int cpu)
 		rcu_cleanup_dying_idle_cpu(cpu, rsp);
 }
 
-/*
- * Send the specified CPU's RCU callbacks to the orphanage.  The
- * specified CPU must be offline, and the caller must hold the
- * ->orphan_lock.
- */
-static void
-rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
-			  struct rcu_node *rnp, struct rcu_data *rdp)
-{
-	lockdep_assert_held(&rsp->orphan_lock);
-
-	/* No-CBs CPUs do not have orphanable callbacks. */
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
-		return;
-
-	/*
-	 * Orphan the callbacks.  First adjust the counts.  This is safe
-	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
-	 * cannot be running now.  Thus no memory barrier is required.
-	 */
-	rdp->n_cbs_orphaned += rcu_segcblist_n_cbs(&rdp->cblist);
-	rcu_segcblist_extract_count(&rdp->cblist, &rsp->orphan_done);
-
-	/*
-	 * Next, move those callbacks still needing a grace period to
-	 * the orphanage, where some other CPU will pick them up.
-	 * Some of the callbacks might have gone partway through a grace
-	 * period, but that is too bad.  They get to start over because we
-	 * cannot assume that grace periods are synchronized across CPUs.
-	 */
-	rcu_segcblist_extract_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
-
-	/*
-	 * Then move the ready-to-invoke callbacks to the orphanage,
-	 * where some other CPU will pick them up.  These will not be
-	 * required to pass though another grace period: They are done.
-	 */
-	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rsp->orphan_done);
-
-	/* Finally, disallow further callbacks on this CPU. */
-	rcu_segcblist_disable(&rdp->cblist);
-}
-
-/*
- * Adopt the RCU callbacks from the specified rcu_state structure's
- * orphanage.  The caller must hold the ->orphan_lock.
- */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
-{
-	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
-
-	lockdep_assert_held(&rsp->orphan_lock);
-
-	/* No-CBs CPUs are handled specially. */
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-	    rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
-		return;
-
-	/* Do the accounting first. */
-	rdp->n_cbs_adopted += rsp->orphan_done.len;
-	if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
-		rcu_idle_count_callbacks_posted();
-	rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);
-
-	/*
-	 * We do not need a memory barrier here because the only way we
-	 * can get here if there is an rcu_barrier() in flight is if
-	 * we are the task doing the rcu_barrier().
-	 */
-
-	/* First adopt the ready-to-invoke callbacks, then the done ones. */
-	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rsp->orphan_done);
-	WARN_ON_ONCE(rsp->orphan_done.head);
-	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
-	WARN_ON_ONCE(rsp->orphan_pend.head);
-	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) !=
-		     !rcu_segcblist_n_cbs(&rdp->cblist));
-}
-
-/* Orphan the dead CPU's callbacks, and then adopt them. */
+/* Migrate the dead CPU's callbacks to the current CPU. */
 static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
+	struct rcu_data *my_rdp;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
-	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
-	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-	rcu_adopt_orphan_cbs(rsp, flags);
-	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
+	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
+		return;  /* No callbacks to migrate. */
+
+	local_irq_save(flags);
+	my_rdp = this_cpu_ptr(rsp->rda);
+	if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
+		local_irq_restore(flags);
+		return;
+	}
+	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
+	rcu_advance_cbs(rsp, rnp_root, my_rdp); /* Assign GP to pending CBs. */
+	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
+	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
+		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
+	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
 		  !rcu_segcblist_empty(&rdp->cblist),
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",