Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 97 ++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 57 insertions(+), 40 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9815447d22e0..7680fc275036 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -105,7 +105,7 @@ struct rcu_state sname##_state = { \
         .name = RCU_STATE_NAME(sname), \
         .abbr = sabbr, \
 }; \
-DEFINE_PER_CPU(struct rcu_data, sname##_data)
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
@@ -152,19 +152,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-#ifdef CONFIG_RCU_BOOST
-
-/*
- * Control variables for per-CPU and per-rcu_node kthreads. These
- * handle all flavors of RCU.
- */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -286,11 +273,11 @@ static void rcu_momentary_dyntick_idle(void)
  * and requires special handling for preemptible RCU.
  * The caller must have disabled preemption.
  */
-void rcu_note_context_switch(int cpu)
+void rcu_note_context_switch(void)
 {
         trace_rcu_utilization(TPS("Start context switch"));
         rcu_sched_qs();
-        rcu_preempt_note_context_switch(cpu);
+        rcu_preempt_note_context_switch();
         if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
                 rcu_momentary_dyntick_idle();
         trace_rcu_utilization(TPS("End context switch"));
@@ -325,7 +312,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
                                   unsigned long *maxj),
                          bool *isidle, unsigned long *maxj);
 static void force_quiescent_state(struct rcu_state *rsp);
-static int rcu_pending(int cpu);
+static int rcu_pending(void);
 
 /*
  * Return the number of RCU-sched batches processed thus far for debug & stats.
@@ -510,11 +497,11 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
-                                 bool user)
+static void rcu_eqs_enter_common(long long oldval, bool user)
 {
         struct rcu_state *rsp;
         struct rcu_data *rdp;
+        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
         trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
         if (!user && !is_idle_task(current)) {
@@ -531,7 +518,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
                 rdp = this_cpu_ptr(rsp->rda);
                 do_nocb_deferred_wakeup(rdp);
         }
-        rcu_prepare_for_idle(smp_processor_id());
+        rcu_prepare_for_idle();
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
         smp_mb__before_atomic(); /* See above. */
         atomic_inc(&rdtp->dynticks);
@@ -565,7 +552,7 @@ static void rcu_eqs_enter(bool user)
         WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
         if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
                 rdtp->dynticks_nesting = 0;
-                rcu_eqs_enter_common(rdtp, oldval, user);
+                rcu_eqs_enter_common(oldval, user);
         } else {
                 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
         }
@@ -589,7 +576,7 @@ void rcu_idle_enter(void)
 
         local_irq_save(flags);
         rcu_eqs_enter(false);
-        rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
+        rcu_sysidle_enter(0);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -639,8 +626,8 @@ void rcu_irq_exit(void)
         if (rdtp->dynticks_nesting)
                 trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
         else
-                rcu_eqs_enter_common(rdtp, oldval, true);
-        rcu_sysidle_enter(rdtp, 1);
+                rcu_eqs_enter_common(oldval, true);
+        rcu_sysidle_enter(1);
         local_irq_restore(flags);
 }
 
@@ -651,16 +638,17 @@ void rcu_irq_exit(void)
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
-                                int user)
+static void rcu_eqs_exit_common(long long oldval, int user)
 {
+        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
         rcu_dynticks_task_exit();
         smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
         atomic_inc(&rdtp->dynticks);
         /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
         smp_mb__after_atomic(); /* See above. */
         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-        rcu_cleanup_after_idle(smp_processor_id());
+        rcu_cleanup_after_idle();
         trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
         if (!user && !is_idle_task(current)) {
                 struct task_struct *idle __maybe_unused =
@@ -691,7 +679,7 @@ static void rcu_eqs_exit(bool user)
                 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
         } else {
                 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-                rcu_eqs_exit_common(rdtp, oldval, user);
+                rcu_eqs_exit_common(oldval, user);
         }
 }
 
@@ -712,7 +700,7 @@ void rcu_idle_exit(void)
 
         local_irq_save(flags);
         rcu_eqs_exit(false);
-        rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
+        rcu_sysidle_exit(0);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -763,8 +751,8 @@ void rcu_irq_enter(void)
         if (oldval)
                 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
         else
-                rcu_eqs_exit_common(rdtp, oldval, true);
-        rcu_sysidle_exit(rdtp, 1);
+                rcu_eqs_exit_common(oldval, true);
+        rcu_sysidle_exit(1);
         local_irq_restore(flags);
 }
 
@@ -2387,7 +2375,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  * invoked from the scheduling-clock interrupt. If rcu_pending returns
  * false, there is no point in invoking rcu_check_callbacks().
  */
-void rcu_check_callbacks(int cpu, int user)
+void rcu_check_callbacks(int user)
 {
         trace_rcu_utilization(TPS("Start scheduler-tick"));
         increment_cpu_stall_ticks();
@@ -2419,8 +2407,8 @@ void rcu_check_callbacks(int cpu, int user)
 
                 rcu_bh_qs();
         }
-        rcu_preempt_check_callbacks(cpu);
-        if (rcu_pending(cpu))
+        rcu_preempt_check_callbacks();
+        if (rcu_pending())
                 invoke_rcu_core();
         if (user)
                 rcu_note_voluntary_context_switch(current);
@@ -2963,6 +2951,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
+        cpumask_var_t cm;
+        bool cma = false;
+        int cpu;
         long firstsnap, s, snap;
         int trycount = 0;
         struct rcu_state *rsp = &rcu_sched_state;
@@ -2997,11 +2988,26 @@ void synchronize_sched_expedited(void)
         }
         WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
+        /* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
+        cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
+        if (cma) {
+                cpumask_copy(cm, cpu_online_mask);
+                cpumask_clear_cpu(raw_smp_processor_id(), cm);
+                for_each_cpu(cpu, cm) {
+                        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+                        if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+                                cpumask_clear_cpu(cpu, cm);
+                }
+                if (cpumask_weight(cm) == 0)
+                        goto all_cpus_idle;
+        }
+
         /*
          * Each pass through the following loop attempts to force a
          * context switch on each CPU.
          */
-        while (try_stop_cpus(cpu_online_mask,
+        while (try_stop_cpus(cma ? cm : cpu_online_mask,
                              synchronize_sched_expedited_cpu_stop,
                              NULL) == -EAGAIN) {
                 put_online_cpus();
@@ -3013,6 +3019,7 @@ void synchronize_sched_expedited(void)
                         /* ensure test happens before caller kfree */
                         smp_mb__before_atomic(); /* ^^^ */
                         atomic_long_inc(&rsp->expedited_workdone1);
+                        free_cpumask_var(cm);
                         return;
                 }
 
@@ -3022,6 +3029,7 @@ void synchronize_sched_expedited(void)
                 } else {
                         wait_rcu_gp(call_rcu_sched);
                         atomic_long_inc(&rsp->expedited_normal);
+                        free_cpumask_var(cm);
                         return;
                 }
 
@@ -3031,6 +3039,7 @@ void synchronize_sched_expedited(void)
                         /* ensure test happens before caller kfree */
                         smp_mb__before_atomic(); /* ^^^ */
                         atomic_long_inc(&rsp->expedited_workdone2);
+                        free_cpumask_var(cm);
                         return;
                 }
 
@@ -3045,6 +3054,7 @@ void synchronize_sched_expedited(void)
                         /* CPU hotplug operation in flight, use normal GP. */
                         wait_rcu_gp(call_rcu_sched);
                         atomic_long_inc(&rsp->expedited_normal);
+                        free_cpumask_var(cm);
                         return;
                 }
                 snap = atomic_long_read(&rsp->expedited_start);
@@ -3052,6 +3062,9 @@ void synchronize_sched_expedited(void)
         }
         atomic_long_inc(&rsp->expedited_stoppedcpus);
 
+all_cpus_idle:
+        free_cpumask_var(cm);
+
         /*
          * Everyone up to our most recent fetch is covered by our grace
          * period. Update the counter, but only if our work is still
@@ -3143,12 +3156,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  * by the current CPU, returning 1 if so. This function is part of the
  * RCU implementation; it is -not- an exported member of the RCU API.
  */
-static int rcu_pending(int cpu)
+static int rcu_pending(void)
 {
         struct rcu_state *rsp;
 
         for_each_rcu_flavor(rsp)
-                if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
+                if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
                         return 1;
         return 0;
 }
@@ -3158,7 +3171,7 @@ static int rcu_pending(int cpu)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
+static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
 {
         bool al = true;
         bool hc = false;
@@ -3166,7 +3179,7 @@ static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
         struct rcu_state *rsp;
 
         for_each_rcu_flavor(rsp) {
-                rdp = per_cpu_ptr(rsp->rda, cpu);
+                rdp = this_cpu_ptr(rsp->rda);
                 if (!rdp->nxtlist)
                         continue;
                 hc = true;
@@ -3485,8 +3498,10 @@ static int rcu_cpu_notify(struct notifier_block *self,
         case CPU_DEAD_FROZEN:
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
-                for_each_rcu_flavor(rsp)
+                for_each_rcu_flavor(rsp) {
                         rcu_cleanup_dead_cpu(cpu, rsp);
+                        do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+                }
                 break;
         default:
                 break;
@@ -3766,6 +3781,8 @@ void __init rcu_init(void)
         pm_notifier(rcu_pm_notify, 0);
         for_each_online_cpu(cpu)
                 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+
+        rcu_early_boot_tests();
 }
 
 #include "tree_plugin.h"