author      Thomas Gleixner <tglx@linutronix.de>    2016-07-13 13:17:03 -0400
committer   Ingo Molnar <mingo@kernel.org>          2016-07-15 04:41:44 -0400
commit      4df8374254ea9294dfe4b8c447a1b7eddc543dbf
tree        b2223178530e7c18e36caa40cfeb1780578d18d2
parent      15d7e3d349a329ad48a29c3a818eacd1c3f7e3ef
rcu: Convert rcutree to hotplug state machine
Straightforward conversion to the state machine. The question remains whether all of these state transitions are really needed for this to work.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153337.982013161@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   include/linux/cpuhotplug.h |   3
-rw-r--r--   include/linux/rcutiny.h    |   7
-rw-r--r--   include/linux/rcutree.h    |   7
-rw-r--r--   kernel/cpu.c               |  14
-rw-r--r--   kernel/rcu/tree.c          | 105
5 files changed, 83 insertions, 53 deletions
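
The conversion replaces the single rcu_cpu_notify() multiplexer with one dedicated callback per hotplug phase, wired into the fixed-order state tables in kernel/cpu.c. For comparison, the sketch below shows how a subsystem that does not need a fixed ordering slot would typically hook an online/offline pair into the state machine through the dynamic CPUHP_AP_ONLINE_DYN range. It is a minimal, hypothetical example; the example_* names are made up and are not part of this patch.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

/* Hypothetical per-phase callbacks; both return 0 on success. */
static int example_online_cpu(unsigned int cpu)
{
	pr_info("example: CPU%u is coming online\n", cpu);
	return 0;
}

static int example_offline_cpu(unsigned int cpu)
{
	pr_info("example: CPU%u is going offline\n", cpu);
	return 0;
}

static enum cpuhp_state example_state;

static int __init example_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN asks the core to allocate a slot in the
	 * dynamic online range; the startup callback is also invoked for
	 * every CPU that is already online at registration time.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online_cpu, example_offline_cpu);
	if (ret < 0)
		return ret;
	example_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	/* Teardown runs for all online CPUs before the slot is released. */
	cpuhp_remove_state(example_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

RCU needs prepare/dead and dying callbacks in addition to the online/offline pair, and they have to run at fixed points relative to other states, which is why this patch adds dedicated CPUHP_RCUTREE_PREP, CPUHP_AP_RCUTREE_DYING and CPUHP_AP_RCUTREE_ONLINE enum entries and static table slots instead of a dynamic registration.
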
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 544b5563720e..201a2e23bc49 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -20,11 +20,13 @@ enum cpuhp_state {
 	CPUHP_X2APIC_PREPARE,
 	CPUHP_SMPCFD_PREPARE,
 	CPUHP_TIMERS_DEAD,
+	CPUHP_RCUTREE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
+	CPUHP_AP_RCUTREE_DYING,
 	CPUHP_AP_IRQ_GIC_STARTING,
 	CPUHP_AP_IRQ_GICV3_STARTING,
 	CPUHP_AP_IRQ_HIP04_STARTING,
@@ -80,6 +82,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
 	CPUHP_AP_PERF_ARM_CCN_ONLINE,
 	CPUHP_AP_WORKQUEUE_ONLINE,
+	CPUHP_AP_RCUTREE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 93aea75029fb..ac81e4063b40 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -243,4 +243,11 @@ static inline void rcu_all_qs(void)
 	barrier(); /* Avoid RCU read-side critical sections leaking across. */
 }
 
+/* RCUtree hotplug events */
+#define rcutree_prepare_cpu NULL
+#define rcutree_online_cpu NULL
+#define rcutree_offline_cpu NULL
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5043cb823fb2..63a4e4cf40a5 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -111,4 +111,11 @@ bool rcu_is_watching(void);
 
 void rcu_all_qs(void);
 
+/* RCUtree hotplug events */
+int rcutree_prepare_cpu(unsigned int cpu);
+int rcutree_online_cpu(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+int rcutree_dead_cpu(unsigned int cpu);
+int rcutree_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 008e2fd40cb1..f24f45915b54 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1205,6 +1205,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.startup = NULL,
 		.teardown = timers_dead_cpu,
 	},
+	[CPUHP_RCUTREE_PREP] = {
+		.name = "RCU-tree prepare",
+		.startup = rcutree_prepare_cpu,
+		.teardown = rcutree_dead_cpu,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1263,6 +1268,10 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup = sched_cpu_starting,
 		.teardown = sched_cpu_dying,
 	},
+	[CPUHP_AP_RCUTREE_DYING] = {
+		.startup = NULL,
+		.teardown = rcutree_dying_cpu,
+	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
 	 * disabled. Will be removed once the notifiers are converted to
@@ -1296,6 +1305,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup = workqueue_online_cpu,
 		.teardown = workqueue_offline_cpu,
 	},
+	[CPUHP_AP_RCUTREE_ONLINE] = {
+		.name = "RCU-tree online",
+		.startup = rcutree_online_cpu,
+		.teardown = rcutree_offline_cpu,
+	},
 
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f433959e9322..5d80925e7fc8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching);
  * offline to continue to use RCU for one jiffy after marking itself
  * offline in the cpu_online_mask. This leniency is necessary given the
  * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the CPU_DYING
- * notifiers.
+ * the fact that a CPU enters the scheduler after completing the teardown
+ * of the CPU.
  *
  * This is also why RCU internally marks CPUs online during the
- * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
+ * preparation phase and offline after the CPU has been taken down.
  *
  * Disable checking if in an NMI handler because we cannot safely report
  * errors from NMI handlers anyway.
@@ -3806,12 +3806,58 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
-static void rcu_prepare_cpu(int cpu)
+int rcutree_prepare_cpu(unsigned int cpu)
 {
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
 		rcu_init_percpu_data(cpu, rsp);
+
+	rcu_prepare_kthreads(cpu);
+	rcu_spawn_all_nocb_kthreads(cpu);
+
+	return 0;
+}
+
+static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+
+	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+}
+
+int rcutree_online_cpu(unsigned int cpu)
+{
+	sync_sched_exp_online_cleanup(cpu);
+	rcutree_affinity_setting(cpu, -1);
+	return 0;
+}
+
+int rcutree_offline_cpu(unsigned int cpu)
+{
+	rcutree_affinity_setting(cpu, cpu);
+	return 0;
+}
+
+
+int rcutree_dying_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_cpu(rsp);
+	return 0;
+}
+
+int rcutree_dead_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rcu_cleanup_dead_cpu(cpu, rsp);
+		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+	}
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu)
 }
 #endif
 
-/*
- * Handle CPU online/offline notification events.
- */
-int rcu_cpu_notify(struct notifier_block *self,
-		   unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-	struct rcu_state *rsp;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		rcu_prepare_cpu(cpu);
-		rcu_prepare_kthreads(cpu);
-		rcu_spawn_all_nocb_kthreads(cpu);
-		break;
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		sync_sched_exp_online_cleanup(cpu);
-		rcu_boost_kthread_setaffinity(rnp, -1);
-		break;
-	case CPU_DOWN_PREPARE:
-		rcu_boost_kthread_setaffinity(rnp, cpu);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		for_each_rcu_flavor(rsp)
-			rcu_cleanup_dying_cpu(rsp);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dead_cpu(cpu, rsp);
-			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
-		}
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int rcu_pm_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
 {
@@ -4208,10 +4208,9 @@ void __init rcu_init(void)
 	 * this is called early in boot, before either interrupts
 	 * or the scheduler are operational.
 	 */
-	cpu_notifier(rcu_cpu_notify, 0);
 	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
-		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+		rcutree_prepare_cpu(cpu);
 }
 
 #include "tree_exp.h"
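
To make the pairing of the table entries concrete: the hotplug core runs the startup callbacks in ascending state order when a CPU comes up and the teardown callbacks in descending order when it goes down, so rcutree_prepare_cpu() is paired with rcutree_dead_cpu() in the prepare section and rcutree_online_cpu() with rcutree_offline_cpu() in the online section. The following is a deliberately simplified userspace model of that walk, not the kernel implementation; the dying step and error rollback are omitted, and the function names merely echo the callbacks added by this patch.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct cpuhp_step. */
struct step {
	const char *name;
	int (*startup)(unsigned int cpu);
	int (*teardown)(unsigned int cpu);
};

/* Toy callbacks standing in for rcutree_prepare_cpu() and friends. */
static int prep(unsigned int cpu)    { printf("CPU%u: prepare\n", cpu); return 0; }
static int dead(unsigned int cpu)    { printf("CPU%u: dead\n", cpu);    return 0; }
static int online(unsigned int cpu)  { printf("CPU%u: online\n", cpu);  return 0; }
static int offline(unsigned int cpu) { printf("CPU%u: offline\n", cpu); return 0; }

static const struct step steps[] = {
	{ "RCU-tree prepare", prep,   dead    },
	{ "RCU-tree online",  online, offline },
};

#define NR_STEPS (sizeof(steps) / sizeof(steps[0]))

/* Bring a CPU up: run every startup callback in ascending state order. */
static void cpu_up(unsigned int cpu)
{
	for (unsigned int i = 0; i < NR_STEPS; i++)
		if (steps[i].startup)
			steps[i].startup(cpu);
}

/* Take a CPU down: run every teardown callback in descending state order. */
static void cpu_down(unsigned int cpu)
{
	for (unsigned int i = NR_STEPS; i-- > 0; )
		if (steps[i].teardown)
			steps[i].teardown(cpu);
}

int main(void)
{
	cpu_up(1);    /* prepare, then online  */
	cpu_down(1);  /* offline, then dead    */
	return 0;
}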