aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-07-29 16:55:30 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-29 16:55:30 -0400
commita6408f6cb63ac0958fee7dbce7861ffb540d8a49 (patch)
treec94a835d343974171951e3b805e6bbbb02852ebc /kernel/rcu
parent1a81a8f2a5918956e214bb718099a89e500e7ec5 (diff)
parent4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner: "This is the next part of the hotplug rework. - Convert all notifiers with a priority assigned - Convert all CPU_STARTING/DYING notifiers The final removal of the STARTING/DYING infrastructure will happen when the merge window closes. Another 700 lines of impenetrable maze gone :)" * 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits) timers/core: Correct callback order during CPU hot plug leds/trigger/cpu: Move from CPU_STARTING to ONLINE level powerpc/numa: Convert to hotplug state machine arm/perf: Fix hotplug state machine conversion irqchip/armada: Avoid unused function warnings ARC/time: Convert to hotplug state machine clocksource/atlas7: Convert to hotplug state machine clocksource/armada-370-xp: Convert to hotplug state machine clocksource/exynos_mct: Convert to hotplug state machine clocksource/arm_global_timer: Convert to hotplug state machine rcu: Convert rcutree to hotplug state machine KVM/arm/arm64/vgic-new: Convert to hotplug state machine smp/cfd: Convert core to hotplug state machine x86/x2apic: Convert to CPU hotplug state machine profile: Convert to hotplug state machine timers/core: Convert to hotplug state machine hrtimer: Convert to hotplug state machine x86/tboot: Convert to hotplug state machine arm64/armv8 deprecated: Convert to hotplug state machine hwtracing/coresight-etm4x: Convert to hotplug state machine ...
Diffstat (limited to 'kernel/rcu')
-rw-r--r--kernel/rcu/tree.c105
1 file changed, 52 insertions, 53 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f433959e9322..5d80925e7fc8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching);
1073 * offline to continue to use RCU for one jiffy after marking itself 1073 * offline to continue to use RCU for one jiffy after marking itself
1074 * offline in the cpu_online_mask. This leniency is necessary given the 1074 * offline in the cpu_online_mask. This leniency is necessary given the
1075 * non-atomic nature of the online and offline processing, for example, 1075 * non-atomic nature of the online and offline processing, for example,
1076 * the fact that a CPU enters the scheduler after completing the CPU_DYING 1076 * the fact that a CPU enters the scheduler after completing the teardown
1077 * notifiers. 1077 * of the CPU.
1078 * 1078 *
1079 * This is also why RCU internally marks CPUs online during the 1079 * This is also why RCU internally marks CPUs online during in the
1080 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase. 1080 * preparation phase and offline after the CPU has been taken down.
1081 * 1081 *
1082 * Disable checking if in an NMI handler because we cannot safely report 1082 * Disable checking if in an NMI handler because we cannot safely report
1083 * errors from NMI handlers anyway. 1083 * errors from NMI handlers anyway.
@@ -3806,12 +3806,58 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3806 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3806 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3807} 3807}
3808 3808
3809static void rcu_prepare_cpu(int cpu) 3809int rcutree_prepare_cpu(unsigned int cpu)
3810{ 3810{
3811 struct rcu_state *rsp; 3811 struct rcu_state *rsp;
3812 3812
3813 for_each_rcu_flavor(rsp) 3813 for_each_rcu_flavor(rsp)
3814 rcu_init_percpu_data(cpu, rsp); 3814 rcu_init_percpu_data(cpu, rsp);
3815
3816 rcu_prepare_kthreads(cpu);
3817 rcu_spawn_all_nocb_kthreads(cpu);
3818
3819 return 0;
3820}
3821
3822static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3823{
3824 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3825
3826 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3827}
3828
3829int rcutree_online_cpu(unsigned int cpu)
3830{
3831 sync_sched_exp_online_cleanup(cpu);
3832 rcutree_affinity_setting(cpu, -1);
3833 return 0;
3834}
3835
3836int rcutree_offline_cpu(unsigned int cpu)
3837{
3838 rcutree_affinity_setting(cpu, cpu);
3839 return 0;
3840}
3841
3842
3843int rcutree_dying_cpu(unsigned int cpu)
3844{
3845 struct rcu_state *rsp;
3846
3847 for_each_rcu_flavor(rsp)
3848 rcu_cleanup_dying_cpu(rsp);
3849 return 0;
3850}
3851
3852int rcutree_dead_cpu(unsigned int cpu)
3853{
3854 struct rcu_state *rsp;
3855
3856 for_each_rcu_flavor(rsp) {
3857 rcu_cleanup_dead_cpu(cpu, rsp);
3858 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3859 }
3860 return 0;
3815} 3861}
3816 3862
3817#ifdef CONFIG_HOTPLUG_CPU 3863#ifdef CONFIG_HOTPLUG_CPU
@@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu)
3851} 3897}
3852#endif 3898#endif
3853 3899
3854/*
3855 * Handle CPU online/offline notification events.
3856 */
3857int rcu_cpu_notify(struct notifier_block *self,
3858 unsigned long action, void *hcpu)
3859{
3860 long cpu = (long)hcpu;
3861 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3862 struct rcu_node *rnp = rdp->mynode;
3863 struct rcu_state *rsp;
3864
3865 switch (action) {
3866 case CPU_UP_PREPARE:
3867 case CPU_UP_PREPARE_FROZEN:
3868 rcu_prepare_cpu(cpu);
3869 rcu_prepare_kthreads(cpu);
3870 rcu_spawn_all_nocb_kthreads(cpu);
3871 break;
3872 case CPU_ONLINE:
3873 case CPU_DOWN_FAILED:
3874 sync_sched_exp_online_cleanup(cpu);
3875 rcu_boost_kthread_setaffinity(rnp, -1);
3876 break;
3877 case CPU_DOWN_PREPARE:
3878 rcu_boost_kthread_setaffinity(rnp, cpu);
3879 break;
3880 case CPU_DYING:
3881 case CPU_DYING_FROZEN:
3882 for_each_rcu_flavor(rsp)
3883 rcu_cleanup_dying_cpu(rsp);
3884 break;
3885 case CPU_DEAD:
3886 case CPU_DEAD_FROZEN:
3887 case CPU_UP_CANCELED:
3888 case CPU_UP_CANCELED_FROZEN:
3889 for_each_rcu_flavor(rsp) {
3890 rcu_cleanup_dead_cpu(cpu, rsp);
3891 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3892 }
3893 break;
3894 default:
3895 break;
3896 }
3897 return NOTIFY_OK;
3898}
3899
3900static int rcu_pm_notify(struct notifier_block *self, 3900static int rcu_pm_notify(struct notifier_block *self,
3901 unsigned long action, void *hcpu) 3901 unsigned long action, void *hcpu)
3902{ 3902{
@@ -4208,10 +4208,9 @@ void __init rcu_init(void)
4208 * this is called early in boot, before either interrupts 4208 * this is called early in boot, before either interrupts
4209 * or the scheduler are operational. 4209 * or the scheduler are operational.
4210 */ 4210 */
4211 cpu_notifier(rcu_cpu_notify, 0);
4212 pm_notifier(rcu_pm_notify, 0); 4211 pm_notifier(rcu_pm_notify, 0);
4213 for_each_online_cpu(cpu) 4212 for_each_online_cpu(cpu)
4214 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); 4213 rcutree_prepare_cpu(cpu);
4215} 4214}
4216 4215
4217#include "tree_exp.h" 4216#include "tree_exp.h"