diff options
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-03-23 16:21:30 -0400 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-04-19 12:29:18 -0400 |
| commit | deb34f3643980832c03bdfb315d5a7e3371bd269 (patch) | |
| tree | 51e13a3d1c8d5e41f115ad6b2082c4c94e07b046 /kernel/rcu/tree.c | |
| parent | 48ac34666ff76843d8743db1cc78b303759916f1 (diff) | |
rcu: Improve comments for hotplug/suspend/hibernate functions
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
| -rw-r--r-- | kernel/rcu/tree.c | 41 |
1 file changed, 37 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bdaa69d23a8a..c4f195dd7c94 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
| @@ -3894,6 +3894,10 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
| 3894 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); | 3894 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| 3895 | } | 3895 | } |
| 3896 | 3896 | ||
| 3897 | /* | ||
| 3898 | * Invoked early in the CPU-online process, when pretty much all | ||
| 3899 | * services are available. The incoming CPU is not present. | ||
| 3900 | */ | ||
| 3897 | int rcutree_prepare_cpu(unsigned int cpu) | 3901 | int rcutree_prepare_cpu(unsigned int cpu) |
| 3898 | { | 3902 | { |
| 3899 | struct rcu_state *rsp; | 3903 | struct rcu_state *rsp; |
| @@ -3907,6 +3911,9 @@ int rcutree_prepare_cpu(unsigned int cpu) | |||
| 3907 | return 0; | 3911 | return 0; |
| 3908 | } | 3912 | } |
| 3909 | 3913 | ||
| 3914 | /* | ||
| 3915 | * Update RCU priority boot kthread affinity for CPU-hotplug changes. | ||
| 3916 | */ | ||
| 3910 | static void rcutree_affinity_setting(unsigned int cpu, int outgoing) | 3917 | static void rcutree_affinity_setting(unsigned int cpu, int outgoing) |
| 3911 | { | 3918 | { |
| 3912 | struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); | 3919 | struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); |
| @@ -3914,6 +3921,10 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing) | |||
| 3914 | rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); | 3921 | rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); |
| 3915 | } | 3922 | } |
| 3916 | 3923 | ||
| 3924 | /* | ||
| 3925 | * Near the end of the CPU-online process. Pretty much all services | ||
| 3926 | * enabled, and the CPU is now very much alive. | ||
| 3927 | */ | ||
| 3917 | int rcutree_online_cpu(unsigned int cpu) | 3928 | int rcutree_online_cpu(unsigned int cpu) |
| 3918 | { | 3929 | { |
| 3919 | sync_sched_exp_online_cleanup(cpu); | 3930 | sync_sched_exp_online_cleanup(cpu); |
| @@ -3921,13 +3932,19 @@ int rcutree_online_cpu(unsigned int cpu) | |||
| 3921 | return 0; | 3932 | return 0; |
| 3922 | } | 3933 | } |
| 3923 | 3934 | ||
| 3935 | /* | ||
| 3936 | * Near the beginning of the process. The CPU is still very much alive | ||
| 3937 | * with pretty much all services enabled. | ||
| 3938 | */ | ||
| 3924 | int rcutree_offline_cpu(unsigned int cpu) | 3939 | int rcutree_offline_cpu(unsigned int cpu) |
| 3925 | { | 3940 | { |
| 3926 | rcutree_affinity_setting(cpu, cpu); | 3941 | rcutree_affinity_setting(cpu, cpu); |
| 3927 | return 0; | 3942 | return 0; |
| 3928 | } | 3943 | } |
| 3929 | 3944 | ||
| 3930 | 3945 | /* | |
| 3946 | * Near the end of the offline process. We do only tracing here. | ||
| 3947 | */ | ||
| 3931 | int rcutree_dying_cpu(unsigned int cpu) | 3948 | int rcutree_dying_cpu(unsigned int cpu) |
| 3932 | { | 3949 | { |
| 3933 | struct rcu_state *rsp; | 3950 | struct rcu_state *rsp; |
| @@ -3937,6 +3954,9 @@ int rcutree_dying_cpu(unsigned int cpu) | |||
| 3937 | return 0; | 3954 | return 0; |
| 3938 | } | 3955 | } |
| 3939 | 3956 | ||
| 3957 | /* | ||
| 3958 | * The outgoing CPU is gone and we are running elsewhere. | ||
| 3959 | */ | ||
| 3940 | int rcutree_dead_cpu(unsigned int cpu) | 3960 | int rcutree_dead_cpu(unsigned int cpu) |
| 3941 | { | 3961 | { |
| 3942 | struct rcu_state *rsp; | 3962 | struct rcu_state *rsp; |
| @@ -3954,6 +3974,10 @@ int rcutree_dead_cpu(unsigned int cpu) | |||
| 3954 | * incoming CPUs are not allowed to use RCU read-side critical sections | 3974 | * incoming CPUs are not allowed to use RCU read-side critical sections |
| 3955 | * until this function is called. Failing to observe this restriction | 3975 | * until this function is called. Failing to observe this restriction |
| 3956 | * will result in lockdep splats. | 3976 | * will result in lockdep splats. |
| 3977 | * | ||
| 3978 | * Note that this function is special in that it is invoked directly | ||
| 3979 | * from the incoming CPU rather than from the cpuhp_step mechanism. | ||
| 3980 | * This is because this function must be invoked at a precise location. | ||
| 3957 | */ | 3981 | */ |
| 3958 | void rcu_cpu_starting(unsigned int cpu) | 3982 | void rcu_cpu_starting(unsigned int cpu) |
| 3959 | { | 3983 | { |
| @@ -3979,9 +4003,6 @@ void rcu_cpu_starting(unsigned int cpu) | |||
| 3979 | * The CPU is exiting the idle loop into the arch_cpu_idle_dead() | 4003 | * The CPU is exiting the idle loop into the arch_cpu_idle_dead() |
| 3980 | * function. We now remove it from the rcu_node tree's ->qsmaskinit | 4004 | * function. We now remove it from the rcu_node tree's ->qsmaskinit |
| 3981 | * bit masks. | 4005 | * bit masks. |
| 3982 | * The CPU is exiting the idle loop into the arch_cpu_idle_dead() | ||
| 3983 | * function. We now remove it from the rcu_node tree's ->qsmaskinit | ||
| 3984 | * bit masks. | ||
| 3985 | */ | 4006 | */ |
| 3986 | static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) | 4007 | static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) |
| 3987 | { | 4008 | { |
| @@ -3997,6 +4018,14 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) | |||
| 3997 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); | 4018 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| 3998 | } | 4019 | } |
| 3999 | 4020 | ||
| 4021 | /* | ||
| 4022 | * The outgoing function has no further need of RCU, so remove it from | ||
| 4023 | * the list of CPUs that RCU must track. | ||
| 4024 | * | ||
| 4025 | * Note that this function is special in that it is invoked directly | ||
| 4026 | * from the outgoing CPU rather than from the cpuhp_step mechanism. | ||
| 4027 | * This is because this function must be invoked at a precise location. | ||
| 4028 | */ | ||
| 4000 | void rcu_report_dead(unsigned int cpu) | 4029 | void rcu_report_dead(unsigned int cpu) |
| 4001 | { | 4030 | { |
| 4002 | struct rcu_state *rsp; | 4031 | struct rcu_state *rsp; |
| @@ -4011,6 +4040,10 @@ void rcu_report_dead(unsigned int cpu) | |||
| 4011 | } | 4040 | } |
| 4012 | #endif | 4041 | #endif |
| 4013 | 4042 | ||
| 4043 | /* | ||
| 4044 | * On non-huge systems, use expedited RCU grace periods to make suspend | ||
| 4045 | * and hibernation run faster. | ||
| 4046 | */ | ||
| 4014 | static int rcu_pm_notify(struct notifier_block *self, | 4047 | static int rcu_pm_notify(struct notifier_block *self, |
| 4015 | unsigned long action, void *hcpu) | 4048 | unsigned long action, void *hcpu) |
| 4016 | { | 4049 | { |
