about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  kernel/rcutree.c         66
-rw-r--r--  kernel/rcutree.h          3
-rw-r--r--  kernel/rcutree_plugin.h  12
3 files changed, 45 insertions, 36 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 4ca7e0292fd8..a9f51031d3e8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -740,11 +740,13 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
740} 740}
741 741
742/* 742/*
743 * Clean up after the prior grace period and let rcu_start_gp() start up 743 * Report a full set of quiescent states to the specified rcu_state
744 * the next grace period if one is needed. Note that the caller must 744 * data structure. This involves cleaning up after the prior grace
745 * hold rnp->lock, as required by rcu_start_gp(), which will release it. 745 * period and letting rcu_start_gp() start up the next grace period
746 * if one is needed. Note that the caller must hold rnp->lock, as
747 * required by rcu_start_gp(), which will release it.
746 */ 748 */
747static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) 749static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
748 __releases(rcu_get_root(rsp)->lock) 750 __releases(rcu_get_root(rsp)->lock)
749{ 751{
750 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 752 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
@@ -754,15 +756,16 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
754} 756}
755 757
756/* 758/*
757 * Similar to cpu_quiet(), for which it is a helper function. Allows 759 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
758 * a group of CPUs to be quieted at one go, though all the CPUs in the 760 * Allows quiescent states for a group of CPUs to be reported at one go
759 * group must be represented by the same leaf rcu_node structure. 761 * to the specified rcu_node structure, though all the CPUs in the group
760 * That structure's lock must be held upon entry, and it is released 762 * must be represented by the same rcu_node structure (which need not be
761 * before return. 763 * a leaf rcu_node structure, though it often will be). That structure's
764 * lock must be held upon entry, and it is released before return.
762 */ 765 */
763static void 766static void
764cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, 767rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
765 unsigned long flags) 768 struct rcu_node *rnp, unsigned long flags)
766 __releases(rnp->lock) 769 __releases(rnp->lock)
767{ 770{
768 struct rcu_node *rnp_c; 771 struct rcu_node *rnp_c;
@@ -798,21 +801,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
798 801
799 /* 802 /*
800 * Get here if we are the last CPU to pass through a quiescent 803 * Get here if we are the last CPU to pass through a quiescent
801 * state for this grace period. Invoke cpu_quiet_msk_finish() 804 * state for this grace period. Invoke rcu_report_qs_rsp()
802 * to clean up and start the next grace period if one is needed. 805 * to clean up and start the next grace period if one is needed.
803 */ 806 */
804 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ 807 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
805} 808}
806 809
807/* 810/*
808 * Record a quiescent state for the specified CPU, which must either be 811 * Record a quiescent state for the specified CPU to that CPU's rcu_data
809 * the current CPU. The lastcomp argument is used to make sure we are 812 * structure. This must be either called from the specified CPU, or
810 * still in the grace period of interest. We don't want to end the current 813 * called when the specified CPU is known to be offline (and when it is
811 * grace period based on quiescent states detected in an earlier grace 814 * also known that no other CPU is concurrently trying to help the offline
812 * period! 815 * CPU). The lastcomp argument is used to make sure we are still in the
816 * grace period of interest. We don't want to end the current grace period
817 * based on quiescent states detected in an earlier grace period!
813 */ 818 */
814static void 819static void
815cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) 820rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
816{ 821{
817 unsigned long flags; 822 unsigned long flags;
818 unsigned long mask; 823 unsigned long mask;
@@ -827,8 +832,8 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
827 * The race with GP start is resolved by the fact that we 832 * The race with GP start is resolved by the fact that we
828 * hold the leaf rcu_node lock, so that the per-CPU bits 833 * hold the leaf rcu_node lock, so that the per-CPU bits
829 * cannot yet be initialized -- so we would simply find our 834 * cannot yet be initialized -- so we would simply find our
830 * CPU's bit already cleared in cpu_quiet_msk() if this race 835 * CPU's bit already cleared in rcu_report_qs_rnp() if this
831 * occurred. 836 * race occurred.
832 */ 837 */
833 rdp->passed_quiesc = 0; /* try again later! */ 838 rdp->passed_quiesc = 0; /* try again later! */
834 spin_unlock_irqrestore(&rnp->lock, flags); 839 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -846,7 +851,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
846 */ 851 */
847 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 852 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
848 853
849 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ 854 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
850 } 855 }
851} 856}
852 857
@@ -877,8 +882,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
877 if (!rdp->passed_quiesc) 882 if (!rdp->passed_quiesc)
878 return; 883 return;
879 884
880 /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ 885 /*
881 cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); 886 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
887 * judge of that).
888 */
889 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
882} 890}
883 891
884#ifdef CONFIG_HOTPLUG_CPU 892#ifdef CONFIG_HOTPLUG_CPU
@@ -968,13 +976,13 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
968 /* 976 /*
969 * We still hold the leaf rcu_node structure lock here, and 977 * We still hold the leaf rcu_node structure lock here, and
970 * irqs are still disabled. The reason for this subterfuge is 978 * irqs are still disabled. The reason for this subterfuge is
971 * because invoking task_quiet() with ->onofflock held leads 979 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
972 * to deadlock. 980 * held leads to deadlock.
973 */ 981 */
974 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 982 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
975 rnp = rdp->mynode; 983 rnp = rdp->mynode;
976 if (need_quiet) 984 if (need_quiet)
977 task_quiet(rnp, flags); 985 rcu_report_unblock_qs_rnp(rnp, flags);
978 else 986 else
979 spin_unlock_irqrestore(&rnp->lock, flags); 987 spin_unlock_irqrestore(&rnp->lock, flags);
980 988
@@ -1164,8 +1172,8 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1164 } 1172 }
1165 if (mask != 0 && rnp->completed == lastcomp) { 1173 if (mask != 0 && rnp->completed == lastcomp) {
1166 1174
1167 /* cpu_quiet_msk() releases rnp->lock. */ 1175 /* rcu_report_qs_rnp() releases rnp->lock. */
1168 cpu_quiet_msk(mask, rsp, rnp, flags); 1176 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1169 continue; 1177 continue;
1170 } 1178 }
1171 spin_unlock_irqrestore(&rnp->lock, flags); 1179 spin_unlock_irqrestore(&rnp->lock, flags);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index a81188c42929..8bb03cb07447 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -306,7 +306,8 @@ long rcu_batches_completed(void);
306static void rcu_preempt_note_context_switch(int cpu); 306static void rcu_preempt_note_context_switch(int cpu);
307static int rcu_preempted_readers(struct rcu_node *rnp); 307static int rcu_preempted_readers(struct rcu_node *rnp);
308#ifdef CONFIG_HOTPLUG_CPU 308#ifdef CONFIG_HOTPLUG_CPU
309static void task_quiet(struct rcu_node *rnp, unsigned long flags); 309static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
310 unsigned long flags);
310#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 311#endif /* #ifdef CONFIG_HOTPLUG_CPU */
311#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 312#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
312static void rcu_print_task_stall(struct rcu_node *rnp); 313static void rcu_print_task_stall(struct rcu_node *rnp);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 1d295c789d3d..c9f0c975c003 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -167,7 +167,7 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
167 * irqs disabled, and this lock is released upon return, but irqs remain 167 * irqs disabled, and this lock is released upon return, but irqs remain
168 * disabled. 168 * disabled.
169 */ 169 */
170static void task_quiet(struct rcu_node *rnp, unsigned long flags) 170static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
171 __releases(rnp->lock) 171 __releases(rnp->lock)
172{ 172{
173 unsigned long mask; 173 unsigned long mask;
@@ -185,7 +185,7 @@ static void task_quiet(struct rcu_node *rnp, unsigned long flags)
185 * or tasks were kicked up to root rcu_node due to 185 * or tasks were kicked up to root rcu_node due to
186 * CPUs going offline. 186 * CPUs going offline.
187 */ 187 */
188 cpu_quiet_msk_finish(&rcu_preempt_state, flags); 188 rcu_report_qs_rsp(&rcu_preempt_state, flags);
189 return; 189 return;
190 } 190 }
191 191
@@ -193,7 +193,7 @@ static void task_quiet(struct rcu_node *rnp, unsigned long flags)
193 mask = rnp->grpmask; 193 mask = rnp->grpmask;
194 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 194 spin_unlock(&rnp->lock); /* irqs remain disabled. */
195 spin_lock(&rnp_p->lock); /* irqs already disabled. */ 195 spin_lock(&rnp_p->lock); /* irqs already disabled. */
196 cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); 196 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
197} 197}
198 198
199/* 199/*
@@ -253,12 +253,12 @@ static void rcu_read_unlock_special(struct task_struct *t)
253 /* 253 /*
254 * If this was the last task on the current list, and if 254 * If this was the last task on the current list, and if
255 * we aren't waiting on any CPUs, report the quiescent state. 255 * we aren't waiting on any CPUs, report the quiescent state.
256 * Note that task_quiet() releases rnp->lock. 256 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
257 */ 257 */
258 if (empty) 258 if (empty)
259 spin_unlock_irqrestore(&rnp->lock, flags); 259 spin_unlock_irqrestore(&rnp->lock, flags);
260 else 260 else
261 task_quiet(rnp, flags); 261 rcu_report_unblock_qs_rnp(rnp, flags);
262 } else { 262 } else {
263 local_irq_restore(flags); 263 local_irq_restore(flags);
264 } 264 }
@@ -566,7 +566,7 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
566#ifdef CONFIG_HOTPLUG_CPU 566#ifdef CONFIG_HOTPLUG_CPU
567 567
568/* Because preemptible RCU does not exist, no quieting of tasks. */ 568/* Because preemptible RCU does not exist, no quieting of tasks. */
569static void task_quiet(struct rcu_node *rnp, unsigned long flags) 569static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
570{ 570{
571 spin_unlock_irqrestore(&rnp->lock, flags); 571 spin_unlock_irqrestore(&rnp->lock, flags);
572} 572}