author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-06-22 20:06:26 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-23 10:41:54 -0400
commit	4cdfc175c25c89eedc08460b5e6239c2ec67fcb6 (patch)
tree	716acd208cb0633cdd19fc0cd5ad601906cceb24
parent	b402b73b3afe3614bc0e921ebe18013ea103115a (diff)

rcu: Move quiescent-state forcing into kthread

As the first step towards allowing quiescent-state forcing to be
preemptible, this commit moves RCU quiescent-state forcing into the
same kthread that is now used to initialize and clean up after grace
periods.  This is yet another step towards keeping scheduling latency
down to a dull roar.

Updated to change from raw_spin_lock_irqsave() to raw_spin_lock_irq()
and to remove the now-unused rcu_state structure fields as suggested by
Peter Zijlstra.

Reported-by: Mike Galbraith <mgalbraith@suse.de>
Reported-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--  kernel/rcutree.c         | 199
-rw-r--r--  kernel/rcutree.h         |  13
-rw-r--r--  kernel/rcutree_plugin.h  |   8
3 files changed, 82 insertions(+), 138 deletions(-)
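For readers skimming the diff below: the change boils force_quiescent_state() down to "set a flag and wake the grace-period kthread", and the kthread then does the actual forcing from a timeout-driven wait loop. What follows is a minimal userspace sketch of that pattern using pthreads; it is not kernel code, and all names in it (gp_worker, request_fqs, GP_FLAG_*) are illustrative stand-ins for the rcu_gp_kthread()/RCU_GP_FLAG_* machinery in the patch.

/*
 * Minimal userspace model (not the kernel code) of the pattern adopted
 * here: a single worker thread owns both grace-period setup and periodic
 * quiescent-state forcing.  Requesters only set a flag bit and wake the
 * worker; the worker also wakes itself on a timeout, standing in for
 * RCU_JIFFIES_TILL_FORCE_QS.  All names below are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define GP_FLAG_INIT	0x1	/* need a new "grace period" */
#define GP_FLAG_FQS	0x2	/* need a round of "forcing" */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static unsigned int gp_flags;
static int done;		/* model shutdown; no kernel analogue */

static void *gp_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!done) {
		/* Wait until someone requests a grace period. */
		while (!(gp_flags & GP_FLAG_INIT) && !done)
			pthread_cond_wait(&wq, &lock);
		if (done)
			break;
		gp_flags = 0;	/* clear all flags: new period */
		printf("worker: grace period started\n");

		/* Force periodically, or sooner on explicit request. */
		for (int round = 0; round < 3; round++) {
			struct timespec deadline;

			clock_gettime(CLOCK_REALTIME, &deadline);
			deadline.tv_sec += 1;	/* stand-in for jiffies_force_qs */
			while (!(gp_flags & GP_FLAG_FQS) && !done) {
				if (pthread_cond_timedwait(&wq, &lock,
							   &deadline))
					break;	/* timed out: force anyway */
			}
			gp_flags &= ~GP_FLAG_FQS;
			printf("worker: forcing quiescent states, round %d\n",
			       round);
		}
		printf("worker: grace period ended\n");
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* What force_quiescent_state() reduces to: set a bit, wake the worker. */
static void request_fqs(void)
{
	pthread_mutex_lock(&lock);
	gp_flags |= GP_FLAG_FQS;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, gp_worker, NULL);

	pthread_mutex_lock(&lock);	/* stand-in for rcu_start_gp() */
	gp_flags |= GP_FLAG_INIT;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);

	sleep(1);
	request_fqs();			/* stand-in for rcu_force_quiescent_state() */

	sleep(3);
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}

Compared with the old scheme, the spinlock-heavy force_quiescent_state() state machine goes away; a caller's work shrinks to one flag test, one flag set, and a wake-up.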
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 340a5f54b6af..6182686de4a6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -72,7 +72,6 @@ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
 	.orphan_donetail = &sname##_state.orphan_donelist, \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
-	.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
 	.name = #sname, \
 }
 
@@ -226,7 +225,8 @@ int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 module_param(rcu_cpu_stall_suppress, int, 0644);
 module_param(rcu_cpu_stall_timeout, int, 0644);
 
-static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(int cpu);
 
 /*
@@ -252,7 +252,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
  */
 void rcu_bh_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_bh_state, 0);
+	force_quiescent_state(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
@@ -286,7 +286,7 @@ EXPORT_SYMBOL_GPL(rcutorture_record_progress);
  */
 void rcu_sched_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_sched_state, 0);
+	force_quiescent_state(&rcu_sched_state);
 }
 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
 
@@ -784,11 +784,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	else if (!trigger_all_cpu_backtrace())
 		dump_stack();
 
-	/* If so configured, complain about tasks blocking the grace period. */
+	/* Complain about tasks blocking the grace period. */
 
 	rcu_print_detail_task_stall(rsp);
 
-	force_quiescent_state(rsp, 0);  /* Kick them all. */
+	force_quiescent_state(rsp);  /* Kick them all. */
 }
 
 static void print_cpu_stall(struct rcu_state *rsp)
@@ -1036,7 +1036,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	raw_spin_lock_irq(&rnp->lock);
-	rsp->gp_flags = 0;
+	rsp->gp_flags = 0; /* Clear all flags: New grace period. */
 
 	if (rcu_gp_in_progress(rsp)) {
 		/* Grace period already in progress, don't start another. */
@@ -1044,22 +1044,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		return 0;
 	}
 
-	if (rsp->fqs_active) {
-		/*
-		 * We need a grace period, but force_quiescent_state()
-		 * is running.  Tell it to start one on our behalf.
-		 */
-		rsp->fqs_need_gp = 1;
-		raw_spin_unlock_irq(&rnp->lock);
-		return 0;
-	}
-
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
-	WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
-	rsp->fqs_state = RCU_GP_INIT; /* Stop force_quiescent_state. */
-	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
 	raw_spin_unlock_irq(&rnp->lock);
 
@@ -1096,19 +1083,40 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		cond_resched();
 	}
 
-	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irq(&rnp->lock);
-	/* force_quiescent_state() now OK. */
-	rsp->fqs_state = RCU_SIGNAL_INIT;
-	raw_spin_unlock_irq(&rnp->lock);
 	put_online_cpus();
 	return 1;
 }
 
 /*
+ * Do one round of quiescent-state forcing.
+ */
+int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+{
+	int fqs_state = fqs_state_in;
+	struct rcu_node *rnp = rcu_get_root(rsp);
+
+	rsp->n_force_qs++;
+	if (fqs_state == RCU_SAVE_DYNTICK) {
+		/* Collect dyntick-idle snapshots. */
+		force_qs_rnp(rsp, dyntick_save_progress_counter);
+		fqs_state = RCU_FORCE_QS;
+	} else {
+		/* Handle dyntick-idle and offline CPUs. */
+		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+	}
+	/* Clear flag to prevent immediate re-entry. */
+	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+		raw_spin_lock_irq(&rnp->lock);
+		rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
+		raw_spin_unlock_irq(&rnp->lock);
+	}
+	return fqs_state;
+}
+
+/*
  * Clean up after the old grace period.
  */
-static int rcu_gp_cleanup(struct rcu_state *rsp)
+static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
 	unsigned long gp_duration;
 	struct rcu_data *rdp;
@@ -1160,7 +1168,6 @@ static int rcu_gp_cleanup(struct rcu_state *rsp)
 	if (cpu_needs_another_gp(rsp, rdp))
 		rsp->gp_flags = 1;
 	raw_spin_unlock_irq(&rnp->lock);
-	return 1;
 }
 
 /*
@@ -1168,6 +1175,8 @@ static int rcu_gp_cleanup(struct rcu_state *rsp)
  */
 static int __noreturn rcu_gp_kthread(void *arg)
 {
+	int fqs_state;
+	int ret;
 	struct rcu_state *rsp = arg;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -1175,26 +1184,43 @@ static int __noreturn rcu_gp_kthread(void *arg)
 
 		/* Handle grace-period start. */
 		for (;;) {
-			wait_event_interruptible(rsp->gp_wq, rsp->gp_flags);
-			if (rsp->gp_flags && rcu_gp_init(rsp))
+			wait_event_interruptible(rsp->gp_wq,
+						 rsp->gp_flags &
+						 RCU_GP_FLAG_INIT);
+			if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
+			    rcu_gp_init(rsp))
 				break;
 			cond_resched();
 			flush_signals(current);
 		}
 
-		/* Handle grace-period end. */
-		rnp = rcu_get_root(rsp);
+		/* Handle quiescent-state forcing. */
+		fqs_state = RCU_SAVE_DYNTICK;
 		for (;;) {
-			wait_event_interruptible(rsp->gp_wq,
-						 !ACCESS_ONCE(rnp->qsmask) &&
-						 !rcu_preempt_blocked_readers_cgp(rnp));
+			rsp->jiffies_force_qs = jiffies +
+						RCU_JIFFIES_TILL_FORCE_QS;
+			ret = wait_event_interruptible_timeout(rsp->gp_wq,
+					(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
+					(!ACCESS_ONCE(rnp->qsmask) &&
+					 !rcu_preempt_blocked_readers_cgp(rnp)),
+					RCU_JIFFIES_TILL_FORCE_QS);
+			/* If grace period done, leave loop. */
 			if (!ACCESS_ONCE(rnp->qsmask) &&
-			    !rcu_preempt_blocked_readers_cgp(rnp) &&
-			    rcu_gp_cleanup(rsp))
+			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
-			cond_resched();
-			flush_signals(current);
+			/* If time for quiescent-state forcing, do it. */
+			if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
+				fqs_state = rcu_gp_fqs(rsp, fqs_state);
+				cond_resched();
+			} else {
+				/* Deal with stray signal. */
+				cond_resched();
+				flush_signals(current);
+			}
 		}
+
+		/* Handle grace-period end. */
+		rcu_gp_cleanup(rsp);
 	}
 }
 
@@ -1226,7 +1252,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		return;
 	}
 
-	rsp->gp_flags = 1;
+	rsp->gp_flags = RCU_GP_FLAG_INIT;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	wake_up(&rsp->gp_wq);
 }
@@ -1777,72 +1803,20 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
  * Force quiescent states on reluctant CPUs, and also detect which
  * CPUs are in dyntick-idle mode.
  */
-static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+static void force_quiescent_state(struct rcu_state *rsp)
 {
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	trace_rcu_utilization("Start fqs");
-	if (!rcu_gp_in_progress(rsp)) {
-		trace_rcu_utilization("End fqs");
-		return;  /* No grace period in progress, nothing to force. */
-	}
-	if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
+	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS)
+		return;  /* Someone beat us to it. */
+	if (!raw_spin_trylock_irqsave(&rnp->lock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
-		trace_rcu_utilization("End fqs");
-		return;	/* Someone else is already on the job. */
-	}
-	if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
-		goto unlock_fqs_ret; /* no emergency and done recently. */
-	rsp->n_force_qs++;
-	raw_spin_lock(&rnp->lock);  /* irqs already disabled */
-	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	if (!rcu_gp_in_progress(rsp)) {
-		rsp->n_force_qs_ngp++;
-		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-		goto unlock_fqs_ret;  /* no GP in progress, time updated. */
-	}
-	rsp->fqs_active = 1;
-	switch (rsp->fqs_state) {
-	case RCU_GP_IDLE:
-	case RCU_GP_INIT:
-
-		break; /* grace period idle or initializing, ignore. */
-
-	case RCU_SAVE_DYNTICK:
-
-		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-
-		/* Record dyntick-idle state. */
-		force_qs_rnp(rsp, dyntick_save_progress_counter);
-		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
-		if (rcu_gp_in_progress(rsp))
-			rsp->fqs_state = RCU_FORCE_QS;
-		break;
-
-	case RCU_FORCE_QS:
-
-		/* Check dyntick-idle state, send IPI to laggarts. */
-		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
-
-		/* Leave state in case more forcing is required. */
-
-		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
-		break;
-	}
-	rsp->fqs_active = 0;
-	if (rsp->fqs_need_gp) {
-		raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
-		rsp->fqs_need_gp = 0;
-		rcu_start_gp(rsp, flags);  /* releases rnp->lock */
-		trace_rcu_utilization("End fqs");
 		return;
 	}
-	raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-unlock_fqs_ret:
-	raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
-	trace_rcu_utilization("End fqs");
+	rsp->gp_flags |= RCU_GP_FLAG_FQS;
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
 }
 
 /*
@@ -1859,13 +1833,6 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	WARN_ON_ONCE(rdp->beenonline == 0);
 
 	/*
-	 * If an RCU GP has gone long enough, go check for dyntick
-	 * idle CPUs and, if needed, send resched IPIs.
-	 */
-	if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-		force_quiescent_state(rsp, 1);
-
-	/*
 	 * Advance callbacks in response to end of earlier grace
 	 * period that some other CPU ended.
 	 */
@@ -1965,12 +1932,11 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 			rdp->blimit = LONG_MAX;
 			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
 			    *rdp->nxttail[RCU_DONE_TAIL] != head)
-				force_quiescent_state(rsp, 0);
+				force_quiescent_state(rsp);
 			rdp->n_force_qs_snap = rsp->n_force_qs;
 			rdp->qlen_last_fqs_check = rdp->qlen;
 		}
-	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-		force_quiescent_state(rsp, 1);
+	}
 }
 
 static void
@@ -2251,17 +2217,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rcu_scheduler_fully_active &&
 	    rdp->qs_pending && !rdp->passed_quiesce) {
-
-		/*
-		 * If force_quiescent_state() coming soon and this CPU
-		 * needs a quiescent state, and this is either RCU-sched
-		 * or RCU-bh, force a local reschedule.
-		 */
 		rdp->n_rp_qs_pending++;
-		if (!rdp->preemptible &&
-		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
-				 jiffies))
-			set_need_resched();
 	} else if (rdp->qs_pending && rdp->passed_quiesce) {
 		rdp->n_rp_report_qs++;
 		return 1;
@@ -2291,13 +2247,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;
 	}
 
-	/* Has an RCU GP gone long enough to send resched IPIs &c? */
-	if (rcu_gp_in_progress(rsp) &&
-	    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
-		rdp->n_rp_need_fqs++;
-		return 1;
-	}
-
 	/* nothing to do */
 	rdp->n_rp_need_nothing++;
 	return 0;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5d92b80a0a28..2d04106d1533 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -378,13 +378,6 @@ struct rcu_state {
 
 	u8	fqs_state ____cacheline_internodealigned_in_smp;
 						/* Force QS state. */
-	u8	fqs_active;			/* force_quiescent_state() */
-						/*  is running. */
-	u8	fqs_need_gp;			/* A CPU was prevented from */
-						/*  starting a new grace */
-						/*  period because */
-						/*  force_quiescent_state() */
-						/*  was running. */
 	u8	boost;				/* Subject to priority boost. */
 	unsigned long gpnum;			/* Current gp number. */
 	unsigned long completed;		/* # of last completed gp. */
@@ -413,8 +406,6 @@ struct rcu_state {
 	struct completion barrier_completion;	/* Wake at barrier end. */
 	unsigned long n_barrier_done;		/* ++ at start and end of */
 						/*  _rcu_barrier(). */
-	raw_spinlock_t fqslock;			/* Only one task forcing */
-						/*  quiescent states. */
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */
 	unsigned long n_force_qs;		/* Number of calls to */
@@ -433,6 +424,10 @@ struct rcu_state {
 	struct list_head flavors;		/* List of RCU flavors. */
 };
 
+/* Values for rcu_state structure's gp_flags field. */
+#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
+#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
+
 extern struct list_head rcu_struct_flavors;
 #define for_each_rcu_flavor(rsp) \
 	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 587963689328..eb8dcd1bc4b5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_preempt_state, 0);
+	force_quiescent_state(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
@@ -2076,16 +2076,16 @@ static void rcu_prepare_for_idle(int cpu)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
 		rcu_preempt_qs(cpu);
-		force_quiescent_state(&rcu_preempt_state, 0);
+		force_quiescent_state(&rcu_preempt_state);
 	}
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
 		rcu_sched_qs(cpu);
-		force_quiescent_state(&rcu_sched_state, 0);
+		force_quiescent_state(&rcu_sched_state);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
 		rcu_bh_qs(cpu);
-		force_quiescent_state(&rcu_bh_state, 0);
+		force_quiescent_state(&rcu_bh_state);
 	}
 
 	/*