author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2009-08-22 16:56:46 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-08-23 04:32:37 -0400
commit    d6714c22b43fbcbead7e7b706ff270e15f04a791 (patch)
tree      1dc15e57d982351bed45b7b9768a118f86b88c19
parent    9f77da9f40045253e91f55c12d4481254b513d2d (diff)
rcu: Renamings to increase RCU clarity
Make RCU-sched, RCU-bh, and RCU-preempt be underlying implementations, with
"RCU" defined in terms of one of the three. Update the outdated
rcu_qsctr_inc() names, as these functions no longer increment anything.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <12509746132696-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
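[Editor's note: the layering described above can be seen in a minimal userspace C sketch, not kernel code. The flavor structs, rcu_sched_flavor/rcu_bh_flavor, and freed() below are invented for illustration; only the function names rcu_sched_qs(), rcu_bh_qs(), call_rcu_sched(), and call_rcu() come from the patch. It shows the pattern the commit establishes: each flavor notes quiescent states via its own hook (a flag, not a counter), and the generic call_rcu() API forwards to the flavor that backs it, RCU-sched in the non-preemptible tree-RCU build.]

#include <stdio.h>

/* Simplified stand-ins for the kernel's types; names are illustrative. */
struct rcu_head {
        void (*func)(struct rcu_head *head);
};

struct rcu_flavor {
        const char *name;
        int passed_quiesc;      /* a flag, not a counter: "at least one QS" */
};

static struct rcu_flavor rcu_sched_flavor = { "rcu_sched", 0 };
static struct rcu_flavor rcu_bh_flavor = { "rcu_bh", 0 };

/* Flavor-specific quiescent-state hooks (renamed from *_qsctr_inc()). */
static void rcu_sched_qs(int cpu)
{
        rcu_sched_flavor.passed_quiesc = 1;     /* note the QS; no counting */
}

static void rcu_bh_qs(int cpu)
{
        rcu_bh_flavor.passed_quiesc = 1;
}

/* Flavor-specific callback queuing. */
static void call_rcu_sched(struct rcu_head *head,
                           void (*func)(struct rcu_head *head))
{
        head->func = func;      /* the real kernel queues this per-CPU */
}

/* Generic API defined in terms of one underlying flavor. */
static void call_rcu(struct rcu_head *head,
                     void (*func)(struct rcu_head *head))
{
        call_rcu_sched(head, func);     /* tree RCU maps "rcu" onto "rcu_sched" */
}

static void freed(struct rcu_head *head)
{
        printf("callback would run after a grace period\n");
}

int main(void)
{
        struct rcu_head h;

        call_rcu(&h, freed);    /* forwards to call_rcu_sched() */
        rcu_sched_qs(0);        /* scheduler notes a quiescent state on CPU 0 */
        rcu_bh_qs(0);           /* softirq completion notes a bh QS */
        h.func(&h);             /* grace period "ends": invoke the callback */
        return 0;
}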
-rw-r--r--  Documentation/RCU/trace.txt  |  7
-rw-r--r--  include/linux/rcupdate.h     | 21
-rw-r--r--  include/linux/rcupreempt.h   |  4
-rw-r--r--  include/linux/rcutree.h      |  8
-rw-r--r--  kernel/rcupreempt.c          |  8
-rw-r--r--  kernel/rcutree.c             | 80
-rw-r--r--  kernel/rcutree.h             |  4
-rw-r--r--  kernel/rcutree_trace.c       | 20
-rw-r--r--  kernel/sched.c               |  2
-rw-r--r--  kernel/softirq.c             |  4
10 files changed, 95 insertions(+), 63 deletions(-)
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 02cced183b2d..187bbf10c923 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -191,8 +191,7 @@ rcu/rcuhier (which displays the struct rcu_node hierarchy).
 
 The output of "cat rcu/rcudata" looks as follows:
 
-rcu:
-rcu:
+rcu_sched:
   0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10
   1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10
   2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10
@@ -306,7 +305,7 @@ comma-separated-variable spreadsheet format.
 
 The output of "cat rcu/rcugp" looks as follows:
 
-rcu: completed=33062 gpnum=33063
+rcu_sched: completed=33062 gpnum=33063
 rcu_bh: completed=464 gpnum=464
 
 Again, this output is for both "rcu" and "rcu_bh". The fields are
@@ -413,7 +412,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
 
 The output of "cat rcu/rcu_pending" looks as follows:
 
-rcu:
+rcu_sched:
   0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
   1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
   2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3c89d6a2591f..e920f0fd59d8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -157,17 +157,28 @@ extern int rcu_scheduler_active;
  * - call_rcu_sched() and rcu_barrier_sched()
  * on the write-side to insure proper synchronization.
  */
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+static inline void rcu_read_lock_sched(void)
+{
+        preempt_disable();
+}
+static inline void rcu_read_lock_sched_notrace(void)
+{
+        preempt_disable_notrace();
+}
 
 /*
  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
  *
  * See rcu_read_lock_sched for more information.
  */
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
-
+static inline void rcu_read_unlock_sched(void)
+{
+        preempt_enable();
+}
+static inline void rcu_read_unlock_sched_notrace(void)
+{
+        preempt_enable_notrace();
+}
 
 
 /**
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index f164ac9b7807..2963f080e48d 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,8 +40,8 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-extern void rcu_qsctr_inc(int cpu);
-static inline void rcu_bh_qsctr_inc(int cpu) { }
+extern void rcu_sched_qs(int cpu);
+static inline void rcu_bh_qs(int cpu) { }
 
 /*
  * Someone might want to pass call_rcu_bh as a function pointer.
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e37d5e2a8353..a0852d0d915b 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,8 +30,8 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
 
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
@@ -73,7 +73,8 @@ static inline void __rcu_read_unlock_bh(void)
 
 #define __synchronize_sched() synchronize_rcu()
 
-#define call_rcu_sched(head, func) call_rcu(head, func)
+extern void call_rcu_sched(struct rcu_head *head,
+                           void (*func)(struct rcu_head *rcu));
 
 static inline void synchronize_rcu_expedited(void)
 {
@@ -91,6 +92,7 @@ extern void rcu_restart_cpu(int cpu);
 
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
 
 static inline void rcu_init_sched(void)
 {
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 510898a7bd69..7d777c9f394c 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -159,7 +159,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched
         .dynticks = 1,
 };
 
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
         struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
 
@@ -967,12 +967,12 @@ void rcu_check_callbacks(int cpu, int user)
          * If this CPU took its interrupt from user mode or from the
          * idle loop, and this is not a nested interrupt, then
          * this CPU has to have exited all prior preept-disable
-         * sections of code. So increment the counter to note this.
+         * sections of code. So invoke rcu_sched_qs() to note this.
          *
          * The memory barrier is needed to handle the case where
          * writes from a preempt-disable section of code get reordered
          * into schedule() by this CPU's write buffer. So the memory
-         * barrier makes sure that the rcu_qsctr_inc() is seen by other
+         * barrier makes sure that the rcu_sched_qs() is seen by other
          * CPUs to happen after any such write.
          */
 
@@ -980,7 +980,7 @@ void rcu_check_callbacks(int cpu, int user)
             (idle_cpu(cpu) && !in_softirq() &&
              hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
                 smp_mb(); /* Guard against aggressive schedule(). */
-                rcu_qsctr_inc(cpu);
+                rcu_sched_qs(cpu);
         }
 
         rcu_check_mb(cpu);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a162f859dd32..4d71d4e8b5a8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -74,26 +74,25 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
         .n_force_qs_ngp = 0, \
 }
 
-struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
-DEFINE_PER_CPU(struct rcu_data, rcu_data);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 /*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
+ * Note a quiescent state. Because we do not need to know
  * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+ * one since the start of the grace period, this just sets a flag.
  */
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
-        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+        struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
         rdp->passed_quiesc = 1;
         rdp->passed_quiesc_completed = rdp->completed;
 }
 
-void rcu_bh_qsctr_inc(int cpu)
+void rcu_bh_qs(int cpu)
 {
         struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
         rdp->passed_quiesc = 1;
@@ -114,11 +113,21 @@ static int qlowmark = 100; /* Once only this many pending, use blimit. */
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 
 /*
+ * Return the number of RCU-sched batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed_sched(void)
+{
+        return rcu_sched_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
+/*
  * Return the number of RCU batches processed thus far for debug & stats.
+ * @@@ placeholder, maps to rcu_batches_completed_sched().
  */
 long rcu_batches_completed(void)
 {
-        return rcu_state.completed;
+        return rcu_batches_completed_sched();
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
@@ -310,7 +319,7 @@ void rcu_irq_exit(void)
         WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
 
         /* If the interrupt queued a callback, get out of dyntick mode. */
-        if (__get_cpu_var(rcu_data).nxtlist ||
+        if (__get_cpu_var(rcu_sched_data).nxtlist ||
             __get_cpu_var(rcu_bh_data).nxtlist)
                 set_need_resched();
 }
@@ -847,7 +856,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
         /*
          * Move callbacks from the outgoing CPU to the running CPU.
          * Note that the outgoing CPU is now quiscent, so it is now
-         * (uncharacteristically) safe to access it rcu_data structure.
+         * (uncharacteristically) safe to access its rcu_data structure.
          * Note also that we must carefully retain the order of the
          * outgoing CPU's callbacks in order for rcu_barrier() to work
          * correctly. Finally, note that we start all the callbacks
@@ -878,7 +887,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  */
 static void rcu_offline_cpu(int cpu)
 {
-        __rcu_offline_cpu(cpu, &rcu_state);
+        __rcu_offline_cpu(cpu, &rcu_sched_state);
         __rcu_offline_cpu(cpu, &rcu_bh_state);
 }
 
@@ -973,17 +982,16 @@ void rcu_check_callbacks(int cpu, int user)
                  * Get here if this CPU took its interrupt from user
                  * mode or from the idle loop, and if this is not a
                  * nested interrupt. In this case, the CPU is in
-                 * a quiescent state, so count it.
+                 * a quiescent state, so note it.
                  *
                  * No memory barrier is required here because both
-                 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
-                 * only CPU-local variables that other CPUs neither
-                 * access nor modify, at least not while the corresponding
-                 * CPU is online.
+                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
+                 * variables that other CPUs neither access nor modify,
+                 * at least not while the corresponding CPU is online.
                  */
 
-                rcu_qsctr_inc(cpu);
-                rcu_bh_qsctr_inc(cpu);
+                rcu_sched_qs(cpu);
+                rcu_bh_qs(cpu);
 
         } else if (!in_softirq()) {
 
@@ -991,10 +999,10 @@ void rcu_check_callbacks(int cpu, int user)
                  * Get here if this CPU did not take its interrupt from
                  * softirq, in other words, if it is not interrupting
                  * a rcu_bh read-side critical section. This is an _bh
-                 * critical section, so count it.
+                 * critical section, so note it.
                  */
 
-                rcu_bh_qsctr_inc(cpu);
+                rcu_bh_qs(cpu);
         }
         raise_softirq(RCU_SOFTIRQ);
 }
@@ -1174,7 +1182,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
          */
         smp_mb(); /* See above block comment. */
 
-        __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+        __rcu_process_callbacks(&rcu_sched_state,
+                                &__get_cpu_var(rcu_sched_data));
         __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
 
         /*
@@ -1231,14 +1240,25 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 }
 
 /*
- * Queue an RCU callback for invocation after a grace period.
+ * Queue an RCU-sched callback for invocation after a grace period.
+ */
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+        __call_rcu(head, func, &rcu_sched_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu_sched);
+
+/*
+ * @@@ Queue an RCU callback for invocation after a grace period.
+ * @@@ Placeholder pending rcutree_plugin.h.
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-        __call_rcu(head, func, &rcu_state);
+        call_rcu_sched(head, func);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+
 /*
  * Queue an RCU for invocation after a quicker grace period.
  */
@@ -1311,7 +1331,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 int rcu_pending(int cpu)
 {
-        return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
+        return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
                __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
 }
 
@@ -1324,7 +1344,7 @@ int rcu_pending(int cpu)
 int rcu_needs_cpu(int cpu)
 {
         /* RCU callbacks either ready or pending? */
-        return per_cpu(rcu_data, cpu).nxtlist ||
+        return per_cpu(rcu_sched_data, cpu).nxtlist ||
                per_cpu(rcu_bh_data, cpu).nxtlist;
 }
 
@@ -1418,7 +1438,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 static void __cpuinit rcu_online_cpu(int cpu)
 {
-        rcu_init_percpu_data(cpu, &rcu_state);
+        rcu_init_percpu_data(cpu, &rcu_sched_state);
         rcu_init_percpu_data(cpu, &rcu_bh_state);
 }
 
@@ -1545,10 +1565,10 @@ void __init __rcu_init(void)
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
         printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-        rcu_init_one(&rcu_state);
-        RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
+        rcu_init_one(&rcu_sched_state);
+        RCU_DATA_PTR_INIT(&rcu_sched_state, rcu_sched_data);
         for_each_possible_cpu(i)
-                rcu_boot_init_percpu_data(i, &rcu_state);
+                rcu_boot_init_percpu_data(i, &rcu_sched_state);
         rcu_init_one(&rcu_bh_state);
         RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
         for_each_possible_cpu(i)
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7cc830a1c44a..0024e5ddcc68 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -238,8 +238,8 @@ struct rcu_state {
 /*
  * RCU implementation internal declarations:
  */
-extern struct rcu_state rcu_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
+extern struct rcu_state rcu_sched_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 extern struct rcu_state rcu_bh_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 0cb52b887758..236c0504fee2 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -77,8 +77,8 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata(struct seq_file *m, void *unused)
 {
-        seq_puts(m, "rcu:\n");
-        PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
+        seq_puts(m, "rcu_sched:\n");
+        PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
         seq_puts(m, "rcu_bh:\n");
         PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
         return 0;
@@ -125,8 +125,8 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
         seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
         seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
-        seq_puts(m, "\"rcu:\"\n");
-        PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
+        seq_puts(m, "\"rcu_sched:\"\n");
+        PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
         seq_puts(m, "\"rcu_bh:\"\n");
         PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
         return 0;
@@ -172,8 +172,8 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcuhier(struct seq_file *m, void *unused)
 {
-        seq_puts(m, "rcu:\n");
-        print_one_rcu_state(m, &rcu_state);
+        seq_puts(m, "rcu_sched:\n");
+        print_one_rcu_state(m, &rcu_sched_state);
         seq_puts(m, "rcu_bh:\n");
         print_one_rcu_state(m, &rcu_bh_state);
         return 0;
@@ -194,8 +194,8 @@ static struct file_operations rcuhier_fops = {
 
 static int show_rcugp(struct seq_file *m, void *unused)
 {
-        seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
-                   rcu_state.completed, rcu_state.gpnum);
+        seq_printf(m, "rcu_sched: completed=%ld gpnum=%ld\n",
+                   rcu_sched_state.completed, rcu_sched_state.gpnum);
         seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
                    rcu_bh_state.completed, rcu_bh_state.gpnum);
         return 0;
@@ -244,8 +244,8 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcu_pending(struct seq_file *m, void *unused)
 {
-        seq_puts(m, "rcu:\n");
-        print_rcu_pendings(m, &rcu_state);
+        seq_puts(m, "rcu_sched:\n");
+        print_rcu_pendings(m, &rcu_sched_state);
         seq_puts(m, "rcu_bh:\n");
         print_rcu_pendings(m, &rcu_bh_state);
         return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index cda8b81f8801..c9beca67a53e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5325,7 +5325,7 @@ need_resched:
         preempt_disable();
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
-        rcu_qsctr_inc(cpu);
+        rcu_sched_qs(cpu);
         prev = rq->curr;
         switch_count = &prev->nivcsw;
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index eb5e131a0485..7db25067cd2d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -227,7 +227,7 @@ restart:
                                 preempt_count() = prev_count;
                         }
 
-                        rcu_bh_qsctr_inc(cpu);
+                        rcu_bh_qs(cpu);
                 }
                 h++;
                 pending >>= 1;
@@ -721,7 +721,7 @@ static int ksoftirqd(void * __bind_cpu)
                         preempt_enable_no_resched();
                         cond_resched();
                         preempt_disable();
-                        rcu_qsctr_inc((long)__bind_cpu);
+                        rcu_sched_qs((long)__bind_cpu);
                 }
                 preempt_enable();
                 set_current_state(TASK_INTERRUPTIBLE);