Diffstat (limited to 'kernel/rcupreempt.c')

 kernel/rcupreempt.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 510898a7bd69..7d777c9f394c 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -159,7 +159,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched
 	.dynticks = 1,
 };
 
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
 
@@ -967,12 +967,12 @@ void rcu_check_callbacks(int cpu, int user)
 	 * If this CPU took its interrupt from user mode or from the
 	 * idle loop, and this is not a nested interrupt, then
 	 * this CPU has to have exited all prior preept-disable
-	 * sections of code. So increment the counter to note this.
+	 * sections of code. So invoke rcu_sched_qs() to note this.
 	 *
 	 * The memory barrier is needed to handle the case where
 	 * writes from a preempt-disable section of code get reordered
 	 * into schedule() by this CPU's write buffer. So the memory
-	 * barrier makes sure that the rcu_qsctr_inc() is seen by other
+	 * barrier makes sure that the rcu_sched_qs() is seen by other
 	 * CPUs to happen after any such write.
 	 */
 
@@ -980,7 +980,7 @@ void rcu_check_callbacks(int cpu, int user)
 	    (idle_cpu(cpu) && !in_softirq() &&
 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 		smp_mb(); /* Guard against aggressive schedule(). */
-		rcu_qsctr_inc(cpu);
+		rcu_sched_qs(cpu);
 	}
 
 	rcu_check_mb(cpu);
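
The comment in the second hunk carries the ordering rule this patch preserves
across the rename: a CPU must make any stores from a preempt-disabled section
visible to other CPUs before it reports a quiescent state, which is why
smp_mb() precedes the rcu_sched_qs() call. Below is a minimal userspace
sketch of that publish ordering, not kernel code: C11's
atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb(), and the
names qs_count, shared_data, writer, and reader are hypothetical
illustrations, not anything from rcupreempt.c.

/*
 * Hypothetical userspace analogue of the smp_mb() + rcu_sched_qs()
 * pairing above. The fence guarantees that the store to shared_data
 * is visible to any thread that later observes the quiescent-state
 * count. Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int shared_data;        /* store made in the "preempt-disabled" region */
static atomic_int qs_count;    /* stand-in for the per-CPU quiescent-state count */

static void *writer(void *unused)
{
	(void)unused;
	shared_data = 42;                           /* plain store in the critical section */
	atomic_thread_fence(memory_order_seq_cst);  /* analogue of smp_mb() */
	atomic_fetch_add_explicit(&qs_count, 1,     /* analogue of rcu_sched_qs() */
				  memory_order_relaxed);
	return NULL;
}

static void *reader(void *unused)
{
	(void)unused;
	/* Spin until a quiescent state has been reported... */
	while (atomic_load_explicit(&qs_count, memory_order_relaxed) == 0)
		;
	atomic_thread_fence(memory_order_seq_cst);
	/* ...after which the critical-section store is guaranteed visible. */
	printf("shared_data = %d\n", shared_data);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

The sketch compresses things considerably: in the kernel, rcu_sched_qs()
updates a per-CPU counter that the grace-period machinery samples later,
rather than a single flag another thread spins on, but the barrier-before-
report ordering it relies on is the same.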