Diffstat (limited to 'kernel/rcu/tree_plugin.h')
 kernel/rcu/tree_plugin.h | 27 +++++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0981c0cd70fe..25e692a36280 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -158,14 +158,16 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * As with the other rcu_*_qs() functions, callers to this function
  * must disable preemption.
  */
-static void rcu_preempt_qs(int cpu)
+static void rcu_preempt_qs(void)
 {
-        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-        if (rdp->passed_quiesce == 0)
-                trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-        rdp->passed_quiesce = 1;
-        current->rcu_read_unlock_special.b.need_qs = false;
+        if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+                trace_rcu_grace_period(TPS("rcu_preempt"),
+                                       __this_cpu_read(rcu_preempt_data.gpnum),
+                                       TPS("cpuqs"));
+                __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+                current->rcu_read_unlock_special.b.need_qs = false;
+        }
 }
 
 /*
@@ -256,7 +258,7 @@ static void rcu_preempt_note_context_switch(int cpu)
          * grace period, then the fact that the task has been enqueued
          * means that we continue to block the current grace period.
          */
-        rcu_preempt_qs(cpu);
+        rcu_preempt_qs();
 }
 
 /*
@@ -352,7 +354,7 @@ void rcu_read_unlock_special(struct task_struct *t)
          */
         special = t->rcu_read_unlock_special;
         if (special.b.need_qs) {
-                rcu_preempt_qs(smp_processor_id());
+                rcu_preempt_qs();
                 if (!t->rcu_read_unlock_special.s) {
                         local_irq_restore(flags);
                         return;
@@ -651,11 +653,12 @@ static void rcu_preempt_check_callbacks(int cpu)
         struct task_struct *t = current;
 
         if (t->rcu_read_lock_nesting == 0) {
-                rcu_preempt_qs(cpu);
+                rcu_preempt_qs();
                 return;
         }
         if (t->rcu_read_lock_nesting > 0 &&
-            per_cpu(rcu_preempt_data, cpu).qs_pending)
+            per_cpu(rcu_preempt_data, cpu).qs_pending &&
+            !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
                 t->rcu_read_unlock_special.b.need_qs = true;
 }
 
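For context on the accessor change in the first hunk: per_cpu(var, smp_processor_id()) first computes the current CPU's number and then indexes into the per-CPU area, whereas __this_cpu_read()/__this_cpu_write() operate on the local CPU's instance directly (a single segment-relative access on x86). Like the old pattern, the double-underscore variants are safe only while preemption is disabled, which rcu_preempt_qs()'s callers already guarantee, as the function's comment notes. A minimal sketch of the two patterns, assuming a kernel build context; the variable and function names below are hypothetical, not part of this patch:

#include <linux/percpu.h>
#include <linux/smp.h>

DEFINE_PER_CPU(int, demo_count);        /* hypothetical per-CPU variable */

static void demo_old_style(void)        /* caller has preemption disabled */
{
        /* Old pattern: look up the CPU number, then index the array. */
        int *p = &per_cpu(demo_count, smp_processor_id());

        *p = *p + 1;
}

static void demo_new_style(void)        /* caller has preemption disabled */
{
        /* New pattern: address the local CPU's copy in one operation. */
        __this_cpu_write(demo_count, __this_cpu_read(demo_count) + 1);
}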
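The new barrier() in rcu_preempt_qs() pairs with the new !passed_quiesce test in rcu_preempt_check_callbacks(): the quiescent state must be recorded before need_qs is cleared, otherwise a scheduling-clock interrupt landing between the two stores could re-request a quiescent state that was already being reported. Since both paths run on the same CPU, a compiler-only barrier suffices. A stand-alone userspace sketch of that ordering requirement, with barrier() defined the usual way and every other name invented for illustration:

#include <stdio.h>

/* Compiler barrier, as in the kernel: the compiler may not reorder
 * memory accesses across this point. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int passed_quiesce;      /* stand-in for rcu_preempt_data.passed_quiesce */
static int need_qs;             /* stand-in for rcu_read_unlock_special.b.need_qs */

/* Analogue of rcu_preempt_qs(): publish the quiescent state, then
 * withdraw the request for one.  If the compiler moved the need_qs
 * store first, an interrupt between the two stores would run
 * check_callbacks() below, observe !passed_quiesce, and set need_qs
 * again, leaving a stale request behind. */
static void report_qs(void)
{
        if (!passed_quiesce) {
                passed_quiesce = 1;
                barrier();      /* order the two stores w.r.t. interrupts */
                need_qs = 0;
        }
}

/* Analogue of rcu_preempt_check_callbacks(): request a quiescent
 * state only if one has not already been reported. */
static void check_callbacks(void)
{
        if (!passed_quiesce)
                need_qs = 1;
}

int main(void)
{
        check_callbacks();      /* the tick asks for a quiescent state */
        report_qs();            /* the task reports one */
        printf("passed_quiesce=%d need_qs=%d\n", passed_quiesce, need_qs);
        return 0;
}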