diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-08-14 19:01:53 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-09-07 19:27:34 -0400 |
commit | 1d082fd061884a587c490c4fc8a2056ce1e47624 (patch) | |
tree | a8f715a674a036b1b7500a16ea11381d40659e42 /kernel/rcu/tree_plugin.h | |
parent | 4ff475ed4cf61a7f56bbfbc424147189d0022b38 (diff) |
rcu: Remove local_irq_disable() in rcu_preempt_note_context_switch()
The rcu_preempt_note_context_switch() function is on a scheduling fast
path, so it would be good to avoid disabling irqs. The reason that irqs
are disabled is to synchronize process-level and irq-handler access to
the task_struct ->rcu_read_unlock_special bitmask. This commit therefore
makes ->rcu_read_unlock_special instead be a union of bools with a short
allowing single-access checks in RCU's __rcu_read_unlock(). This results
in the process-level and irq-handler accesses being simple loads and
stores, so that irqs need no longer be disabled. This commit therefore
removes the irq disabling from rcu_preempt_note_context_switch().
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r-- | kernel/rcu/tree_plugin.h | 32 |
1 file changed, 15 insertions, 17 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index e466b40052a7..0981c0cd70fe 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -155,9 +155,8 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
155 | * not in a quiescent state. There might be any number of tasks blocked | 155 | * not in a quiescent state. There might be any number of tasks blocked |
156 | * while in an RCU read-side critical section. | 156 | * while in an RCU read-side critical section. |
157 | * | 157 | * |
158 | * Unlike the other rcu_*_qs() functions, callers to this function | 158 | * As with the other rcu_*_qs() functions, callers to this function |
159 | * must disable irqs in order to protect the assignment to | 159 | * must disable preemption. |
160 | * ->rcu_read_unlock_special. | ||
161 | */ | 160 | */ |
162 | static void rcu_preempt_qs(int cpu) | 161 | static void rcu_preempt_qs(int cpu) |
163 | { | 162 | { |
@@ -166,7 +165,7 @@ static void rcu_preempt_qs(int cpu) | |||
166 | if (rdp->passed_quiesce == 0) | 165 | if (rdp->passed_quiesce == 0) |
167 | trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs")); | 166 | trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs")); |
168 | rdp->passed_quiesce = 1; | 167 | rdp->passed_quiesce = 1; |
169 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | 168 | current->rcu_read_unlock_special.b.need_qs = false; |
170 | } | 169 | } |
171 | 170 | ||
172 | /* | 171 | /* |
@@ -190,14 +189,14 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
190 | struct rcu_node *rnp; | 189 | struct rcu_node *rnp; |
191 | 190 | ||
192 | if (t->rcu_read_lock_nesting > 0 && | 191 | if (t->rcu_read_lock_nesting > 0 && |
193 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { | 192 | !t->rcu_read_unlock_special.b.blocked) { |
194 | 193 | ||
195 | /* Possibly blocking in an RCU read-side critical section. */ | 194 | /* Possibly blocking in an RCU read-side critical section. */ |
196 | rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu); | 195 | rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu); |
197 | rnp = rdp->mynode; | 196 | rnp = rdp->mynode; |
198 | raw_spin_lock_irqsave(&rnp->lock, flags); | 197 | raw_spin_lock_irqsave(&rnp->lock, flags); |
199 | smp_mb__after_unlock_lock(); | 198 | smp_mb__after_unlock_lock(); |
200 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; | 199 | t->rcu_read_unlock_special.b.blocked = true; |
201 | t->rcu_blocked_node = rnp; | 200 | t->rcu_blocked_node = rnp; |
202 | 201 | ||
203 | /* | 202 | /* |
@@ -239,7 +238,7 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
239 | : rnp->gpnum + 1); | 238 | : rnp->gpnum + 1); |
240 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 239 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
241 | } else if (t->rcu_read_lock_nesting < 0 && | 240 | } else if (t->rcu_read_lock_nesting < 0 && |
242 | t->rcu_read_unlock_special) { | 241 | t->rcu_read_unlock_special.s) { |
243 | 242 | ||
244 | /* | 243 | /* |
245 | * Complete exit from RCU read-side critical section on | 244 | * Complete exit from RCU read-side critical section on |
@@ -257,9 +256,7 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
257 | * grace period, then the fact that the task has been enqueued | 256 | * grace period, then the fact that the task has been enqueued |
258 | * means that we continue to block the current grace period. | 257 | * means that we continue to block the current grace period. |
259 | */ | 258 | */ |
260 | local_irq_save(flags); | ||
261 | rcu_preempt_qs(cpu); | 259 | rcu_preempt_qs(cpu); |
262 | local_irq_restore(flags); | ||
263 | } | 260 | } |
264 | 261 | ||
265 | /* | 262 | /* |
@@ -340,7 +337,7 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
340 | bool drop_boost_mutex = false; | 337 | bool drop_boost_mutex = false; |
341 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 338 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
342 | struct rcu_node *rnp; | 339 | struct rcu_node *rnp; |
343 | int special; | 340 | union rcu_special special; |
344 | 341 | ||
345 | /* NMI handlers cannot block and cannot safely manipulate state. */ | 342 | /* NMI handlers cannot block and cannot safely manipulate state. */ |
346 | if (in_nmi()) | 343 | if (in_nmi()) |
@@ -350,12 +347,13 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
350 | 347 | ||
351 | /* | 348 | /* |
352 | * If RCU core is waiting for this CPU to exit critical section, | 349 | * If RCU core is waiting for this CPU to exit critical section, |
353 | * let it know that we have done so. | 350 | * let it know that we have done so. Because irqs are disabled, |
351 | * t->rcu_read_unlock_special cannot change. | ||
354 | */ | 352 | */ |
355 | special = t->rcu_read_unlock_special; | 353 | special = t->rcu_read_unlock_special; |
356 | if (special & RCU_READ_UNLOCK_NEED_QS) { | 354 | if (special.b.need_qs) { |
357 | rcu_preempt_qs(smp_processor_id()); | 355 | rcu_preempt_qs(smp_processor_id()); |
358 | if (!t->rcu_read_unlock_special) { | 356 | if (!t->rcu_read_unlock_special.s) { |
359 | local_irq_restore(flags); | 357 | local_irq_restore(flags); |
360 | return; | 358 | return; |
361 | } | 359 | } |
@@ -368,8 +366,8 @@ void rcu_read_unlock_special(struct task_struct *t) | |||
368 | } | 366 | } |
369 | 367 | ||
370 | /* Clean up if blocked during RCU read-side critical section. */ | 368 | /* Clean up if blocked during RCU read-side critical section. */ |
371 | if (special & RCU_READ_UNLOCK_BLOCKED) { | 369 | if (special.b.blocked) { |
372 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; | 370 | t->rcu_read_unlock_special.b.blocked = false; |
373 | 371 | ||
374 | /* | 372 | /* |
375 | * Remove this task from the list it blocked on. The | 373 | * Remove this task from the list it blocked on. The |
@@ -658,7 +656,7 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
658 | } | 656 | } |
659 | if (t->rcu_read_lock_nesting > 0 && | 657 | if (t->rcu_read_lock_nesting > 0 && |
660 | per_cpu(rcu_preempt_data, cpu).qs_pending) | 658 | per_cpu(rcu_preempt_data, cpu).qs_pending) |
661 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; | 659 | t->rcu_read_unlock_special.b.need_qs = true; |
662 | } | 660 | } |
663 | 661 | ||
664 | #ifdef CONFIG_RCU_BOOST | 662 | #ifdef CONFIG_RCU_BOOST |
@@ -941,7 +939,7 @@ void exit_rcu(void) | |||
941 | return; | 939 | return; |
942 | t->rcu_read_lock_nesting = 1; | 940 | t->rcu_read_lock_nesting = 1; |
943 | barrier(); | 941 | barrier(); |
944 | t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED; | 942 | t->rcu_read_unlock_special.b.blocked = true; |
945 | __rcu_read_unlock(); | 943 | __rcu_read_unlock(); |
946 | } | 944 | } |
947 | 945 | ||