author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-03-08 17:20:30 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-05-27 15:58:37 -0400
commit		0a0ba1c93f8a0ff28bacec0d1d018081e762e2f0
tree		b76fd4bf7358f5a0cdeea2a97ee73485a5c4f720 /kernel/rcu
parent		ea46351cea79f54729d8546e5bd7f091a2e6484b
rcu: Adjust ->lock acquisition for tasks no longer migrating
Tasks are no longer migrated away from a given rcu_node structure
when all CPUs corresponding to that rcu_node structure have gone
offline.  This means that rcu_read_unlock_special() no longer needs
to loop retrying rcu_node ->lock acquisition because the current
task is guaranteed to stay put.

This commit takes a small and paranoid step towards relying on this
guarantee by placing a WARN_ON_ONCE() just after the early exit from
the lock-acquisition loop.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
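The pattern the commit touches reads t->rcu_blocked_node, acquires that
node's ->lock, and then rechecks that the task is still queued on the
same node, retrying on a mismatch.  Below is a minimal userspace sketch
of that acquire-and-recheck shape using pthreads; it is an illustrative
analogue, not kernel code, and every name in it (struct node, struct
task, lock_blocked_node(), warn_on_once()) is hypothetical.

	/*
	 * Sketch of the acquire-and-recheck pattern described above.
	 * The node pointer is read, its lock taken, and the pointer
	 * rechecked under the lock; if nothing can move the task, the
	 * first pass must succeed, so a retry is flagged as a bug.
	 */
	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	struct node {
		pthread_mutex_t lock;
	};

	struct task {
		struct node *blocked_node;  /* node this task is queued on */
	};

	/* One-shot warning, loosely analogous to WARN_ON_ONCE(). */
	#define warn_on_once(cond)					\
		do {							\
			static int warned;				\
			if ((cond) && !warned) {			\
				warned = 1;				\
				fprintf(stderr, "warning: %s\n", #cond); \
			}						\
		} while (0)

	/*
	 * Lock the node the task is queued on.  The loop is retained
	 * out of paranoia: if the node could still change underneath
	 * us, the recheck would catch it and the warning would fire.
	 */
	static struct node *lock_blocked_node(struct task *t)
	{
		struct node *np;

		for (;;) {
			np = t->blocked_node;
			pthread_mutex_lock(&np->lock);
			if (np == t->blocked_node)
				break;		/* expected: first pass wins */
			warn_on_once(1);	/* node moved: should not happen */
			pthread_mutex_unlock(&np->lock);
		}
		return np;
	}

	int main(void)
	{
		struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };
		struct task t = { .blocked_node = &n };
		struct node *locked = lock_blocked_node(&t);

		assert(locked == &n);
		pthread_mutex_unlock(&locked->lock);
		puts("locked the expected node on the first pass");
		return 0;
	}

With migration ruled out, the recheck can only succeed on the first
pass, which is why the change below keeps the loop but flags any second
pass with WARN_ON_ONCE() rather than deleting the loop outright.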
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree_plugin.h	9
1 file changed, 6 insertions, 3 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 58b1ebdc4387..c8340e929eb4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -307,9 +307,11 @@ void rcu_read_unlock_special(struct task_struct *t)
 		t->rcu_read_unlock_special.b.blocked = false;
 
 		/*
-		 * Remove this task from the list it blocked on.  The
-		 * task can migrate while we acquire the lock, but at
-		 * most one time.  So at most two passes through loop.
+		 * Remove this task from the list it blocked on.  The task
+		 * now remains queued on the rcu_node corresponding to
+		 * the CPU it first blocked on, so the first attempt to
+		 * acquire the task's rcu_node's ->lock will succeed.
+		 * Keep the loop and add a WARN_ON() out of sheer paranoia.
 		 */
 		for (;;) {
 			rnp = t->rcu_blocked_node;
@@ -317,6 +319,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 			smp_mb__after_unlock_lock();
 			if (rnp == t->rcu_blocked_node)
 				break;
+			WARN_ON_ONCE(1);
 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		}
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);