author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-10-31 15:05:04 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-01-06 14:02:43 -0500
commit		b6a932d1d9840727eee619d455bdeeedaa205be9 (patch)
tree		bb1f9962187de759feb932378dd1299ec9488ff0 /kernel/rcu
parent		8af3a5e78cfb63abe8813743946b7bd5a8a3134c (diff)
rcu: Make rcu_read_unlock_special() propagate ->qsmaskinit bit clearing
This commit causes rcu_read_unlock_special() to propagate ->qsmaskinit
bit clearing up the rcu_node tree once a given rcu_node structure's
blkd_tasks list becomes empty. This is the final commit in preparation
for the rework of RCU priority boosting: It enables preempted tasks to
remain queued on their rcu_node structure even after all of that rcu_node
structure's CPUs have gone offline.
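
For reference, the propagation walk itself lives in rcu_cleanup_dead_rnp(),
which the parent commit factored out of rcu_cleanup_dead_cpu(). A condensed
sketch of that walk (memory-ordering barriers and some unlock annotations
elided, so close to but not verbatim the kernel source):

	static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
	{
		long mask;
		struct rcu_node *rnp = rnp_leaf;

		/* Bail if the leaf still has online CPUs or queued tasks. */
		if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
			return;
		for (;;) {
			mask = rnp->grpmask;
			rnp = rnp->parent;
			if (!rnp)
				break;	/* Cleared all the way to the root. */
			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
			rnp->qsmaskinit &= ~mask; /* Clear our bit in the parent. */
			if (rnp->qsmaskinit) {
				/* A sibling is still in use: stop propagating. */
				raw_spin_unlock(&rnp->lock);
				return;
			}
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
	}

The empty __maybe_unused stub added to tree.c below covers the
!CONFIG_HOTPLUG_CPU case, where there is nothing to clean up.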
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c        |  4 ++++
-rw-r--r--	kernel/rcu/tree_plugin.h | 16 +++++++++++++---
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 75c6b3301abb..6625a1b5d9a1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2329,6 +2329,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
 
+static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+}
+
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 }
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index d044b9cbbd97..8a2b84157d34 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -322,9 +322,10 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  */
 void rcu_read_unlock_special(struct task_struct *t)
 {
-	int empty_exp;
-	int empty_norm;
-	int empty_exp_now;
+	bool empty;
+	bool empty_exp;
+	bool empty_norm;
+	bool empty_exp_now;
 	unsigned long flags;
 	struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
@@ -376,6 +377,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 			break;
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
+	empty = !rcu_preempt_has_tasks(rnp);
 	empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 	empty_exp = !rcu_preempted_readers_exp(rnp);
 	smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -396,6 +398,14 @@ void rcu_read_unlock_special(struct task_struct *t)
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 	/*
+	 * If this was the last task on the list, go see if we
+	 * need to propagate ->qsmaskinit bit clearing up the
+	 * rcu_node tree.
+	 */
+	if (!empty && !rcu_preempt_has_tasks(rnp))
+		rcu_cleanup_dead_rnp(rnp);
+
+	/*
 	 * If this was the last task on the current list, and if
 	 * we aren't waiting on any CPUs, report the quiescent state.
 	 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
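
Note the ordering that makes the new check work: empty samples
rcu_preempt_has_tasks() while the task is still queued on ->blkd_tasks,
and the list is tested again after the task has dequeued itself. Only a
task whose removal actually emptied the list sees both !empty and
!rcu_preempt_has_tasks(), so rcu_cleanup_dead_rnp() runs exactly once
per emptied rcu_node structure.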