 kernel/rcutree.c        |  2 ++
 kernel/rcutree_plugin.h | 25 +++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 0 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index da301e2fd84f..e9a4ae94647f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -632,6 +632,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
 		rnp->qsmask = rnp->qsmaskinit;
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->gpnum = rsp->gpnum;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -665,6 +666,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
 		spin_lock(&rnp_cur->lock); /* irqs already disabled. */
 		rnp_cur->qsmask = rnp_cur->qsmaskinit;
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->gpnum = rsp->gpnum;
 		spin_unlock(&rnp_cur->lock); /* irqs already disabled. */
 	}
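
The ordering both hunks enforce is worth spelling out: rcu_preempt_check_blocked_tasks() must run while ->gpnum still holds the number of the just-completed grace period, because that parity bit selects which ->blocked_tasks[] list gets verified. A minimal user-space sketch of the invariant, using made-up names and simplified types (only the parity indexing mirrors the kernel):

	#include <assert.h>

	struct node {			/* toy stand-in for struct rcu_node */
		unsigned long gpnum;	/* grace period this node has seen */
		int blocked[2];		/* readers blocked, per GP parity */
	};

	/* Like rcu_preempt_check_blocked_tasks(): with the OLD gpnum still
	 * in place, the list for the just-completed grace period (index
	 * gpnum & 0x1) must already be empty. */
	static void check_blocked_tasks(struct node *n)
	{
		assert(n->blocked[n->gpnum & 0x1] == 0);
	}

	static void start_gp(struct node *n, unsigned long new_gpnum)
	{
		check_blocked_tasks(n);	/* must precede the ->gpnum update */
		n->gpnum = new_gpnum;	/* now the node joins the new GP */
	}

	int main(void)
	{
		struct node n = { .gpnum = 4, .blocked = { 0, 1 } };
		start_gp(&n, 5);	/* ok: list for GP 4 (parity 0) is empty */
		return 0;
	}
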
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 47789369ea59..b8e4b0384f00 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -86,6 +86,7 @@ static void rcu_preempt_qs(int cpu)
 
 	if (t->rcu_read_lock_nesting &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+		WARN_ON_ONCE(cpu != smp_processor_id());
 
 		/* Possibly blocking in an RCU read-side critical section. */
 		rdp = rcu_preempt_state.rda[cpu];
@@ -103,7 +104,11 @@ static void rcu_preempt_qs(int cpu)
 		 * state for the current grace period), then as long
 		 * as that task remains queued, the current grace period
 		 * cannot end.
+		 *
+		 * But first, note that the current CPU must still be
+		 * on line!
 		 */
+		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
 		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
 		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
 		smp_mb(); /* Ensure later ctxt swtch seen after above. */
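
The phase expression above is dense enough to deserve a restatement: if this CPU still owes a quiescent state for the current grace period (its bit is still set in ->qsmask), the preempted task must block the current grace period, so it joins the ->blocked_tasks[] list indexed by the current ->gpnum's parity; otherwise it can only block the next grace period and joins the other list. A stand-alone illustration with made-up mask values (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long gpnum   = 5;	/* current GP: 5 & 0x1 == 1 */
		unsigned long qsmask  = 0x4;	/* CPUs still owing a QS */
		unsigned long grpmask = 0x4;	/* this CPU's bit in the node */
		int phase;

		/* Bit still set: no quiescent state reported yet, so the
		 * task blocks the CURRENT grace period. */
		phase = !(qsmask & grpmask) ^ (gpnum & 0x1);
		printf("phase = %d (current GP's list)\n", phase);	/* 1 */

		/* Bit clear: quiescent state already reported, so the task
		 * can only block the NEXT grace period. */
		grpmask = 0x8;
		phase = !(qsmask & grpmask) ^ (gpnum & 0x1);
		printf("phase = %d (next GP's list)\n", phase);		/* 0 */
		return 0;
	}
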
@@ -259,6 +264,18 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
+ * Check that the list of blocked tasks for the newly completed grace
+ * period is in fact empty.  It is a serious bug to complete a grace
+ * period that still has RCU readers blocked!  This function must be
+ * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * must be held by the caller.
+ */
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+{
+	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+}
+
+/*
  * Check for preempted RCU readers for the specified rcu_node structure.
  * If the caller needs a reliable answer, it must hold the rcu_node's
  * ->lock.
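
Both new checks rely on WARN_ON_ONCE(), which splats the first time a given call site sees a true condition and stays silent afterward, so a recurring bug cannot flood the log while the system limps along. A rough user-space approximation of that behavior (my own sketch using a GCC statement expression, not the kernel macro):

	#include <stdio.h>

	#define WARN_ON_ONCE(cond) ({					\
		static int __warned;	/* one flag per call site */	\
		int __c = !!(cond);					\
		if (__c && !__warned) {					\
			__warned = 1;					\
			fprintf(stderr, "WARNING at %s:%d\n",		\
				__FILE__, __LINE__);			\
		}							\
		__c;	/* evaluates to the condition, as in the kernel */ \
	})

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			WARN_ON_ONCE(i >= 0);	/* warns only on pass one */
		return 0;
	}
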
@@ -451,6 +468,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
+ * Because there is no preemptable RCU, there can be no readers blocked,
+ * so there is no need to check for blocked tasks.
+ */
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+{
+}
+
+/*
  * Because preemptable RCU does not exist, there are never any preempted
  * RCU readers.
  */
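
The empty stub is the usual rcutree_plugin.h idiom: every hook gets a real body under CONFIG_TREE_PREEMPT_RCU and a no-op otherwise, so rcu_start_gp() can call it unconditionally and the !PREEMPT build pays nothing. The pattern in miniature (illustrative toy code; only the CONFIG_TREE_PREEMPT_RCU symbol is real):

	#include <stdio.h>

	struct node { int blocked; };	/* toy stand-in for struct rcu_node */

	#ifdef CONFIG_TREE_PREEMPT_RCU
	static void check_blocked_tasks(struct node *n)
	{
		if (n->blocked)
			fprintf(stderr, "bug: readers still blocked\n");
	}
	#else
	static void check_blocked_tasks(struct node *n)
	{
		(void)n;	/* no preemptible readers: nothing to check */
	}
	#endif

	int main(void)
	{
		struct node n = { .blocked = 0 };
		check_blocked_tasks(&n);	/* caller needs no #ifdef */
		return 0;
	}
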
