aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-09-20 19:02:49 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-10-20 16:47:10 -0400
commit62da1921292ef789c23a7bf01d671d7572baf377 (patch)
treeaec958e9dbfe4d7fbdfe13e8a622721bc87a1313
parent6f0c0580b70c89094b3422ba81118c7b959c7556 (diff)
rcu: Accelerate callbacks for CPU initiating a grace period
Because grace-period initialization is carried out by a separate kthread, it might happen on a different CPU than the one that had the callback needing a grace period -- which is where the callback acceleration needs to happen. Fortunately, rcu_start_gp() holds the root rcu_node structure's ->lock, which prevents a new grace period from starting. This allows this function to safely determine that a grace period has not yet started, which in turn allows it to fully accelerate any callbacks that it has pending. This commit adds this acceleration. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--kernel/rcutree.c26
1 files changed, 24 insertions, 2 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 74df86bd9204..93d6871bf7f9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1404,15 +1404,37 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	    !cpu_needs_another_gp(rsp, rdp)) {
 		/*
 		 * Either we have not yet spawned the grace-period
-		 * task or this CPU does not need another grace period.
+		 * task, this CPU does not need another grace period,
+		 * or a grace period is already in progress.
 		 * Either way, don't start a new grace period.
 		 */
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 
+	/*
+	 * Because there is no grace period in progress right now,
+	 * any callbacks we have up to this point will be satisfied
+	 * by the next grace period.  So promote all callbacks to be
+	 * handled after the end of the next grace period.  If the
+	 * CPU is not yet aware of the end of the previous grace period,
+	 * we need to allow for the callback advancement that will
+	 * occur when it does become aware.  Deadlock prevents us from
+	 * making it aware at this point: We cannot acquire a leaf
+	 * rcu_node ->lock while holding the root rcu_node ->lock.
+	 */
+	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+	if (rdp->completed == rsp->completed)
+		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+
+	/* Ensure that CPU is aware of completion of last grace period. */
+	rcu_process_gp_end(rsp, rdp);
+	local_irq_restore(flags);
+
+	/* Wake up rcu_gp_kthread() to start the grace period. */
 	wake_up(&rsp->gp_wq);
 }
 