author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-03-10 13:55:52 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-04-29 11:43:32 -0400
commit     365187fbc04fd55766bf6a94e37e558505bf480a (patch)
tree       6249491e31d2c9e6e1fdb62048694b28f0974d97
parent     83ebe63ead0fe60e4b548730800cb68293ce098b (diff)
rcu: Update cpu_needs_another_gp() for futures from non-NOCB CPUs
In the old days, the only source of requests for future grace periods was NOCB CPUs.  This has changed: CPUs routinely post requests for future grace periods in order to promote power efficiency and reduce OS jitter with minimal impact on grace-period latency.  This commit therefore updates cpu_needs_another_gp() to invoke rcu_future_needs_gp() instead of rcu_nocb_needs_gp().  The latter is no longer used, so it is now removed.  This commit also adds tracing for the irq_work_queue() wakeup case.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
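The key data structure here is the two-slot need_future_gp[] array on the root rcu_node structure: slot (completed + 1) & 0x1 counts requests for the grace period that follows the most recently completed one. The standalone sketch below, with a hypothetical rcu_node_stub type standing in for the real struct rcu_node, is only meant to illustrate that indexing; it is not kernel code.

#include <stdio.h>

/*
 * Hypothetical stand-in for the two struct rcu_node fields used by
 * rcu_future_needs_gp() in this patch; not the real kernel structure.
 */
struct rcu_node_stub {
	unsigned long completed;	/* Number of last completed grace period. */
	int need_future_gp[2];		/* Requests for future grace periods. */
};

/*
 * Mirror of the indexing in the new rcu_future_needs_gp(): slot
 * (completed + 1) & 0x1 records requests for the next grace period.
 */
static int future_needs_gp(struct rcu_node_stub *rnp)
{
	int idx = (rnp->completed + 1) & 0x1;

	return rnp->need_future_gp[idx];
}

int main(void)
{
	struct rcu_node_stub rnp = {
		.completed = 4,
		.need_future_gp = { 0, 3 },	/* Three requests for GP #5. */
	};

	/* (4 + 1) & 0x1 == 1, so this reads slot 1 and prints 3. */
	printf("future grace period needed: %d\n", future_needs_gp(&rnp));
	return 0;
}

With completed == 4 the next grace period is number 5, so the check reads slot (4 + 1) & 0x1 == 1, which in this example holds three outstanding requests.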
-rw-r--r--  kernel/rcu/tree.c         39
-rw-r--r--  kernel/rcu/tree.h          1
-rw-r--r--  kernel/rcu/tree_plugin.h  18
3 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b33c29a99df3..b4688993e956 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -324,6 +324,28 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 }
 
 /*
+ * Return the root node of the specified rcu_state structure.
+ */
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+{
+	return &rsp->node[0];
+}
+
+/*
+ * Is there any need for future grace periods?
+ * Interrupts must be disabled.  If the caller does not hold the root
+ * rnp_node structure's ->lock, the results are advisory only.
+ */
+static int rcu_future_needs_gp(struct rcu_state *rsp)
+{
+	struct rcu_node *rnp = rcu_get_root(rsp);
+	int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
+	int *fp = &rnp->need_future_gp[idx];
+
+	return ACCESS_ONCE(*fp);
+}
+
+/*
  * Does the current CPU require a not-yet-started grace period?
  * The caller must have disabled interrupts to prevent races with
  * normal callback registry.
@@ -335,7 +357,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	if (rcu_gp_in_progress(rsp))
 		return 0;  /* No, a grace period is already in progress. */
-	if (rcu_nocb_needs_gp(rsp))
+	if (rcu_future_needs_gp(rsp))
 		return 1;  /* Yes, a no-CBs CPU needs one. */
 	if (!rdp->nxttail[RCU_NEXT_TAIL])
 		return 0;  /* No, this is a no-CBs (or offline) CPU. */
@@ -350,14 +372,6 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * Return the root node of the specified rcu_state structure.
- */
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
-{
-	return &rsp->node[0];
-}
-
-/*
  * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter now is zero,
@@ -1672,6 +1686,8 @@ static void rsp_wakeup(struct irq_work *work)
 
 	/* Wake up rcu_gp_kthread() to start the grace period. */
 	wake_up(&rsp->gp_wq);
+	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+			       "Workqueuewoken");
 }
 
 /*
@@ -1706,8 +1722,11 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 	 * the wakeup to interrupt context.  And don't bother waking
 	 * up the running kthread.
 	 */
-	if (current != rsp->gp_kthread)
+	if (current != rsp->gp_kthread) {
+		trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+				       "Workqueuewake");
 		irq_work_queue(&rsp->wakeup_work);
+	}
 }
 
 /*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 75dc3c39a02a..7b572c5c65e1 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -547,7 +547,6 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
-static int rcu_nocb_needs_gp(struct rcu_state *rsp);
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f9c9057239a9..f60dd6ea8333 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2068,19 +2068,6 @@ static int __init parse_rcu_nocb_poll(char *arg)
 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
 
 /*
- * Do any no-CBs CPUs need another grace period?
- *
- * Interrupts must be disabled.  If the caller does not hold the root
- * rnp_node structure's ->lock, the results are advisory only.
- */
-static int rcu_nocb_needs_gp(struct rcu_state *rsp)
-{
-	struct rcu_node *rnp = rcu_get_root(rsp);
-
-	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
-}
-
-/*
  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
  * grace period.
  */
@@ -2402,11 +2389,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static int rcu_nocb_needs_gp(struct rcu_state *rsp)
-{
-	return 0;
-}
-
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 }