commit     d5cd96851d520e5caff13ddf99e3b2b759ae3b1d
tree       4b087ff1d4a3f1480947c9b0163795e5d3aa6d70
parent     a824a287f6eaec65f1cf7aedfd5f6b69d2d3858f
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-04-12 13:45:06 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-05-15 13:30:27 -0400
rcu: Inline rcu_start_gp_advanced() into rcu_start_future_gp()
The rcu_start_gp_advanced() function is invoked only from
rcu_start_future_gp(), and much of its code is redundant when invoked from
that context.  This commit therefore inlines rcu_start_gp_advanced() into
rcu_start_future_gp(), then removes rcu_start_gp_advanced().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
 kernel/rcu/tree.c | 56 ++++++++++++--------------------------------------------
 1 file changed, 12 insertions(+), 44 deletions(-)
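The refactoring pattern this commit applies is worth a concrete
illustration before the diff itself: when a static helper has exactly one
caller, its body can be folded into that caller, and any check the call
site has already established can be dropped (here, the
cpu_needs_another_gp() test).  Below is a minimal userspace sketch of that
pattern; all names (struct state, start_helper(), request_start(),
FLAG_INIT) are hypothetical stand-ins, not the RCU code.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_INIT 0x1

struct state {
	bool worker_running;	/* Has the worker thread been spawned? */
	bool work_needed;	/* Is there actually work to hand off? */
	int flags;		/* Request flags handed to the worker. */
};

/* Before: the helper re-checks a condition its only caller has
 * already established (that work is actually needed). */
static bool start_helper(struct state *sp)
{
	if (!sp->worker_running || !sp->work_needed)
		return false;
	sp->flags |= FLAG_INIT;
	return true;		/* Caller must notify the worker. */
}

/* After inlining at the sole call site: the caller reaches this code
 * only when work is needed, so that check disappears and only the
 * worker-exists check survives. */
static bool request_start(struct state *sp)
{
	if (!sp->worker_running)
		return false;
	sp->flags |= FLAG_INIT;	/* Formerly start_helper()'s body. */
	return true;
}

int main(void)
{
	struct state s = { .worker_running = true, .work_needed = true };

	printf("helper:  %d flags: %#x\n", start_helper(&s), (unsigned)s.flags);
	s.flags = 0;
	printf("inlined: %d flags: %#x\n", request_start(&s), (unsigned)s.flags);
	return 0;
}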
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 25dbbc753fef..4433f68a1c7b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -524,8 +524,6 @@ module_param(rcu_kick_kthreads, bool, 0644);
 static ulong jiffies_till_sched_qs = HZ / 10;
 module_param(jiffies_till_sched_qs, ulong, 0444);
 
-static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
-				  struct rcu_data *rdp);
 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
 static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
@@ -1679,7 +1677,8 @@ static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  * rcu_node structure's ->need_future_gp field.  Returns true if there
  * is reason to awaken the grace-period kthread.
  *
- * The caller must hold the specified rcu_node structure's ->lock.
+ * The caller must hold the specified rcu_node structure's ->lock, which
+ * is why the caller is responsible for waking the grace-period kthread.
  */
 static bool __maybe_unused
 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
@@ -1687,7 +1686,8 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 {
 	unsigned long c;
 	bool ret = false;
-	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
+	struct rcu_state *rsp = rdp->rsp;
+	struct rcu_node *rnp_root = rcu_get_root(rsp);
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1695,7 +1695,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * Pick up grace-period number for new callbacks.  If this
 	 * grace period is already marked as needed, return to the caller.
 	 */
-	c = rcu_cbs_completed(rdp->rsp, rnp);
+	c = rcu_cbs_completed(rsp, rnp);
 	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
 	if (need_future_gp_element(rnp, c)) {
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
@@ -1727,7 +1727,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * period in progress, it will be smaller than the one we obtained
 	 * earlier.  Adjust callbacks as needed.
 	 */
-	c = rcu_cbs_completed(rdp->rsp, rnp_root);
+	c = rcu_cbs_completed(rsp, rnp_root);
 	if (!rcu_is_nocb_cpu(rdp->cpu))
 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
 
@@ -1748,7 +1748,12 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
 	} else {
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
-		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
+		if (!rsp->gp_kthread)
+			goto unlock_out; /* No grace-period kthread yet! */
+		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+				       TPS("newreq"));
+		ret = true;  /* Caller must wake GP kthread. */
 	}
 unlock_out:
 	if (rnp != rnp_root)
@@ -2299,43 +2304,6 @@ static int __noreturn rcu_gp_kthread(void *arg)
 }
 
 /*
- * Start a new RCU grace period if warranted, re-initializing the hierarchy
- * in preparation for detecting the next grace period.  The caller must hold
- * the root node's ->lock and hard irqs must be disabled.
- *
- * Note that it is legal for a dying CPU (which is marked as offline) to
- * invoke this function.  This can happen when the dying CPU reports its
- * quiescent state.
- *
- * Returns true if the grace-period kthread must be awakened.
- */
-static bool
-rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
-		      struct rcu_data *rdp)
-{
-	raw_lockdep_assert_held_rcu_node(rnp);
-	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
-		/*
-		 * Either we have not yet spawned the grace-period
-		 * task, this CPU does not need another grace period,
-		 * or a grace period is already in progress.
-		 * Either way, don't start a new grace period.
-		 */
-		return false;
-	}
-	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
-	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
-			       TPS("newreq"));
-
-	/*
-	 * We can't do wakeups while holding the rnp->lock, as that
-	 * could cause possible deadlocks with the rq->lock.  Defer
-	 * the wakeup to our caller.
-	 */
-	return true;
-}
-
-/*
  * Report a full set of quiescent states to the specified rcu_state data
  * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
  * kthread if another grace period is required.  Whether we wake
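The comment deleted above ("We can't do wakeups while holding the
rnp->lock ... Defer the wakeup to our caller") survives in the inlined code
only as the terse "/* Caller must wake GP kthread. */", so the underlying
pattern is worth a short illustration: code running under a lock must not
issue the wakeup itself, because in the kernel the wakeup path acquires
scheduler rq->lock, which can nest the other way around rnp->lock; instead
the locked code returns a flag and the caller performs the wakeup after
dropping the lock.  Below is a hedged userspace analogue using POSIX
threads; the names (request_work_locked(), wake_worker(), etc.) are
hypothetical, and the actual rnp->lock vs. rq->lock ordering has no exact
userspace counterpart.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wake_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake_cv    = PTHREAD_COND_INITIALIZER;
static bool work_requested;

/* Runs with state_lock held.  Records the request but must NOT take
 * wake_lock here: in the kernel analogue, waking a task while holding
 * rnp->lock risks deadlock against the scheduler's rq->lock.  We only
 * tell the caller whether a wakeup is required. */
static bool request_work_locked(void)
{
	if (work_requested)
		return false;	/* Already requested; no wakeup needed. */
	work_requested = true;
	return true;		/* Caller must wake the worker. */
}

/* Safe to call only after state_lock has been dropped. */
static void wake_worker(void)
{
	pthread_mutex_lock(&wake_lock);
	pthread_cond_signal(&wake_cv);
	pthread_mutex_unlock(&wake_lock);
}

int main(void)
{
	bool needwake;

	pthread_mutex_lock(&state_lock);
	needwake = request_work_locked();
	pthread_mutex_unlock(&state_lock);

	if (needwake)	/* The deferred wakeup, issued after the unlock. */
		wake_worker();
	printf("wakeup deferred to caller: %s\n", needwake ? "yes" : "no");
	return 0;
}

One further detail visible in the diff: the inlined code ORs
RCU_GP_FLAG_INIT into ->gp_flags (rsp->gp_flags | RCU_GP_FLAG_INIT), where
the removed rcu_start_gp_advanced() overwrote the whole flag word, so any
other flag bits already set are now preserved.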