author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2018-04-12 14:24:09 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2018-05-15 13:30:32 -0400
commit     41e80595abfc608eb0fe5148bcaed1ed78d7a6b7
tree       0f32698b8e2e74343af7bcf3a75e46ed49fdc0f2
parent     d5cd96851d520e5caff13ddf99e3b2b759ae3b1d
rcu: Make rcu_start_future_gp() caller select grace period
The rcu_accelerate_cbs() function selects a grace-period target, which
it uses to have rcu_segcblist_accelerate() assign numbers to recently
queued callbacks. Then it invokes rcu_start_future_gp(), which selects
a grace-period target yet again, a needless duplication of work. This
commit therefore changes rcu_start_future_gp() to take the grace-period
target as a parameter, avoiding the double selection. It also renames
rcu_start_future_gp() to rcu_start_this_gp() to reflect this change in
functionality, and renames trace_rcu_future_gp() to trace_rcu_this_gp()
for the same reason.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
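For illustration only, the minimal user-space C sketch below shows the calling
pattern this patch adopts: the caller selects the grace-period target once and
passes it down, instead of the callee recomputing the target and reporting it
back through a pointer. All names here (pick_gp_target(), start_gp()) are
hypothetical stand-ins, not the kernel's API; the real code is in the diff.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for rcu_cbs_completed(): choose a GP target. */
static unsigned long pick_gp_target(void)
{
	return 42;
}

/* Hypothetical stand-in for rcu_start_this_gp(): the target arrives as a
 * parameter, so the callee never has to select it a second time. */
static bool start_gp(unsigned long c)
{
	printf("requesting grace period %lu\n", c);
	return true; /* pretend the grace-period kthread needs a wakeup */
}

int main(void)
{
	/* Select the target once and reuse it, mirroring how
	 * rcu_accelerate_cbs() now passes the same value to both
	 * rcu_segcblist_accelerate() and rcu_start_this_gp(). */
	unsigned long c = pick_gp_target();
	bool needwake = start_gp(c);

	if (needwake)
		printf("waking grace-period kthread for GP %lu\n", c);
	return 0;
}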
-rw-r--r--  kernel/rcu/tree.c        | 53
-rw-r--r--  kernel/rcu/tree_plugin.h |  9
2 files changed, 27 insertions, 35 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4433f68a1c7b..94519c7d552f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1659,12 +1659,9 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
 	return rnp->completed + 2;
 }
 
-/*
- * Trace-event helper function for rcu_start_future_gp() and
- * rcu_nocb_wait_gp().
- */
-static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-				unsigned long c, const char *s)
+/* Trace-event wrapper function for trace_rcu_future_grace_period. */
+static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+			      unsigned long c, const char *s)
 {
 	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
 				      rnp->completed, c, rnp->level,
@@ -1672,33 +1669,27 @@ static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 }
 
 /*
- * Start some future grace period, as needed to handle newly arrived
+ * Start the specified grace period, as needed to handle newly arrived
  * callbacks. The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp field. Returns true if there
+ * rcu_node structure's ->need_future_gp[] field. Returns true if there
  * is reason to awaken the grace-period kthread.
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
  */
-static bool __maybe_unused
-rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-		    unsigned long *c_out)
+static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+			      unsigned long c)
 {
-	unsigned long c;
 	bool ret = false;
 	struct rcu_state *rsp = rdp->rsp;
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
-	/*
-	 * Pick up grace-period number for new callbacks. If this
-	 * grace period is already marked as needed, return to the caller.
-	 */
-	c = rcu_cbs_completed(rsp, rnp);
-	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
+	/* If the specified GP is already known needed, return to caller. */
+	trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
 	if (need_future_gp_element(rnp, c)) {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Prestartleaf"));
 		goto out;
 	}
 
@@ -1710,7 +1701,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 */
 	if (rnp->gpnum != rnp->completed) {
 		need_future_gp_element(rnp, c) = true;
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
 	}
 
@@ -1736,7 +1727,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * recorded, trace and leave.
 	 */
 	if (need_future_gp_element(rnp_root, c)) {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Prestartedroot"));
 		goto unlock_out;
 	}
 
@@ -1745,9 +1736,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 
 	/* If a grace period is not already in progress, start one. */
 	if (rnp_root->gpnum != rnp_root->completed) {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleafroot"));
 	} else {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Startedroot"));
 		if (!rsp->gp_kthread)
 			goto unlock_out; /* No grace-period kthread yet! */
 		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
@@ -1759,8 +1750,6 @@ unlock_out:
 	if (rnp != rnp_root)
 		raw_spin_unlock_rcu_node(rnp_root);
 out:
-	if (c_out != NULL)
-		*c_out = c;
 	return ret;
 }
 
@@ -1776,8 +1765,8 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	need_future_gp_element(rnp, c) = false;
 	needmore = need_any_future_gp(rnp);
-	trace_rcu_future_gp(rnp, rdp, c,
-			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
+	trace_rcu_this_gp(rnp, rdp, c,
+			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
 }
 
@@ -1812,6 +1801,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 			       struct rcu_data *rdp)
 {
+	unsigned long c;
 	bool ret = false;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
@@ -1830,8 +1820,9 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	 * accelerating callback invocation to an earlier grace-period
 	 * number.
 	 */
-	if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
-		ret = rcu_start_future_gp(rnp, rdp, NULL);
+	c = rcu_cbs_completed(rsp, rnp);
+	if (rcu_segcblist_accelerate(&rdp->cblist, c))
+		ret = rcu_start_this_gp(rnp, rdp, c);
 
 	/* Trace depending on how much we were able to accelerate. */
 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
@@ -2174,8 +2165,8 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	/* Check for GP requests since above loop. */
 	rdp = this_cpu_ptr(rsp->rda);
 	if (need_any_future_gp(rnp)) {
-		trace_rcu_future_gp(rnp, rdp, rsp->completed - 1,
-				    TPS("CleanupMore"));
+		trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+				  TPS("CleanupMore"));
 		needgp = true;
 	}
 	/* Advance CBs to reduce false positives below. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 313b77d9cf06..322777492fff 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2035,7 +2035,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	needwake = rcu_start_future_gp(rnp, rdp, &c);
+	c = rcu_cbs_completed(rdp->rsp, rnp);
+	needwake = rcu_start_this_gp(rnp, rdp, c);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
@@ -2044,7 +2045,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 * Wait for the grace period. Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
-	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
+	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
 		swait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
@@ -2052,9 +2053,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (likely(d))
 			break;
 		WARN_ON(signal_pending(current));
-		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait"));
 	}
-	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
+	trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait"));
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
 