author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-01 19:29:47 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-12 18:38:49 -0400
commit		e44e73ca47b47510dac491329d453d82aea1d8d8 (patch)
tree		4daaaf0e5b57d81fa4fdfced7a2e4671abf2ae78 /kernel/rcu/tree.c
parent		ff3bb6f4d06247508489345ee90a8a9b6f3ffd3b (diff)
rcu: Make simple callback acceleration refer to rdp->gp_seq_needed
Now that the rcu_data structure contains ->gp_seq_needed, create an
rcu_accelerate_cbs_unlocked() helper function that locklessly checks
whether the grace period required by the new callbacks has already been
requested.  If so, it updates the callback list locally, again
locklessly.  (Interrupts must be and are disabled to avoid racing with
conflicting updates in interrupt handlers.)  Otherwise, it invokes
rcu_accelerate_cbs() as before.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
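For illustration, here is a minimal userspace C sketch of the pattern this
commit introduces: a lockless fast path that consults a cached sequence
number, falling back to a locked slow path only when a new grace-period
request must actually be made.  All names below (struct cpu_data,
seq_snap(), cb_accelerate(), cb_accelerate_unlocked()) are hypothetical
stand-ins, not kernel APIs; the real implementation is the
rcu_accelerate_cbs_unlocked() function in the diff that follows.

/*
 * Minimal userspace sketch (not kernel code): a lockless fast path
 * consulting a cached sequence number, with a locked slow path that
 * records a new request.  All names are hypothetical stand-ins.
 */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "a >= b" for free-running unsigned counters. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

struct cpu_data {
	unsigned long gp_seq_needed;	/* Latest grace period requested. */
	bool gpwrap;			/* Counter wrapped; cache unusable. */
	pthread_mutex_t node_lock;	/* Stands in for the rcu_node ->lock. */
};

/* Stand-in for rcu_seq_snap(): grace period a new request would wait for. */
static unsigned long seq_snap(unsigned long gp_seq)
{
	return gp_seq + 1;
}

/* Stand-in for rcu_accelerate_cbs(): locked slow path, records the request. */
static bool cb_accelerate(struct cpu_data *cd, unsigned long c)
{
	cd->gp_seq_needed = c;
	return true;	/* Pretend the grace-period kthread needs a wakeup. */
}

/* Stand-in for rcu_accelerate_cbs_unlocked(): try the fast path first. */
static void cb_accelerate_unlocked(struct cpu_data *cd, unsigned long gp_seq)
{
	unsigned long c = seq_snap(gp_seq);

	if (!cd->gpwrap && ULONG_CMP_GE(cd->gp_seq_needed, c)) {
		/* Old request still live: no lock, no wakeup needed. */
		printf("fast path: request %lu already covered\n", c);
		return;
	}
	pthread_mutex_lock(&cd->node_lock);
	if (cb_accelerate(cd, c))
		printf("slow path: requested %lu, wake the kthread\n", c);
	pthread_mutex_unlock(&cd->node_lock);
}

int main(void)
{
	struct cpu_data cd = { .node_lock = PTHREAD_MUTEX_INITIALIZER };

	cb_accelerate_unlocked(&cd, 0);	/* Slow path: nothing requested yet. */
	cb_accelerate_unlocked(&cd, 0);	/* Fast path: request still live. */
	return 0;
}

The payoff is that the common case, where an earlier request already
covers the new callbacks' grace period, touches only per-CPU state, so
the rcu_node lock is taken and the grace-period kthread awakened only
when something new must actually be requested.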
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	51
1 file changed, 32 insertions(+), 19 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0aeddc908181..5643c135fb06 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1702,6 +1702,34 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 }
 
 /*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held.  It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+					struct rcu_node *rnp,
+					struct rcu_data *rdp)
+{
+	unsigned long c;
+	bool needwake;
+
+	lockdep_assert_irqs_disabled();
+	c = rcu_seq_snap(&rsp->gp_seq);
+	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+		/* Old request still live, so mark recent callbacks. */
+		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
+		return;
+	}
+	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+	if (needwake)
+		rcu_gp_kthread_wake(rsp);
+}
+
+/*
  * Move any callbacks whose grace period has completed to the
  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
@@ -2739,7 +2767,6 @@ static void
 __rcu_process_callbacks(struct rcu_state *rsp)
 {
 	unsigned long flags;
-	bool needwake;
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 	struct rcu_node *rnp = rdp->mynode;
 
@@ -2752,15 +2779,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (!rcu_gp_in_progress(rsp) &&
 	    rcu_segcblist_is_enabled(&rdp->cblist)) {
 		local_irq_save(flags);
-		if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
-			local_irq_restore(flags);
-		} else {
-			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-			needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-			if (needwake)
-				rcu_gp_kthread_wake(rsp);
-		}
+		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+			rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+		local_irq_restore(flags);
 	}
 
 	rcu_check_gp_start_stall(rsp, rnp, rdp);
@@ -2818,8 +2839,6 @@ static void invoke_rcu_core(void)
 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 			    struct rcu_head *head, unsigned long flags)
 {
-	bool needwake;
-
 	/*
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
@@ -2846,13 +2865,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
 	/* Start a new grace period if one not already started. */
 	if (!rcu_gp_in_progress(rsp)) {
-		struct rcu_node *rnp = rdp->mynode;
-
-		raw_spin_lock_rcu_node(rnp);
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock_rcu_node(rnp);
-		if (needwake)
-			rcu_gp_kthread_wake(rsp);
+		rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
 	} else {
 		/* Give the grace period a kick. */
 		rdp->blimit = LONG_MAX;