author		Ingo Molnar <mingo@kernel.org>	2014-03-25 01:45:39 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-03-25 01:45:39 -0400
commit		7de700e6806cafa30c70bc84478431a11197a5ea
tree		df182c2e017f621cac0570d622b24b2ea23f538f
parent		62c206bd514600d4d73751ade00dca8e488390a3
parent		765a3f4fed708ae429ee095914a7897acb3a65bd
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU update from Paul E. McKenney:

 " [...] one late-breaking commit. This one was requested for 3.15 by
   Peter Zijlstra. It is low risk because it adds a new in-kernel API
   with minimal changes to the existing code. Those minimal changes are
   the addition of memory barriers and ACCESS_ONCE() macro calls,
   neither of which should be able to break things. This commit has
   passed significant rcutorture testing, with further rcutorture
   additions slated for 3.16. This commit has also been exposed to
   -next testing. "

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	include/linux/rcutiny.h	10
-rw-r--r--	include/linux/rcutree.h	2
-rw-r--r--	kernel/rcu/tree.c	62
3 files changed, 70 insertions(+), 4 deletions(-)
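
The pull message describes the new API only at arm's length, so a
caller-side sketch may help. This is illustration, not part of the
patch: struct foo, foo_global, foo_lock, and do_other_expensive_work()
are all hypothetical. An updater snapshots RCU state, performs
unrelated slow work, and then blocks only if no full grace period
elapsed in the meantime:

static DEFINE_SPINLOCK(foo_lock);	/* hypothetical */
static struct foo __rcu *foo_global;	/* hypothetical RCU-protected slot */

void foo_replace(struct foo *new_fp)
{
	struct foo *old_fp;
	unsigned long rcu_cookie;

	spin_lock(&foo_lock);
	old_fp = rcu_dereference_protected(foo_global,
					   lockdep_is_held(&foo_lock));
	rcu_assign_pointer(foo_global, new_fp);
	spin_unlock(&foo_lock);

	/* Snapshot RCU state before starting the slow work. */
	rcu_cookie = get_state_synchronize_rcu();

	do_other_expensive_work();	/* hypothetical slow path */

	/* Blocks only if no full grace period elapsed since the snapshot. */
	cond_synchronize_rcu(rcu_cookie);
	kfree(old_fp);
}

When the slow work spans at least one grace period, cond_synchronize_rcu()
returns immediately and the synchronize_rcu() latency is hidden entirely.
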
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e8cb6e3b52a7..425c659d54e5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,6 +27,16 @@
 
 #include <linux/cache.h>
 
+static inline unsigned long get_state_synchronize_rcu(void)
+{
+	return 0;
+}
+
+static inline void cond_synchronize_rcu(unsigned long oldstate)
+{
+	might_sleep();
+}
+
 static inline void rcu_barrier_bh(void)
 {
 	wait_rcu_gp(call_rcu_bh);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e9c63884df0a..a59ca05fd4e3 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -76,6 +76,8 @@ static inline void synchronize_rcu_bh_expedited(void)
 void rcu_barrier(void);
 void rcu_barrier_bh(void);
 void rcu_barrier_sched(void);
+unsigned long get_state_synchronize_rcu(void);
+void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 351faba48b91..0c47e300210a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1421,13 +1421,14 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
 	/* Advance to a new grace period and initialize state. */
 	record_gp_stall_check_time(rsp);
-	smp_wmb(); /* Record GP times before starting GP. */
-	rsp->gpnum++;
+	/* Record GP times before starting GP, hence smp_store_release(). */
+	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
 	raw_spin_unlock_irq(&rnp->lock);
 
 	/* Exclude any concurrent CPU-hotplug operations. */
 	mutex_lock(&rsp->onoff_mutex);
+	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
 
 	/*
 	 * Set the quiescent-state-needed bits in all the rcu_node
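
The smp_store_release() above is what allows get_state_synchronize_rcu(),
added later in this patch, to sample ->gpnum locklessly with
smp_load_acquire(). A minimal sketch of that release/acquire pairing,
using hypothetical variables rather than the rcu_state fields:

static unsigned long gp_counter;	/* hypothetical stand-in for ->gpnum */

/* Writer, cf. rcu_gp_init(): all stores before the release are visible
 * to any reader that observes the incremented counter. */
record_times();					/* hypothetical bookkeeping */
smp_store_release(&gp_counter, gp_counter + 1);

/* Reader, cf. get_state_synchronize_rcu(): later accesses cannot be
 * reordered before the acquire load. */
snap = smp_load_acquire(&gp_counter);
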
@@ -1555,10 +1556,11 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
-	smp_mb__after_unlock_lock();
+	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
 	rcu_nocb_gp_set(rnp, nocb);
 
-	rsp->completed = rsp->gpnum; /* Declare grace period done. */
+	/* Declare grace period done. */
+	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
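
The plain assignment becomes ACCESS_ONCE() because cond_synchronize_rcu(),
added below, now reads ->completed without holding the root rcu_node lock.
For reference (quoted from include/linux/compiler.h of this era), the macro
is a volatile cast that forces the compiler to perform exactly one access,
with no tearing or refetching:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
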
@@ -2637,6 +2639,58 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
+/**
+ * get_state_synchronize_rcu - Snapshot current RCU state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_rcu()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_rcu(void)
+{
+	/*
+	 * Any prior manipulation of RCU-protected data must happen
+	 * before the load from ->gpnum.
+	 */
+	smp_mb();  /* ^^^ */
+
+	/*
+	 * Make sure this load happens before the purportedly
+	 * time-consuming work between get_state_synchronize_rcu()
+	 * and cond_synchronize_rcu().
+	 */
+	return smp_load_acquire(&rcu_state->gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
+
+/**
+ * cond_synchronize_rcu - Conditionally wait for an RCU grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_rcu()
+ *
+ * If a full RCU grace period has elapsed since the earlier call to
+ * get_state_synchronize_rcu(), just return. Otherwise, invoke
+ * synchronize_rcu() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account. But
+ * counter wrap is harmless. If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_rcu(unsigned long oldstate)
+{
+	unsigned long newstate;
+
+	/*
+	 * Ensure that this load happens before any RCU-destructive
+	 * actions the caller might carry out after we return.
+	 */
+	newstate = smp_load_acquire(&rcu_state->completed);
+	if (ULONG_CMP_GE(oldstate, newstate))
+		synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
 	/*
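
A closing note on the wrap tolerance that cond_synchronize_rcu()'s comment
claims: ULONG_CMP_GE() is the circular comparison from
include/linux/rcupdate.h, which asks which half of the unsigned range the
difference a - b lands in:

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

/* ULONG_CMP_GE(5UL, 3UL)       -> true:  5 - 3 = 2, lower half. */
/* ULONG_CMP_GE(3UL, 5UL)       -> false: 3 - 5 wraps to the upper half. */
/* ULONG_CMP_GE(1UL, ULONG_MAX) -> true:  1 - ULONG_MAX = 2, so a counter
 *                                 that wrapped still compares as later. */

So synchronize_rcu() runs exactly when ->completed has not yet passed the
snapshotted ->gpnum, modulo the harmless extra wait that the kernel-doc
comment describes for the once-per-wrap case.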