 include/linux/rcupdate.h | 25 +++++++++++++++++++++++++
 kernel/rcutree.c         | 29 +++++++++++++++++++++++++----
 kernel/rcutree_plugin.h  |  8 ++++++++
 3 files changed, 58 insertions(+), 4 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7c968e4f929e..6256759fb81e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked.  RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections.  On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu().  It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section.  Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
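
The guarantee spelled out in the new call_rcu() comment is what lets an updater free memory from its callback without adding explicit barriers of its own. Below is a minimal sketch, not part of this patch, of the usual remove-then-call_rcu() pattern; struct foo, foo_list, and the function names are made up for illustration.

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
        struct list_head list;
        int data;
        struct rcu_head rcu;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Invoked only after a grace period: every pre-existing reader is done. */
static void foo_reclaim(struct rcu_head *head)
{
        struct foo *fp = container_of(head, struct foo, rcu);

        /*
         * Per the comment added above, the grace period implies full
         * memory barriers on all CPUs, so no reader that could still
         * see fp on the list can still be dereferencing it here.
         */
        kfree(fp);
}

static void foo_remove(struct foo *fp)
{
        spin_lock(&foo_lock);
        list_del_rcu(&fp->list);                /* unpublish the element */
        spin_unlock(&foo_lock);
        call_rcu(&fp->rcu, foo_reclaim);        /* defer the free */
}

The point of the comment block is that foo_reclaim() needs no smp_mb() of its own: the grace period supplies the ordering between a reader's last access and the kfree().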
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e4c2192b47c8..15a2beec320f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2228,10 +2228,28 @@ static inline int rcu_blocking_is_gp(void)
  * rcu_read_lock_sched().
  *
  * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
+ * non-threaded hardware-interrupt handlers, in progress on entry will
+ * have completed before this primitive returns.  However, this does not
+ * guarantee that softirq handlers will have completed, since in some
+ * kernels, these handlers can run in process context, and can block.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_sched() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-sched read-side critical section whose beginning
+ * preceded the call to synchronize_sched().  In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_sched() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_sched() and before the beginning of
+ * that RCU read-side critical section.  Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_sched(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
  *
  * This primitive provides the guarantees made by the (now removed)
  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
@@ -2259,6 +2277,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  * read-side critical sections have completed.  RCU read-side critical
  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  * and may be nested.
+ *
+ * See the description of synchronize_sched() for more detailed information
+ * on memory ordering guarantees.
  */
 void synchronize_rcu_bh(void)
 {
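
For the synchronize_sched() guarantees documented above, the typical shape is a preempt-disabled reader paired with an updater that publishes a new pointer, waits, and then frees the old one. The sketch below is illustrative only: cur_config and the function names are hypothetical, and a real updater would also be serialized by a lock rather than passing a bare 1 to rcu_dereference_protected().

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct config {
        int threshold;
};

static struct config __rcu *cur_config;

/* Reader: an RCU-sched read-side critical section (preemption disabled). */
static int read_threshold(void)
{
        struct config *c;
        int ret = 0;

        rcu_read_lock_sched();
        c = rcu_dereference_sched(cur_config);
        if (c)
                ret = c->threshold;
        rcu_read_unlock_sched();
        return ret;
}

/* Updater: publish a new config, wait out old readers, then free. */
static void update_threshold(struct config *newc)
{
        struct config *oldc = rcu_dereference_protected(cur_config, 1);

        rcu_assign_pointer(cur_config, newc);
        /*
         * When this returns, every CPU has executed a full memory barrier
         * since the end of any RCU-sched read-side critical section that
         * began before the call, so no reader can still hold oldc.
         */
        synchronize_sched();
        kfree(oldc);
}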
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f92115488187..57e0ef8ed721 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -670,6 +670,9 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
  * concurrently with new RCU read-side critical sections that began while
  * synchronize_rcu() was waiting.  RCU read-side critical sections are
  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ *
+ * See the description of synchronize_sched() for more detailed information
+ * on memory ordering guarantees.
  */
 void synchronize_rcu(void)
 {
@@ -875,6 +878,11 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ *
+ * Note that this primitive does not necessarily wait for an RCU grace period
+ * to complete.  For example, if there are no RCU callbacks queued anywhere
+ * in the system, then rcu_barrier() is within its rights to return
+ * immediately, without waiting for anything, much less an RCU grace period.
  */
 void rcu_barrier(void)
 {
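
The rcu_barrier() note matters mostly to module writers: rcu_barrier() waits for already-queued callbacks to be invoked, not for readers, so it is the right tool to use before module text containing callback functions goes away. A hedged sketch with hypothetical names, not taken from this patch:

#include <linux/module.h>
#include <linux/rcupdate.h>

static void __exit foo_exit(void)
{
        /* 1. Stop anything that might queue new call_rcu() callbacks,
         *    e.g. unregister hooks and unpublish all list entries. */

        /*
         * 2. Wait for every already-queued callback to run.  As the
         *    comment above notes, if nothing is queued this may return
         *    immediately, without waiting for a grace period.
         */
        rcu_barrier();
}
module_exit(foo_exit);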