-rw-r--r--   include/linux/rcupdate.h   117
-rw-r--r--   kernel/rcu/tree.c           42
-rw-r--r--   kernel/rcu/tree_plugin.h    33
-rw-r--r--   kernel/rcu/update.c         20
4 files changed, 89 insertions(+), 123 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 393e461d3ea8..7a206f039fc2 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -140,115 +140,14 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 /* Exported common interfaces */
 
 #ifdef CONFIG_PREEMPT_RCU
-
-/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed. However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- *
- * Note that all CPUs must agree that the grace period extended beyond
- * all pre-existing RCU read-side critical section. On systems with more
- * than one CPU, this means that when "func()" is invoked, each CPU is
- * guaranteed to have executed a full memory barrier since the end of its
- * last RCU read-side critical section whose beginning preceded the call
- * to call_rcu(). It also means that each CPU executing an RCU read-side
- * critical section that continues beyond the start of "func()" must have
- * executed a memory barrier after the call_rcu() but before the beginning
- * of that RCU read-side critical section. Note that these guarantees
- * include CPUs that are offline, idle, or executing in user mode, as
- * well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
- * resulting RCU callback function "func()", then both CPU A and CPU B are
- * guaranteed to execute a full memory barrier during the time interval
- * between the call to call_rcu() and the invocation of "func()" -- even
- * if CPU A and CPU B are the same CPU (but again only if the system has
- * more than one CPU).
- */
-void call_rcu(struct rcu_head *head,
-	      rcu_callback_t func);
-
+void call_rcu(struct rcu_head *head, rcu_callback_t func);
 #else /* #ifdef CONFIG_PREEMPT_RCU */
-
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
 #define call_rcu	call_rcu_sched
-
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- *  OR
- *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *  These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_bh(struct rcu_head *head,
-		 rcu_callback_t func);
-
-/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_sched() assumes
- * that the read-side critical sections end on enabling of preemption
- * or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
- *  OR
- *  anything that disables preemption.
- *  These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_sched(struct rcu_head *head,
-		    rcu_callback_t func);
-
+void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
-
-/**
- * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks() assumes
- * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution. As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void rcu_barrier_tasks(void);
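
The prototypes above are easier to follow next to a typical updater. Below is a minimal sketch, not part of this patch, of the usual call_rcu() pattern: publish a new version of an RCU-protected structure and let the old one be freed only after a grace period. The names struct foo, gbl_foo, foo_update() and foo_free_rcu() are hypothetical.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo __rcu *gbl_foo;		/* hypothetical RCU-protected pointer */
static DEFINE_SPINLOCK(gbl_foo_lock);		/* serializes updaters */

/* Invoked after a grace period: no reader can still hold the old version. */
static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static int foo_update(int new_val)
{
	struct foo *new_fp, *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return -ENOMEM;
	new_fp->val = new_val;

	spin_lock(&gbl_foo_lock);
	old_fp = rcu_dereference_protected(gbl_foo,
					   lockdep_is_held(&gbl_foo_lock));
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish the new version */
	spin_unlock(&gbl_foo_lock);

	if (old_fp)
		call_rcu(&old_fp->rcu, foo_free_rcu);	/* reclaim old version later */
	return 0;
}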
@@ -474,18 +373,8 @@ extern struct lockdep_map rcu_bh_lock_map;
 extern struct lockdep_map rcu_sched_lock_map;
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
-
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
-
-/**
- * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
- * RCU-sched read-side critical section. In absence of
- * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise.
- */
 int rcu_read_lock_sched_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
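
rcu_read_lock_sched_held() is normally consumed through lockdep-aware accessors rather than called directly. A minimal sketch under that assumption, not part of this patch; struct cfg, gbl_cfg, cfg_mutex and cfg_access() are hypothetical names.

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct cfg { int flags; };			/* hypothetical */

static struct cfg __rcu *gbl_cfg;
static DEFINE_MUTEX(cfg_mutex);			/* protects updates to gbl_cfg */

/*
 * Legal either inside an RCU-sched read-side critical section or with
 * cfg_mutex held; any other caller triggers a lockdep splat under
 * CONFIG_PROVE_RCU.
 */
static struct cfg *cfg_access(void)
{
	return rcu_dereference_check(gbl_cfg,
				     rcu_read_lock_sched_held() ||
				     lockdep_is_held(&cfg_mutex));
}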
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 121c1436a7f3..5ebc830297c1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3223,8 +3223,24 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 	local_irq_restore(flags);
 }
 
-/*
- * Queue an RCU-sched callback for invocation after a grace period.
+/**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_sched() assumes
+ * that the read-side critical sections end on enabling of preemption
+ * or on voluntary preemption.
+ * RCU read-side critical sections are delimited by :
+ *  - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
+ *  - anything that disables preemption.
+ *
+ *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 {
@@ -3232,8 +3248,26 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
-/*
- * Queue an RCU callback for invocation after a quicker grace period.
+/**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ *  OR
+ *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 {
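
The two kernel-doc blocks moved into tree.c above describe the read-side sections these callbacks wait for. A minimal sketch of those read sides, not part of this patch; sched_data, bh_data and the reader functions are hypothetical names.

#include <linux/rcupdate.h>

static int __rcu *sched_data;			/* hypothetical RCU-protected pointers */
static int __rcu *bh_data;

/* Read side that call_rcu_sched() waits for: any preemption-disabled region. */
static int read_sched_protected(void)
{
	int *p, val;

	rcu_read_lock_sched();			/* or preempt_disable() */
	p = rcu_dereference_sched(sched_data);
	val = p ? *p : 0;
	rcu_read_unlock_sched();
	return val;
}

/* Read side that call_rcu_bh() waits for: softirq handlers or _bh-marked sections. */
static int read_bh_protected(void)
{
	int *p, val;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(bh_data);
	val = p ? *p : 0;
	rcu_read_unlock_bh();
	return val;
}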
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 573fbe9640a0..116cf8339826 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -675,8 +675,37 @@ static void rcu_preempt_do_callbacks(void)
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
-/*
- * Queue a preemptible-RCU callback for invocation after a grace period.
+/**
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical section. On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section. Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
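
For the preemptible-RCU call_rcu() documented above, the matching read side is delimited by rcu_read_lock()/rcu_read_unlock(), as in the following sketch (hypothetical struct item, cur_item and item_read_val(), not part of this patch).

#include <linux/rcupdate.h>

struct item {
	int val;
	struct rcu_head rcu;
};

static struct item __rcu *cur_item;		/* hypothetical RCU-protected pointer */

static int item_read_val(void)
{
	struct item *p;
	int val = -1;

	rcu_read_lock();			/* begin read-side critical section */
	p = rcu_dereference(cur_item);		/* may race with an updater's call_rcu() */
	if (p)
		val = p->val;
	rcu_read_unlock();			/* versions seen here may now be reclaimed */
	return val;
}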
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 123a9c4b5055..84dec2c8ad1b 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -576,9 +576,23 @@ module_param(rcu_task_stall_timeout, int, 0644);
 static void rcu_spawn_tasks_kthread(void);
 static struct task_struct *rcu_tasks_kthread_ptr;
 
-/*
- * Post an RCU-tasks callback. First call must be from process context
- * after the scheduler if fully operational.
+/**
+ * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
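
As the comment notes, call_rcu_tasks() waits for every task to pass through a voluntary context switch, idle, or usermode execution, which makes it suitable for reclaiming regions that tasks may be using without any RCU read-side markers (for example, code trampolines). A rough sketch under that assumption, not part of this patch; struct tramp, tramp_free_rcu() and tramp_retire() are hypothetical names.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct tramp {
	void *buf;			/* kmalloc()ed region tasks may still be using */
	struct rcu_head rh;
};

/* Runs once every task has passed through a tasks-RCU quiescent state. */
static void tramp_free_rcu(struct rcu_head *rhp)
{
	struct tramp *t = container_of(rhp, struct tramp, rh);

	kfree(t->buf);
	kfree(t);
}

/* Must be called from process context once the scheduler is fully up. */
static void tramp_retire(struct tramp *t)
{
	call_rcu_tasks(&t->rh, tramp_free_rcu);
}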