about summary refs log tree commit diff stats
path: root/include/linux/rcupdate.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--  include/linux/rcupdate.h  72
1 file changed, 71 insertions(+), 1 deletion(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 00a7fd61b3c6..5a75d19aa661 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,6 +44,7 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/percpu.h>
47#include <asm/barrier.h> 48#include <asm/barrier.h>
48 49
49extern int rcu_expedited; /* for sysctl */ 50extern int rcu_expedited; /* for sysctl */
@@ -51,7 +52,17 @@ extern int rcu_expedited; /* for sysctl */
51extern int rcutorture_runnable; /* for sysctl */ 52extern int rcutorture_runnable; /* for sysctl */
52#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ 53#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
53 54
/*
 * Identifiers for the RCU flavors that rcutorture can exercise;
 * INVALID_RCU_FLAVOR serves as the "unrecognized flavor" sentinel.
 */
enum rcutorture_type {
	RCU_FLAVOR,		/* vanilla RCU */
	RCU_BH_FLAVOR,		/* bottom-half RCU */
	RCU_SCHED_FLAVOR,	/* sched/preempt-disable RCU */
	SRCU_FLAVOR,		/* sleepable RCU */
	INVALID_RCU_FLAVOR
};
62
54#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 63#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
64void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
65 unsigned long *gpnum, unsigned long *completed);
55void rcutorture_record_test_transition(void); 66void rcutorture_record_test_transition(void);
56void rcutorture_record_progress(unsigned long vernum); 67void rcutorture_record_progress(unsigned long vernum);
57void do_trace_rcu_torture_read(const char *rcutorturename, 68void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -60,6 +71,15 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
60 unsigned long c_old, 71 unsigned long c_old,
61 unsigned long c); 72 unsigned long c);
62#else 73#else
74static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
75 int *flags,
76 unsigned long *gpnum,
77 unsigned long *completed)
78{
79 *flags = 0;
80 *gpnum = 0;
81 *completed = 0;
82}
/* No-op stub used when the tree-RCU rcutorture hooks are compiled out. */
static inline void rcutorture_record_test_transition(void)
{
}
@@ -228,6 +248,18 @@ void rcu_idle_exit(void);
228void rcu_irq_enter(void); 248void rcu_irq_enter(void);
229void rcu_irq_exit(void); 249void rcu_irq_exit(void);
230 250
/*
 * Hooks bracketing sysrq output; no-ops when RCU stall warnings are not
 * configured.  NOTE(review): purpose inferred from the names and the
 * CONFIG_RCU_STALL_COMMON guard -- confirm against the out-of-line
 * definitions before relying on this description.
 */
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
262
231#ifdef CONFIG_RCU_USER_QS 263#ifdef CONFIG_RCU_USER_QS
232void rcu_user_enter(void); 264void rcu_user_enter(void);
233void rcu_user_exit(void); 265void rcu_user_exit(void);
@@ -268,6 +300,41 @@ bool __rcu_is_watching(void);
268#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 300#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
269 301
270/* 302/*
303 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
304 */
305
306#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
307DECLARE_PER_CPU(int, rcu_cond_resched_count);
308void rcu_resched(void);
309
310/*
311 * Is it time to report RCU quiescent states?
312 *
313 * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
314 * increment some random CPU's count, and possibly also load the result from
315 * yet another CPU's count. We might even clobber some other CPU's attempt
316 * to zero its counter. This is all OK because the goal is not precision,
317 * but rather reasonable amortization of rcu_note_context_switch() overhead
318 * and extremely high probability of avoiding RCU CPU stall warnings.
319 * Note that this function has to be preempted in just the wrong place,
320 * many thousands of times in a row, for anything bad to happen.
321 */
322static inline bool rcu_should_resched(void)
323{
324 return raw_cpu_inc_return(rcu_cond_resched_count) >=
325 RCU_COND_RESCHED_LIM;
326}
327
/*
 * Report quiescent states to RCU if enough cond_resched() calls have
 * accumulated on this CPU to make it worthwhile.
 */
static inline void rcu_cond_resched(void)
{
	if (likely(!rcu_should_resched()))
		return;
	rcu_resched();
}
336
337/*
271 * Infrastructure to implement the synchronize_() primitives in 338 * Infrastructure to implement the synchronize_() primitives in
272 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 339 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
273 */ 340 */
@@ -328,7 +395,7 @@ extern struct lockdep_map rcu_lock_map;
328extern struct lockdep_map rcu_bh_lock_map; 395extern struct lockdep_map rcu_bh_lock_map;
329extern struct lockdep_map rcu_sched_lock_map; 396extern struct lockdep_map rcu_sched_lock_map;
330extern struct lockdep_map rcu_callback_map; 397extern struct lockdep_map rcu_callback_map;
331extern int debug_lockdep_rcu_enabled(void); 398int debug_lockdep_rcu_enabled(void);
332 399
333/** 400/**
334 * rcu_read_lock_held() - might we be in RCU read-side critical section? 401 * rcu_read_lock_held() - might we be in RCU read-side critical section?
@@ -949,6 +1016,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
949 * pointers, but you must use rcu_assign_pointer() to initialize the 1016 * pointers, but you must use rcu_assign_pointer() to initialize the
950 * external-to-structure pointer -after- you have completely initialized 1017 * external-to-structure pointer -after- you have completely initialized
951 * the reader-accessible portions of the linked structure. 1018 * the reader-accessible portions of the linked structure.
1019 *
1020 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
1021 * ordering guarantees for either the CPU or the compiler.
952 */ 1022 */
953#define RCU_INIT_POINTER(p, v) \ 1023#define RCU_INIT_POINTER(p, v) \
954 do { \ 1024 do { \