Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--	include/linux/rcupdate.h | 46
1 file changed, 10 insertions(+), 36 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/debugobjects.h> | 44 | #include <linux/debugobjects.h> |
45 | #include <linux/bug.h> | 45 | #include <linux/bug.h> |
46 | #include <linux/compiler.h> | 46 | #include <linux/compiler.h> |
47 | #include <linux/percpu.h> | ||
48 | #include <asm/barrier.h> | 47 | #include <asm/barrier.h> |
49 | 48 | ||
50 | extern int rcu_expedited; /* for sysctl */ | 49 | extern int rcu_expedited; /* for sysctl */ |
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
 /*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count.  We might even clobber some other CPU's attempt
- * to zero its counter.  This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-	return raw_cpu_inc_return(rcu_cond_resched_count) >=
-	       RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiescent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-	if (unlikely(rcu_should_resched()))
-		rcu_resched();
-}
-
-/*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
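
For context on the block removed above: the hooks amortize quiescent-state
reporting, so each rcu_cond_resched() call normally does only a cheap
raw_cpu_inc_return() on the per-CPU counter, and only the call that reaches
RCU_COND_RESCHED_LIM (roughly every 256th, modulo the benign cross-CPU races
the comment describes) invokes rcu_resched(). A minimal caller-side sketch of
how a long-running kernel loop would have used the hook; struct item and
process_one_item() are hypothetical, not part of this patch:

#include <linux/rcupdate.h>

static void process_many_items(struct item *items, int nitems)
{
	int i;

	for (i = 0; i < nitems; i++) {
		/* Hypothetical per-item work. */
		process_one_item(&items[i]);
		/*
		 * Usually just a per-CPU counter bump; about every 256th
		 * call crosses RCU_COND_RESCHED_LIM and calls rcu_resched()
		 * to report a quiescent state, avoiding stall warnings.
		 */
		rcu_cond_resched();
	}
}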
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
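
The hunk above adds non-stack counterparts to the existing
init_rcu_head_on_stack()/destroy_rcu_head_on_stack() helpers: real functions
when CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, empty inlines otherwise. A hedged usage
sketch, assuming the new pair is meant to bracket the lifetime of an rcu_head
embedded in a dynamically allocated object; struct foo and its helpers are
invented for illustration:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rcu;
	int data;
};

static struct foo *foo_alloc(void)
{
	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);

	if (fp)
		init_rcu_head(&fp->rcu);	/* announce head to debug-objects */
	return fp;
}

static void foo_free(struct foo *fp)
{
	destroy_rcu_head(&fp->rcu);		/* retire head before freeing */
	kfree(fp);
}

With debug objects disabled, both calls compile to the empty static inlines
added in this hunk, so the sketch costs nothing in production builds.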