Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/rculist.h         32
-rw-r--r--   include/linux/rcupdate.h       154
-rw-r--r--   include/linux/rcupdate_wait.h   14
-rw-r--r--   include/linux/rcutiny.h         53
-rw-r--r--   include/linux/rcutree.h         31
-rw-r--r--   include/linux/sched.h            6
-rw-r--r--   include/linux/torture.h          2
7 files changed, 150 insertions, 142 deletions
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4786c2235b98..e91ec9ddcd30 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old,
  * @list: the RCU-protected list to splice
  * @prev: points to the last element of the existing list
  * @next: points to the first element of the existing list
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
  *
  * The list pointed to by @prev and @next can be RCU-read traversed
  * concurrently with this function.
@@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
  * designed for stacks.
  * @list: the RCU-protected list to splice
  * @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
  */
 static inline void list_splice_init_rcu(struct list_head *list,
 					struct list_head *head,
@@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * list, designed for queues.
  * @list: the RCU-protected list to splice
  * @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
  */
 static inline void list_splice_tail_init_rcu(struct list_head *list,
 					     struct list_head *head,
@@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @type: the type of the struct this is embedded in.
  * @member: the name of the list_head within the struct.
  *
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding.  One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked.  Another example is when items are added to the list,
+ * but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
 	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
@@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @head: the head for your list.
  * @member: the name of the list_struct within the struct.
  *
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding.  One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked.  Another example is when items are added to the list,
+ * but never deleted.
  */
 #define list_for_each_entry_lockless(pos, head, member) \
 	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
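
With the _bh and _sched grace periods folded into plain RCU, synchronize_rcu() becomes the natural @sync argument for the splice primitives above. A minimal update-side sketch (the gbl_list, gbl_mutex, and splice_batch names are illustrative, not part of this patch; @sync may block, so a sleeping lock serializes updaters):

static LIST_HEAD(gbl_list);		/* RCU-protected list */
static DEFINE_MUTEX(gbl_mutex);		/* serializes updaters; @sync may block */

static void splice_batch(struct list_head *batch)
{
	mutex_lock(&gbl_mutex);
	/* Internally waits for a grace period via synchronize_rcu(). */
	list_splice_init_rcu(batch, &gbl_list, synchronize_rcu);
	mutex_unlock(&gbl_mutex);
}

Readers would continue to traverse gbl_list with list_for_each_entry_rcu() under rcu_read_lock(), exactly as before this patch.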
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75e5b393cf44..4db8bcacc51a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,23 +48,14 @@
 #define ulong2long(a) (*(long *)(&(a)))
 
 /* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define call_rcu call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
 void rcu_barrier_tasks(void);
+void synchronize_rcu(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void synchronize_rcu(void);
 
 /*
  * Defined as a macro as it is a very low level header included from
@@ -88,11 +79,6 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
-static inline void synchronize_rcu(void)
-{
-	synchronize_sched();
-}
-
 static inline int rcu_preempt_depth(void)
 {
 	return 0;
@@ -103,8 +89,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
 extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
@@ -135,11 +119,10 @@ static inline void rcu_init_nohz(void) { }
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
  *
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections.  However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections.  However,
+ * things like powertop need tracepoints in the inner idle loop.
  *
  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
  * will tell RCU that it needs to pay attention, invoke its argument
@@ -167,20 +150,16 @@ static inline void rcu_init_nohz(void) { }
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
-#define rcu_note_voluntary_context_switch(t) \
-	do { \
-		rcu_all_qs(); \
-		rcu_tasks_qs(t); \
-	} while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define rcu_tasks_qs(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -325,9 +304,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
  * and rcu_assign_pointer().  Some of these could be folded into their
  * callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointers markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
  */
 
 #ifdef __CHECKER__
@@ -686,14 +664,9 @@ static inline void rcu_read_unlock(void)
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is equivalent of rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
  *
  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
  * must occur in the same context, for example, it is illegal to invoke
@@ -726,10 +699,9 @@ static inline void rcu_read_unlock_bh(void)
 /**
  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent of rcu_read_lock(), but disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
  *
  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
  * must occur in the same context, for example, it is illegal to invoke
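
After the flavor consolidation, rcu_read_lock_bh() and rcu_read_lock_sched() still mark read-side critical sections, and plain synchronize_rcu() and call_rcu() now wait for them as well. A hedged sketch of the two reader styles documented in the comments above (struct cfg and global_cfg are hypothetical names, and the pointer is assumed never NULL):

struct cfg {
	int val;
};

static struct cfg __rcu *global_cfg;	/* hypothetical, assumed never NULL */

static int read_cfg_bh(void)		/* softirq-disabled reader */
{
	int val;

	rcu_read_lock_bh();
	val = rcu_dereference_bh(global_cfg)->val;
	rcu_read_unlock_bh();
	return val;
}

static int read_cfg_sched(void)		/* preemption-disabled reader */
{
	int val;

	rcu_read_lock_sched();
	val = rcu_dereference_sched(global_cfg)->val;
	rcu_read_unlock_sched();
	return val;
}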
@@ -885,4 +857,96 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
 
 
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/*
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure.  Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+	rhp->func = (rcu_callback_t)~0L;
+}
+
+/*
+ * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @func: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if the @rhp has been passed to call_rcu() with @func,
+ * and @false otherwise.  Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation.  One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+	if (READ_ONCE(rhp->func) == f)
+		return true;
+	WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+	return false;
+}
+
+
+/* Transitional pre-consolidation compatibility definitions. */
+
+static inline void synchronize_rcu_bh(void)
+{
+	synchronize_rcu();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+	call_rcu(head, func);
+}
+
+static inline void rcu_barrier_bh(void)
+{
+	rcu_barrier();
+}
+
+static inline void synchronize_sched(void)
+{
+	synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+	call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+	rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+	return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+	cond_synchronize_rcu(oldstate);
+}
+
 #endif /* __LINUX_RCUPDATE_H */
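
The new rcu_head_init()/rcu_head_after_call_rcu() pair lets debug code ask whether a given structure has already been handed to call_rcu(). A minimal sketch of a caller (struct foo, foo_reclaim(), foo_alloc(), and foo_is_dying() are invented names, not part of this patch):

struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *fp = kzalloc(sizeof(*fp), gfp);

	if (fp)
		rcu_head_init(&fp->rh);	/* must precede any rcu_head_after_call_rcu() */
	return fp;
}

/*
 * Caller must be in an RCU read-side critical section covering its fetch
 * of the pointer to @fp, so this cannot race with callback invocation.
 */
static bool foo_is_dying(struct foo *fp)
{
	return rcu_head_after_call_rcu(&fp->rh, foo_reclaim);
}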
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 57f371344152..8a16c3eb3dd0 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -33,17 +33,17 @@ do { \
 
 /**
  * synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for the flavors to wait on.
+ * @...: List of call_rcu() functions for different grace periods to wait on
  *
- * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
- * on concurrent RCU and RCU-bh grace periods.  Waiting on a give SRCU
+ * This macro waits concurrently for multiple types of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
+ * on concurrent RCU and RCU-tasks grace periods.  Waiting on a give SRCU
  * domain requires you to write a wrapper function for that SRCU domain's
  * call_srcu() function, supplying the corresponding srcu_struct.
  *
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
- * is automatically a grace period.
+ * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU,
+ * given that anywhere synchronize_rcu_mult() can be called is automatically
+ * a grace period.
  */
 #define synchronize_rcu_mult(...) \
 	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
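
As the comment says, waiting on an SRCU domain via synchronize_rcu_mult() requires a small call_rcu()-shaped wrapper that supplies the srcu_struct. A sketch using a hypothetical my_srcu domain (none of these names come from the patch; call_rcu_tasks assumes CONFIG_TASKS_RCU):

DEFINE_STATIC_SRCU(my_srcu);		/* hypothetical SRCU domain */

/* call_rcu()-compatible wrapper that bakes in &my_srcu. */
static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(&my_srcu, head, func);
}

static void wait_for_everything(void)
{
	/* Waits for RCU, RCU-tasks, and my_srcu grace periods concurrently. */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks, call_my_srcu);
}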
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 8d9a0ea8f0b5..af65d1f36ddb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,12 +27,6 @@
 
 #include <linux/ktime.h>
 
-struct rcu_dynticks;
-static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
-{
-	return 0;
-}
-
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
@@ -46,53 +40,28 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
 	might_sleep();
 }
 
-static inline unsigned long get_state_synchronize_sched(void)
-{
-	return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
-	might_sleep();
-}
-
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+extern void rcu_barrier(void);
 
 static inline void synchronize_rcu_expedited(void)
 {
-	synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+	synchronize_rcu();
 }
 
-static inline void rcu_barrier(void)
+static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-	rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
-}
-
-static inline void synchronize_rcu_bh(void)
-{
-	synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_sched();
+	call_rcu(head, func);
 }
 
-static inline void synchronize_sched_expedited(void)
-{
-	synchronize_sched();
-}
+void rcu_qs(void);
 
-static inline void kfree_call_rcu(struct rcu_head *head,
-				  rcu_callback_t func)
+static inline void rcu_softirq_qs(void)
 {
-	call_rcu(head, func);
+	rcu_qs();
 }
 
 #define rcu_note_context_switch(preempt) \
 	do { \
-		rcu_sched_qs(); \
+		rcu_qs(); \
 		rcu_tasks_qs(current); \
 	} while (0)
 
@@ -108,6 +77,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
  */
 static inline void rcu_virt_note_context_switch(int cpu) { }
 static inline void rcu_cpu_stall_reset(void) { }
+static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
 static inline void rcu_idle_enter(void) { }
 static inline void rcu_idle_exit(void) { }
 static inline void rcu_irq_enter(void) { }
@@ -115,6 +85,11 @@ static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
 static inline void exit_rcu(void) { }
+static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+	return false;
+}
+static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
 #ifdef CONFIG_SRCU
 void rcu_scheduler_starting(void);
 #else /* #ifndef CONFIG_SRCU */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 914655848ef6..7f83179177d1 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,6 +30,7 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
+void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
 int rcu_needs_cpu(u64 basem, u64 *nextevt);
 void rcu_cpu_stall_reset(void);
@@ -44,41 +45,13 @@ static inline void rcu_virt_note_context_switch(int cpu)
 	rcu_note_context_switch(false);
 }
 
-void synchronize_rcu_bh(void);
-void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
-
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.  In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier.  Failing to observe
- * these restriction will result in deadlock.
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_sched_expedited();
-}
-
 void rcu_barrier(void);
-void rcu_barrier_bh(void);
-void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
@@ -93,7 +66,9 @@ void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 void rcu_end_inkernel_boot(void);
 bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPT
 void rcu_all_qs(void);
+#endif
 
 /* RCUtree hotplug events */
 int rcutree_prepare_cpu(unsigned int cpu);
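
With get_state_synchronize_sched() and cond_synchronize_sched() removed, the surviving get_state_synchronize_rcu()/cond_synchronize_rcu() pair serves all users. A hedged sketch of the usual pattern (update_foo(), struct foo, and the caller-held update-side lock are hypothetical, not part of this patch):

/* Caller holds the update-side lock and passes the displaced old version. */
static void update_foo(struct foo __rcu **slot, struct foo *newp,
		       struct foo *oldp)
{
	unsigned long gp_state;

	rcu_assign_pointer(*slot, newp);	/* unpublish the old version */
	gp_state = get_state_synchronize_rcu();	/* snapshot after unpublishing */

	/* ... possibly lengthy unrelated work ... */

	cond_synchronize_rcu(gp_state);		/* waits only if no GP has elapsed */
	kfree(oldp);				/* pre-existing readers are done */
}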
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 977cb57d7bc9..004ca21f7e80 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -571,12 +571,8 @@ union rcu_special {
 	struct {
 		u8			blocked;
 		u8			need_qs;
-		u8			exp_need_qs;
-
-		/* Otherwise the compiler can store garbage here: */
-		u8			pad;
 	} b; /* Bits. */
-	u32 s; /* Set of bits. */
+	u16 s; /* Set of bits. */
 };
 
 enum perf_event_task_context {
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 61dfd93b6ee4..48fad21109fc 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title);
 int torture_shutdown_init(int ssecs, void (*cleanup)(void));
 
 /* Task stuttering, which forces load/no-load transitions. */
-void stutter_wait(const char *title);
+bool stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */