author	Ingo Molnar <mingo@kernel.org>	2018-12-04 01:52:30 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-12-04 01:52:30 -0500
commit	4bbfd7467cfc7d42e18d3008fa6a28ffd56e901a (patch)
tree	3b6d27e740976d0393fd13ae675ae6a0e07812a9 /include/linux
parent	2595646791c319cadfdbf271563aac97d0843dc7 (diff)
parent	5ac7cdc29897e5fc3f5e214f3f8c8b03ef8d7029 (diff)
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU changes from Paul E. McKenney:

 - Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.

 - Replace calls of RCU-bh and RCU-sched update-side functions to their
   vanilla RCU counterparts. This series is a step towards complete
   removal of the RCU-bh and RCU-sched update-side functions.

     ( Note that some of these conversions are going upstream via their
       respective maintainers. )

 - Documentation updates, including a number of flavor-consolidation
   updates from Joel Fernandes.

 - Miscellaneous fixes.

 - Automate generation of the initrd filesystem used for rcutorture
   testing.

 - Convert spin_is_locked() assertions to instead use lockdep.

     ( Note that some of these conversions are going upstream via their
       respective maintainers. )

 - SRCU updates, especially including a fix from Dennis Krein for a
   bag-on-head-class bug.

 - RCU torture-test updates.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
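The RCU-bh/RCU-sched conversions mentioned above follow a mechanical before/after pattern. A minimal sketch, under the assumption of a hypothetical driver with a single RCU-protected slot (my_slot and my_slot_clear are illustrative names, not part of this merge): with the flavors consolidated, synchronize_rcu() also waits for preempt-, irq- and bh-disabled readers, so the _bh/_sched update-side calls can simply be dropped.

#include <linux/rcupdate.h>
#include <linux/slab.h>

static int __rcu *my_slot;	/* hypothetical RCU-protected pointer */

static void my_slot_clear(void)
{
	int *old = rcu_dereference_protected(my_slot, 1);

	rcu_assign_pointer(my_slot, NULL);
	/* Was: synchronize_sched(); or synchronize_rcu_bh(); */
	synchronize_rcu();	/* now also waits for preempt-, irq- and bh-disabled readers */
	kfree(old);
}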
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/percpu-rwsem.h	2
-rw-r--r--	include/linux/rcupdate_wait.h	17
-rw-r--r--	include/linux/sched.h	4
-rw-r--r--	include/linux/srcu.h	79
-rw-r--r--	include/linux/srcutiny.h	24
-rw-r--r--	include/linux/srcutree.h	8
-rw-r--r--	include/linux/tracepoint.h	2
-rw-r--r--	include/linux/types.h	4
8 files changed, 64 insertions(+), 76 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 79b99d653e03..71b75643c432 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
 	 * cannot both change sem->state from readers_fast and start checking
 	 * counters while we are here. So if we see !sem->state, we know that
 	 * the writer won't be checking until we're past the preempt_enable()
-	 * and that one the synchronize_sched() is done, the writer will see
+	 * and that once the synchronize_rcu() is done, the writer will see
 	 * anything we did within this RCU-sched read-size critical section.
 	 */
 	__this_cpu_inc(*sem->read_count);
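The comment fixed in this hunk describes the reader fast path: readers only bump a per-CPU counter, and percpu_down_write() relies on an RCU grace period to make sure the writer observes them. A hedged usage sketch (state_sem and state are made-up names; the percpu_down/up calls are the real API):

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(state_sem);
static int state;

static int state_read(void)
{
	int v;

	percpu_down_read(&state_sem);	/* fast path: per-CPU counter only */
	v = state;
	percpu_up_read(&state_sem);
	return v;
}

static void state_update(int v)
{
	percpu_down_write(&state_sem);	/* waits for a grace period plus all readers */
	state = v;
	percpu_up_write(&state_sem);
}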
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 8a16c3eb3dd0..c0578ba23c1a 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -31,21 +31,4 @@ do { \
 
 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
 
-/**
- * synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for different grace periods to wait on
- *
- * This macro waits concurrently for multiple types of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
- * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU
- * domain requires you to write a wrapper function for that SRCU domain's
- * call_srcu() function, supplying the corresponding srcu_struct.
- *
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU,
- * given that anywhere synchronize_rcu_mult() can be called is automatically
- * a grace period.
- */
-#define synchronize_rcu_mult(...) \
-	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
-
 #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
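For reference, the kerneldoc being removed above described usage like the sketch below: wait concurrently for a vanilla RCU grace period and an RCU-tasks grace period. This is illustrative only; the diff shows the macro leaving this public header, not where any remaining users end up.

#include <linux/rcupdate_wait.h>	/* location of the macro before this change */

static void wait_for_rcu_and_rcu_tasks(void)
{
	/* Per the removed kerneldoc: pass one call_rcu()-style function per flavor. */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
}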
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 291a9bd5b97f..4f8fc5294291 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -572,8 +572,10 @@ union rcu_special {
 	struct {
 		u8			blocked;
 		u8			need_qs;
+		u8			exp_hint; /* Hint for performance. */
+		u8			pad; /* No garbage from compiler! */
 	} b; /* Bits. */
-	u16 s; /* Set of bits. */
+	u32 s; /* Set of bits. */
 };
 
 enum perf_event_task_context {
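Widening ->s from u16 to u32 keeps the "set of bits" view covering the two new bytes added to ->b. A small illustrative snippet (rcu_special_demo is a made-up function, not kernel code):

#include <linux/sched.h>

static void rcu_special_demo(void)
{
	union rcu_special rs = { .s = 0 };

	rs.b.blocked = 1;	/* reader blocked within an RCU read-side section */
	rs.b.exp_hint = 1;	/* new byte: expedited-grace-period hint */
	WARN_ON_ONCE(rs.s == 0); /* both writes are visible through the u32 view */
}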
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 67135d4a8a30..c614375cd264 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -38,20 +38,20 @@ struct srcu_struct;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
 		       struct lock_class_key *key);
 
-#define init_srcu_struct(sp) \
+#define init_srcu_struct(ssp) \
 ({ \
 	static struct lock_class_key __srcu_key; \
 	\
-	__init_srcu_struct((sp), #sp, &__srcu_key); \
+	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
 })
 
 #define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-int init_srcu_struct(struct srcu_struct *sp);
+int init_srcu_struct(struct srcu_struct *ssp);
 
 #define __SRCU_DEP_MAP_INIT(srcu_name)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp);
 struct srcu_struct { };
 #endif
 
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
 		void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced);
-int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
-void synchronize_srcu(struct srcu_struct *sp);
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void synchronize_srcu(struct srcu_struct *ssp);
 
 /**
  * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
  *
  * Must invoke this after you are finished using a given srcu_struct that
  * was initialized via init_srcu_struct(), else you leak memory.
  */
-static inline void cleanup_srcu_struct(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
 {
-	_cleanup_srcu_struct(sp, false);
+	_cleanup_srcu_struct(ssp, false);
 }
 
 /**
  * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
  *
  * Must invoke this after you are finished using a given srcu_struct that
  * was initialized via init_srcu_struct(), else you leak memory. Also,
@@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp)
  * (with high probability, anyway), and will also cause the srcu_struct
  * to be leaked.
  */
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
 {
-	_cleanup_srcu_struct(sp, true);
+	_cleanup_srcu_struct(ssp, true);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
- * @sp: The srcu_struct structure to check
+ * @ssp: The srcu_struct structure to check
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
  * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
  * relies on normal RCU, it can be called from the CPU which
  * is in the idle loop from an RCU point of view or offline.
  */
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	return lock_is_held(&sp->dep_map);
+	return lock_is_held(&ssp->dep_map);
 }
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 {
 	return 1;
 }
@@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 /**
  * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
  * @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
  *	really are in an SRCU read-side critical section.
  * @c: condition to check for update-side use
  *
@@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
  * to 1. The @c argument will normally be a logical expression containing
  * lockdep_is_held() calls.
  */
-#define srcu_dereference_check(p, sp, c) \
-	__rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+	__rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
 
 /**
  * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
  * @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
  *	really are in an SRCU read-side critical section.
  *
  * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
  * is enabled, invoking this outside of an RCU read-side critical
  * section will result in an RCU-lockdep splat.
  */
-#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
+#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
 
 /**
  * srcu_dereference_notrace - no tracing and no lockdep calls from here
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @ssp: pointer to the srcu_struct, which is used to check that we
+ *	really are in an SRCU read-side critical section.
  */
-#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1)
+#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
 
 /**
  * srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
+ * @ssp: srcu_struct in which to register the new reader.
  *
  * Enter an SRCU read-side critical section. Note that SRCU read-side
  * critical sections may be nested. However, it is illegal to
@@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
  * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
  * was invoked in process context.
  */
-static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
 
-	retval = __srcu_read_lock(sp);
-	rcu_lock_acquire(&(sp)->dep_map);
+	retval = __srcu_read_lock(ssp);
+	rcu_lock_acquire(&(ssp)->dep_map);
 	return retval;
 }
 
 /* Used by tracing, cannot be traced and cannot invoke lockdep. */
 static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
 
-	retval = __srcu_read_lock(sp);
+	retval = __srcu_read_lock(ssp);
 	return retval;
 }
 
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
+ * @ssp: srcu_struct in which to unregister the old reader.
  * @idx: return value from corresponding srcu_read_lock().
  *
  * Exit an SRCU read-side critical section.
  */
-static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
-	__releases(sp)
+static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
+	__releases(ssp)
 {
-	rcu_lock_release(&(sp)->dep_map);
-	__srcu_read_unlock(sp, idx);
+	rcu_lock_release(&(ssp)->dep_map);
+	__srcu_read_unlock(ssp, idx);
 }
 
 /* Used by tracing, cannot be traced and cannot call lockdep. */
 static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 {
-	__srcu_read_unlock(sp, idx);
+	__srcu_read_unlock(ssp, idx);
 }
 
 /**
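Taken together, the renamed srcu.h API is used by readers and updaters as in the following sketch (my_srcu, my_data, my_data_poll and my_data_replace are illustrative names; the SRCU calls themselves are the ones declared above):

#include <linux/srcu.h>
#include <linux/slab.h>

struct my_data {
	int val;
};

DEFINE_STATIC_SRCU(my_srcu);
static struct my_data __rcu *my_data_ptr;

static int my_data_poll(void)
{
	struct my_data *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);		/* readers may sleep inside the section */
	p = srcu_dereference(my_data_ptr, &my_srcu);
	if (p)
		val = p->val;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

static void my_data_replace(struct my_data *newp)
{
	struct my_data *oldp = rcu_dereference_protected(my_data_ptr, 1);

	rcu_assign_pointer(my_data_ptr, newp);
	synchronize_srcu(&my_srcu);	/* wait for all pre-existing readers */
	kfree(oldp);
}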
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index f41d2fb09f87..b19216aaaef2 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp);
 #define DEFINE_STATIC_SRCU(name) \
 	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
 
-void synchronize_srcu(struct srcu_struct *sp);
+void synchronize_srcu(struct srcu_struct *ssp);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
@@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp);
  * __srcu_read_unlock() must be in the same handler instance. Returns an
  * index that must be passed to the matching srcu_read_unlock().
  */
-static inline int __srcu_read_lock(struct srcu_struct *sp)
+static inline int __srcu_read_lock(struct srcu_struct *ssp)
 {
 	int idx;
 
-	idx = READ_ONCE(sp->srcu_idx);
-	WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+	idx = READ_ONCE(ssp->srcu_idx);
+	WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
 	return idx;
 }
 
-static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
 {
-	synchronize_srcu(sp);
+	synchronize_srcu(ssp);
 }
 
-static inline void srcu_barrier(struct srcu_struct *sp)
+static inline void srcu_barrier(struct srcu_struct *ssp)
 {
-	synchronize_srcu(sp);
+	synchronize_srcu(ssp);
 }
 
 /* Defined here to avoid size increase for non-torture kernels. */
-static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
 					    char *tt, char *tf)
 {
 	int idx;
 
-	idx = READ_ONCE(sp->srcu_idx) & 0x1;
+	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
 		 tt, tf, idx,
-		 READ_ONCE(sp->srcu_lock_nesting[!idx]),
-		 READ_ONCE(sp->srcu_lock_nesting[idx]));
+		 READ_ONCE(ssp->srcu_lock_nesting[!idx]),
+		 READ_ONCE(ssp->srcu_lock_nesting[idx]));
 }
 
 #endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 0ae91b3a7406..6f292bd3e7db 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -51,7 +51,7 @@ struct srcu_data {
 	unsigned long grpmask;			/* Mask for leaf srcu_node */
 						/*  ->srcu_data_have_cbs[]. */
 	int cpu;
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 };
 
 /*
@@ -138,8 +138,8 @@ struct srcu_struct {
 #define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
 #define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
 
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
+void synchronize_srcu_expedited(struct srcu_struct *ssp);
+void srcu_barrier(struct srcu_struct *ssp);
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
 
 #endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e9de8ad0bad7..9c3186578ce0 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
 static inline void tracepoint_synchronize_unregister(void)
 {
 	synchronize_srcu(&tracepoint_srcu);
-	synchronize_sched();
+	synchronize_rcu();
 }
 #else
 static inline void tracepoint_synchronize_unregister(void)
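Tracepoint probes can be invoked under either SRCU or plain RCU protection, so an unregistration path has to wait for both reader types before freeing probe data; that is what the pair of calls above provides. A hedged sketch (my_remove_probe and priv are made-up; tracepoint_probe_unregister() and tracepoint_synchronize_unregister() are the real APIs):

#include <linux/tracepoint.h>
#include <linux/slab.h>

static void my_remove_probe(struct tracepoint *tp, void *probe, void *priv)
{
	tracepoint_probe_unregister(tp, probe, priv);
	tracepoint_synchronize_unregister();	/* wait out SRCU and RCU probe callers */
	kfree(priv);
}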
diff --git a/include/linux/types.h b/include/linux/types.h
index 9834e90aa010..c2615d6a019e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,8 +212,8 @@ struct ustat {
  * weird ABI and we need to ask it explicitly.
  *
  * The alignment is required to guarantee that bit 0 of @next will be
- * clear under normal conditions -- as long as we use call_rcu(),
- * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ * clear under normal conditions -- as long as we use call_rcu() or
+ * call_srcu() to queue the callback.
  *
  * This guarantee is important for few reasons:
  *  - future call_rcu_lazy() will make use of lower bits in the pointer;
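The comment updated here concerns struct callback_head / rcu_head: because the structure is at least pointer-aligned, bit 0 of ->next stays clear and is available to the RCU machinery, provided callbacks are queued through the functions it names. A small sketch of the usual embedding (my_obj and my_obj_release are illustrative names):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int payload;
	struct rcu_head rcu;	/* pointer-aligned, so bit 0 of ->next stays clear */
};

static void my_obj_free_rcu(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_obj, rcu));
}

static void my_obj_release(struct my_obj *p)
{
	call_rcu(&p->rcu, my_obj_free_rcu);	/* one of the queueing functions named above */
}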