diff options
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r-- | include/linux/rcupdate.h | 70 |
1 file changed, 36 insertions, 34 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..a0189ba67fde 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -160,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename, | |||
160 | * more than one CPU). | 160 | * more than one CPU). |
161 | */ | 161 | */ |
162 | void call_rcu(struct rcu_head *head, | 162 | void call_rcu(struct rcu_head *head, |
163 | void (*func)(struct rcu_head *head)); | 163 | rcu_callback_t func); |
164 | 164 | ||
165 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | 165 | #else /* #ifdef CONFIG_PREEMPT_RCU */ |
166 | 166 | ||
@@ -191,7 +191,7 @@ void call_rcu(struct rcu_head *head, | |||
191 | * memory ordering guarantees. | 191 | * memory ordering guarantees. |
192 | */ | 192 | */ |
193 | void call_rcu_bh(struct rcu_head *head, | 193 | void call_rcu_bh(struct rcu_head *head, |
194 | void (*func)(struct rcu_head *head)); | 194 | rcu_callback_t func); |
195 | 195 | ||
196 | /** | 196 | /** |
197 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. | 197 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. |
@@ -213,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head, | |||
213 | * memory ordering guarantees. | 213 | * memory ordering guarantees. |
214 | */ | 214 | */ |
215 | void call_rcu_sched(struct rcu_head *head, | 215 | void call_rcu_sched(struct rcu_head *head, |
216 | void (*func)(struct rcu_head *rcu)); | 216 | rcu_callback_t func); |
217 | 217 | ||
218 | void synchronize_sched(void); | 218 | void synchronize_sched(void); |
219 | 219 | ||
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
230 | struct rcu_synchronize *rs_array); | 230 | struct rcu_synchronize *rs_array); |
231 | 231 | ||
232 | #define _wait_rcu_gp(checktiny, ...) \ | 232 | #define _wait_rcu_gp(checktiny, ...) \ |
233 | do { \ | 233 | do { \ |
234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ | 234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ |
235 | const int __n = ARRAY_SIZE(__crcu_array); \ | 235 | struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ |
236 | struct rcu_synchronize __rs_array[__n]; \ | 236 | __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ |
237 | \ | 237 | __crcu_array, __rs_array); \ |
238 | __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ | ||
239 | } while (0) | 238 | } while (0) |
240 | 239 | ||
241 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) | 240 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) |
@@ -275,7 +274,7 @@ do { \ | |||
275 | * See the description of call_rcu() for more detailed information on | 274 | * See the description of call_rcu() for more detailed information on |
276 | * memory ordering guarantees. | 275 | * memory ordering guarantees. |
277 | */ | 276 | */ |
278 | void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); | 277 | void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); |
279 | void synchronize_rcu_tasks(void); | 278 | void synchronize_rcu_tasks(void); |
280 | void rcu_barrier_tasks(void); | 279 | void rcu_barrier_tasks(void); |
281 | 280 | ||
@@ -298,12 +297,14 @@ void synchronize_rcu(void); | |||
298 | 297 | ||
299 | static inline void __rcu_read_lock(void) | 298 | static inline void __rcu_read_lock(void) |
300 | { | 299 | { |
301 | preempt_disable(); | 300 | if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) |
301 | preempt_disable(); | ||
302 | } | 302 | } |
303 | 303 | ||
304 | static inline void __rcu_read_unlock(void) | 304 | static inline void __rcu_read_unlock(void) |
305 | { | 305 | { |
306 | preempt_enable(); | 306 | if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) |
307 | preempt_enable(); | ||
307 | } | 308 | } |
308 | 309 | ||
309 | static inline void synchronize_rcu(void) | 310 | static inline void synchronize_rcu(void) |
@@ -536,29 +537,9 @@ static inline int rcu_read_lock_sched_held(void) | |||
536 | 537 | ||
537 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 538 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
538 | 539 | ||
539 | /* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */ | ||
540 | static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void) | ||
541 | { | ||
542 | } | ||
543 | |||
544 | #ifdef CONFIG_PROVE_RCU | 540 | #ifdef CONFIG_PROVE_RCU |
545 | 541 | ||
546 | /** | 542 | /** |
547 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | ||
548 | * @c: condition to check | ||
549 | * @s: informative message | ||
550 | */ | ||
551 | #define rcu_lockdep_assert(c, s) \ | ||
552 | do { \ | ||
553 | static bool __section(.data.unlikely) __warned; \ | ||
554 | deprecate_rcu_lockdep_assert(); \ | ||
555 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | ||
556 | __warned = true; \ | ||
557 | lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ | ||
558 | } \ | ||
559 | } while (0) | ||
560 | |||
561 | /** | ||
562 | * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met | 543 | * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met |
563 | * @c: condition to check | 544 | * @c: condition to check |
564 | * @s: informative message | 545 | * @s: informative message |
@@ -595,7 +576,6 @@ static inline void rcu_preempt_sleep_check(void) | |||
595 | 576 | ||
596 | #else /* #ifdef CONFIG_PROVE_RCU */ | 577 | #else /* #ifdef CONFIG_PROVE_RCU */ |
597 | 578 | ||
598 | #define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert() | ||
599 | #define RCU_LOCKDEP_WARN(c, s) do { } while (0) | 579 | #define RCU_LOCKDEP_WARN(c, s) do { } while (0) |
600 | #define rcu_sleep_check() do { } while (0) | 580 | #define rcu_sleep_check() do { } while (0) |
601 | 581 | ||
@@ -812,6 +792,28 @@ static inline void rcu_preempt_sleep_check(void) | |||
812 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) | 792 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
813 | 793 | ||
814 | /** | 794 | /** |
795 | * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism | ||
796 | * @p: The pointer to hand off | ||
797 | * | ||
798 | * This is simply an identity function, but it documents where a pointer | ||
799 | * is handed off from RCU to some other synchronization mechanism, for | ||
800 | * example, reference counting or locking. In C11, it would map to | ||
801 | * kill_dependency(). It could be used as follows: | ||
802 | * | ||
803 | * rcu_read_lock(); | ||
804 | * p = rcu_dereference(gp); | ||
805 | * long_lived = is_long_lived(p); | ||
806 | * if (long_lived) { | ||
807 | * if (!atomic_inc_not_zero(p->refcnt)) | ||
808 | * long_lived = false; | ||
809 | * else | ||
810 | * p = rcu_pointer_handoff(p); | ||
811 | * } | ||
812 | * rcu_read_unlock(); | ||
813 | */ | ||
814 | #define rcu_pointer_handoff(p) (p) | ||
815 | |||
816 | /** | ||
815 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section | 817 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section |
816 | * | 818 | * |
817 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 819 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
@@ -1066,7 +1068,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
1066 | #define __kfree_rcu(head, offset) \ | 1068 | #define __kfree_rcu(head, offset) \ |
1067 | do { \ | 1069 | do { \ |
1068 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ | 1070 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ |
1069 | kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ | 1071 | kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ |
1070 | } while (0) | 1072 | } while (0) |
1071 | 1073 | ||
1072 | /** | 1074 | /** |