Diffstat (limited to 'include/linux/rcupdate.h')
 -rw-r--r--  include/linux/rcupdate.h | 300
 1 file changed, 137 insertions(+), 163 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8f4f881a0ad8..2cf4226ade7e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -33,6 +33,7 @@
 #ifndef __LINUX_RCUPDATE_H
 #define __LINUX_RCUPDATE_H
 
+#include <linux/types.h>
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
@@ -64,32 +65,74 @@ static inline void rcutorture_record_progress(unsigned long vernum)
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
 
+/* Exported common interfaces */
+
+#ifdef CONFIG_PREEMPT_RCU
+
 /**
- * struct rcu_head - callback structure for use with RCU
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed.  However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
  */
-struct rcu_head {
-	struct rcu_head *next;
-	void (*func)(struct rcu_head *head);
-};
+extern void call_rcu(struct rcu_head *head,
+		     void (*func)(struct rcu_head *head));
 
-/* Exported common interfaces */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define	call_rcu	call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
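To illustrate the call_rcu() pattern documented above, here is a minimal, hypothetical update/read sketch. Everything named foo (the structure, foo_mutex, and the helpers) is invented for this illustration and is not part of the patch:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int value;
	struct rcu_head rcu;		/* queued by call_rcu() */
};

static struct foo __rcu *global_foo;
static DEFINE_MUTEX(foo_mutex);		/* serializes updaters */

/* Runs after a grace period: no pre-existing reader can still see @head. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Update side: publish a new version, defer freeing the old one. */
static int foo_update(int value)
{
	struct foo *new_fp, *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return -ENOMEM;
	new_fp->value = value;

	mutex_lock(&foo_mutex);
	old_fp = rcu_dereference_protected(global_foo,
					   lockdep_is_held(&foo_mutex));
	rcu_assign_pointer(global_foo, new_fp);
	mutex_unlock(&foo_mutex);

	if (old_fp)
		call_rcu(&old_fp->rcu, foo_reclaim);
	return 0;
}

/* Read side: rcu_read_lock()/rcu_read_unlock() delimit the critical section. */
static int foo_get_value(void)
{
	struct foo *fp;
	int val = -1;

	rcu_read_lock();
	fp = rcu_dereference(global_foo);
	if (fp)
		val = fp->value;
	rcu_read_unlock();
	return val;
}

Note that foo_reclaim() may run concurrently with readers that began after call_rcu(); only readers that could have seen old_fp are guaranteed to have finished.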
+/**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ *  OR
+ *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *  These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+			void (*func)(struct rcu_head *head));
+
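A hypothetical sketch of the call_rcu_bh() flavor just documented, reusing struct foo and foo_reclaim() from the sketch above; a process-context reader must block softirqs around its accesses:

/* Reader in process context: rcu_read_lock_bh() keeps softirqs away. */
static int foo_get_value_bh(void)
{
	struct foo *fp;
	int val = -1;

	rcu_read_lock_bh();
	fp = rcu_dereference_bh(global_foo);
	if (fp)
		val = fp->value;
	rcu_read_unlock_bh();
	return val;
}

/* Updater: @old_fp may be freed once every softirq handler and
 * rcu_read_lock_bh() section that could have seen it has completed. */
static void foo_retire_bh(struct foo *old_fp)
{
	call_rcu_bh(&old_fp->rcu, foo_reclaim);
}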
+/**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_sched() assumes
+ * that the read-side critical sections end on enabling of preemption
+ * or on voluntary preemption.
+ * RCU read-side critical sections are delimited by :
+ *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
+ *  OR
+ *  anything that disables preemption.
+ *  These may be nested.
+ */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
-extern void synchronize_sched(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-static inline void __rcu_read_lock_bh(void)
-{
-	local_bh_disable();
-}
 
-static inline void __rcu_read_unlock_bh(void)
-{
-	local_bh_enable();
-}
+extern void synchronize_sched(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
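Similarly, a hypothetical sketch for the sched flavor documented in this hunk: any preemption-disabled region acts as the read-side critical section, and call_rcu_sched() or synchronize_sched() waits for all of them (struct foo and foo_reclaim() are again the invented names from above):

/* Reader: rcu_read_lock_sched() is equivalent to disabling preemption. */
static int foo_get_value_sched(void)
{
	struct foo *fp;
	int val = -1;

	rcu_read_lock_sched();
	fp = rcu_dereference_sched(global_foo);
	if (fp)
		val = fp->value;
	rcu_read_unlock_sched();
	return val;
}

/* Updater, asynchronous: queue a callback for after the grace period ... */
static void foo_retire_sched(struct foo *old_fp)
{
	call_rcu_sched(&old_fp->rcu, foo_reclaim);
}

/* ... or synchronous: wait for the grace period, then free directly. */
static void foo_retire_sched_sync(struct foo *old_fp)
{
	synchronize_sched();	/* all prior preempt-disabled regions are done */
	kfree(old_fp);
}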
@@ -152,6 +195,15 @@ static inline void rcu_exit_nohz(void)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
+/*
+ * Infrastructure to implement the synchronize_() primitives in
+ * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
+ */
+
+typedef void call_rcu_func_t(struct rcu_head *head,
+			     void (*func)(struct rcu_head *head));
+void wait_rcu_gp(call_rcu_func_t crf);
+
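The call_rcu_func_t/wait_rcu_gp() pair added here is what lets each RCU flavor build its synchronous synchronize_*() primitive out of its asynchronous call_rcu*() function. A rough sketch of the idea, using the struct rcu_synchronize and wakeme_after_rcu() that this patch moves out of the header (the real implementation lives in the RCU .c files and also handles debug-object bookkeeping for the on-stack rcu_head):

#include <linux/completion.h>
#include <linux/rcupdate.h>

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* RCU callback: runs once a full grace period has elapsed. */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu =
		container_of(head, struct rcu_synchronize, head);

	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	crf(&rcu.head, wakeme_after_rcu);	/* crf is call_rcu, call_rcu_bh, ... */
	wait_for_completion(&rcu.completion);	/* block until the callback fires */
}

/* synchronize_rcu() can then be implemented as little more than wait_rcu_gp(call_rcu). */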
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
 #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
@@ -297,19 +349,31 @@ extern int rcu_my_thread_group_empty(void);
 /**
  * rcu_lockdep_assert - emit lockdep splat if specified condition not met
  * @c: condition to check
+ * @s: informative message
  */
-#define rcu_lockdep_assert(c)						\
+#define rcu_lockdep_assert(c, s)					\
 	do {								\
 		static bool __warned;					\
 		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
 			__warned = true;				\
-			lockdep_rcu_dereference(__FILE__, __LINE__);	\
+			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
 		}							\
 	} while (0)
 
+#define rcu_sleep_check()						\
+	do {								\
+		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
+				   "Illegal context switch in RCU-bh"	\
+				   " read-side critical section");	\
+		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
+				   "Illegal context switch in RCU-sched"\
+				   " read-side critical section");	\
+	} while (0)
+
 #else /* #ifdef CONFIG_PROVE_RCU */
 
-#define rcu_lockdep_assert(c) do { } while (0)
+#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_sleep_check() do { } while (0)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
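The new @s argument means a failed assertion now prints a human-readable explanation via lockdep_rcu_suspicious() instead of only a file and line. A hypothetical caller (reusing the invented foo example from above) might document its requirement like this:

/* Must be called inside an RCU read-side critical section. */
static int foo_peek_value(void)
{
	struct foo *fp;

	rcu_lockdep_assert(rcu_read_lock_held(),
			   "foo_peek_value() called outside an RCU read-side critical section");
	fp = rcu_dereference(global_foo);
	return fp ? fp->value : -1;
}

rcu_sleep_check(), built from two such assertions, catches an illegal context switch inside an RCU-bh or RCU-sched read-side critical section.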
@@ -338,14 +402,16 @@ extern int rcu_my_thread_group_empty(void);
 #define __rcu_dereference_check(p, c, space) \
 	({ \
 		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
-		rcu_lockdep_assert(c); \
+		rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
+				      " usage"); \
 		rcu_dereference_sparse(p, space); \
 		smp_read_barrier_depends(); \
 		((typeof(*p) __force __kernel *)(_________p1)); \
 	})
 #define __rcu_dereference_protected(p, c, space) \
 	({ \
-		rcu_lockdep_assert(c); \
+		rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
+				      " usage"); \
 		rcu_dereference_sparse(p, space); \
 		((typeof(*p) __force __kernel *)(p)); \
 	})
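With the messages above, a suspicious use of the checked accessors is now reported by name. For example, a hypothetical accessor that is legal either under rcu_read_lock() or while holding the update-side lock (names from the invented foo example) could be written as:

static int foo_read_either(void)
{
	struct foo *fp;

	/* rcu_dereference_check() also accepts rcu_read_lock_held(). */
	fp = rcu_dereference_check(global_foo,
				   lockdep_is_held(&foo_mutex));
	return fp ? fp->value : -1;
}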
@@ -359,15 +425,15 @@ extern int rcu_my_thread_group_empty(void);
 #define __rcu_dereference_index_check(p, c) \
 	({ \
 		typeof(p) _________p1 = ACCESS_ONCE(p); \
-		rcu_lockdep_assert(c); \
+		rcu_lockdep_assert(c, \
+				   "suspicious rcu_dereference_index_check()" \
+				   " usage"); \
 		smp_read_barrier_depends(); \
 		(_________p1); \
 	})
 #define __rcu_assign_pointer(p, v, space) \
 	({ \
-		if (!__builtin_constant_p(v) || \
-		    ((v) != NULL)) \
-			smp_wmb(); \
+		smp_wmb(); \
 		(p) = (typeof(*v) __force space *)(v); \
 	})
 
@@ -500,26 +566,6 @@ extern int rcu_my_thread_group_empty(void);
 #define rcu_dereference_protected(p, c) \
 	__rcu_dereference_protected((p), (c), __rcu)
 
-/**
- * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * This is the RCU-bh counterpart to rcu_dereference_protected().
- */
-#define rcu_dereference_bh_protected(p, c) \
-	__rcu_dereference_protected((p), (c), __rcu)
-
-/**
- * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * This is the RCU-sched counterpart to rcu_dereference_protected().
- */
-#define rcu_dereference_sched_protected(p, c) \
-	__rcu_dereference_protected((p), (c), __rcu)
-
 
 /**
  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
@@ -630,7 +676,7 @@ static inline void rcu_read_unlock(void)
  */
 static inline void rcu_read_lock_bh(void)
 {
-	__rcu_read_lock_bh();
+	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_read_acquire_bh();
 }
@@ -644,7 +690,7 @@ static inline void rcu_read_unlock_bh(void)
 {
 	rcu_read_release_bh();
 	__release(RCU_BH);
-	__rcu_read_unlock_bh();
+	local_bh_enable();
 }
 
 /**
@@ -698,11 +744,18 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * any prior initialization.  Returns the value assigned.
  *
  * Inserts memory barriers on architectures that require them
- * (pretty much all of them other than x86), and also prevents
- * the compiler from reordering the code that initializes the
- * structure after the pointer assignment.  More importantly, this
- * call documents which pointers will be dereferenced by RCU read-side
- * code.
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment.  More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption.  So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
  */
 #define rcu_assign_pointer(p, v) \
 	__rcu_assign_pointer((p), (v), __rcu)
@@ -710,105 +763,38 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 /**
  * RCU_INIT_POINTER() - initialize an RCU protected pointer
  *
- * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep
- * splats.
+ * Initialize an RCU-protected pointer in special cases where readers
+ * do not need ordering constraints on the CPU or the compiler.  These
+ * special cases are:
+ *
+ * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 2.	The caller has taken whatever steps are required to prevent
+ *	RCU readers from concurrently accessing this pointer -or-
+ * 3.	The referenced data structure has already been exposed to
+ *	readers either at compile time or via rcu_assign_pointer() -and-
+ *	a.	You have not made -any- reader-visible changes to
+ *		this structure since then -or-
+ *	b.	It is OK for readers accessing this structure from its
+ *		new location to see the old state of the structure.  (For
+ *		example, the changes were to statistical counters or to
+ *		other state where exact synchronization is not required.)
+ *
+ * Failure to follow these rules governing use of RCU_INIT_POINTER() will
+ * result in impossible-to-diagnose memory corruption.  As in the structures
+ * will look OK in crash dumps, but any concurrent RCU readers might
+ * see pre-initialized values of the referenced data structure.  So
+ * please be very careful how you use RCU_INIT_POINTER()!!!
+ *
+ * If you are creating an RCU-protected linked structure that is accessed
+ * by a single external-to-structure RCU-protected pointer, then you may
+ * use RCU_INIT_POINTER() to initialize the internal RCU-protected
+ * pointers, but you must use rcu_assign_pointer() to initialize the
+ * external-to-structure pointer -after- you have completely initialized
+ * the reader-accessible portions of the linked structure.
  */
 #define RCU_INIT_POINTER(p, v) \
 		p = (typeof(*v) __force __rcu *)(v)
 
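A hypothetical illustration of the cases enumerated above, continuing the invented foo example: NULLing out a pointer needs no ordering (case 1), and internal pointers of a not-yet-published structure may be set without barriers as long as the single external pointer is still published with rcu_assign_pointer():

/* Case 1: clearing the pointer requires no memory ordering at all. */
static void foo_clear(void)
{
	RCU_INIT_POINTER(global_foo, NULL);
}

/* Cases 2/3: @fl is invisible to readers until the final assignment. */
struct foo_list {
	struct foo __rcu *first;
};

static struct foo_list __rcu *global_list;

static int foo_list_publish(struct foo *fp)
{
	struct foo_list *fl = kmalloc(sizeof(*fl), GFP_KERNEL);

	if (!fl)
		return -ENOMEM;
	RCU_INIT_POINTER(fl->first, fp);	/* not yet reader-visible */
	rcu_assign_pointer(global_list, fl);	/* publication point */
	return 0;
}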
-/* Infrastructure to implement the synchronize_() primitives. */
-
-struct rcu_synchronize {
-	struct rcu_head head;
-	struct completion completion;
-};
-
-extern void wakeme_after_rcu(struct rcu_head *head);
-
-#ifdef CONFIG_PREEMPT_RCU
-
-/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed.  However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked.  RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-extern void call_rcu(struct rcu_head *head,
-		     void (*func)(struct rcu_head *head));
-
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
-#define	call_rcu	call_rcu_sched
-
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- *  OR
- *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *  These may be nested.
- */
-extern void call_rcu_bh(struct rcu_head *head,
-			void (*func)(struct rcu_head *head));
-
-/*
- * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
- * by call_rcu() and rcu callback execution, and are therefore not part of the
- * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
- */
-
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-# define STATE_RCU_HEAD_READY	0
-# define STATE_RCU_HEAD_QUEUED	1
-
-extern struct debug_obj_descr rcuhead_debug_descr;
-
-static inline void debug_rcu_head_queue(struct rcu_head *head)
-{
-	WARN_ON_ONCE((unsigned long)head & 0x3);
-	debug_object_activate(head, &rcuhead_debug_descr);
-	debug_object_active_state(head, &rcuhead_debug_descr,
-				  STATE_RCU_HEAD_READY,
-				  STATE_RCU_HEAD_QUEUED);
-}
-
-static inline void debug_rcu_head_unqueue(struct rcu_head *head)
-{
-	debug_object_active_state(head, &rcuhead_debug_descr,
-				  STATE_RCU_HEAD_QUEUED,
-				  STATE_RCU_HEAD_READY);
-	debug_object_deactivate(head, &rcuhead_debug_descr);
-}
-#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void debug_rcu_head_queue(struct rcu_head *head)
-{
-}
-
-static inline void debug_rcu_head_unqueue(struct rcu_head *head)
-{
-}
-#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-
 static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
 {
 	return offset < 4096;
@@ -827,18 +813,6 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
 	call_rcu(head, (rcu_callback)offset);
 }
 
-extern void kfree(const void *);
-
-static inline void __rcu_reclaim(struct rcu_head *head)
-{
-	unsigned long offset = (unsigned long)head->func;
-
-	if (__is_kfree_rcu_offset(offset))
-		kfree((void *)head - offset);
-	else
-		head->func(head);
-}
-
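The __is_kfree_rcu_offset()/__kfree_rcu() machinery kept above works because kfree_rcu() passes the offset of the rcu_head within its enclosing structure where a callback function pointer would normally go; the callback-invocation code (the __rcu_reclaim() logic this hunk moves out of the header) recognizes these small "function pointers" and calls kfree() on the enclosing object instead. A hypothetical use, again with the invented struct foo:

static void foo_retire_simple(struct foo *old_fp)
{
	/*
	 * No hand-written callback needed: after a grace period the
	 * RCU core effectively performs
	 * kfree((void *)&old_fp->rcu - offsetof(struct foo, rcu)),
	 * i.e. kfree(old_fp).
	 */
	kfree_rcu(old_fp, rcu);
}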
 /**
  * kfree_rcu() - kfree an object after a grace period.
  * @ptr: pointer to kfree