Diffstat (limited to 'include')
-rw-r--r--  include/linux/dcache.h     |   1
-rw-r--r--  include/linux/list.h       | 367
-rw-r--r--  include/linux/rcuclassic.h |   3
-rw-r--r--  include/linux/rculist.h    | 373
-rw-r--r--  include/linux/rcupdate.h   |  26
-rw-r--r--  include/linux/rcupreempt.h |  42
6 files changed, 437 insertions, 375 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 2a6639407c80..1f5cebf10a23 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -3,6 +3,7 @@
 
 #include <asm/atomic.h>
 #include <linux/list.h>
+#include <linux/rculist.h>
 #include <linux/spinlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
diff --git a/include/linux/list.h b/include/linux/list.h
index 08cf4f651889..139ec41d9c2e 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -85,65 +85,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
 }
 
 /*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_add_rcu(struct list_head * new,
-		struct list_head * prev, struct list_head * next)
-{
-	new->next = next;
-	new->prev = prev;
-	smp_wmb();
-	next->prev = new;
-	prev->next = new;
-}
-
-/**
- * list_add_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_add_rcu()
- * or list_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- */
-static inline void list_add_rcu(struct list_head *new, struct list_head *head)
-{
-	__list_add_rcu(new, head, head->next);
-}
-
-/**
- * list_add_tail_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_add_tail_rcu()
- * or list_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- */
-static inline void list_add_tail_rcu(struct list_head *new,
-				struct list_head *head)
-{
-	__list_add_rcu(new, head->prev, head);
-}
-
-/*
  * Delete a list entry by making the prev/next entries
  * point to each other.
  *
@@ -174,36 +115,6 @@ extern void list_del(struct list_head *entry);
 #endif
 
 /**
- * list_del_rcu - deletes entry from list without re-initialization
- * @entry: the element to delete from the list.
- *
- * Note: list_empty() on entry does not return true after this,
- * the entry is in an undefined state. It is useful for RCU based
- * lockfree traversal.
- *
- * In particular, it means that we can not poison the forward
- * pointers that may still be used for walking the list.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_del_rcu()
- * or list_add_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- *
- * Note that the caller is not permitted to immediately free
- * the newly deleted entry. Instead, either synchronize_rcu()
- * or call_rcu() must be used to defer freeing until an RCU
- * grace period has elapsed.
- */
-static inline void list_del_rcu(struct list_head *entry)
-{
-	__list_del(entry->prev, entry->next);
-	entry->prev = LIST_POISON2;
-}
-
-/**
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
  * @new : the new element to insert
@@ -227,25 +138,6 @@ static inline void list_replace_init(struct list_head *old,
 }
 
 /**
- * list_replace_rcu - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * The @old entry will be replaced with the @new entry atomically.
- * Note: @old should not be empty.
- */
-static inline void list_replace_rcu(struct list_head *old,
-				struct list_head *new)
-{
-	new->next = old->next;
-	new->prev = old->prev;
-	smp_wmb();
-	new->next->prev = new;
-	new->prev->next = new;
-	old->prev = LIST_POISON2;
-}
-
-/**
  * list_del_init - deletes entry from list and reinitialize it.
  * @entry: the element to delete from the list.
  */
@@ -369,62 +261,6 @@ static inline void list_splice_init(struct list_head *list,
 }
 
 /**
- * list_splice_init_rcu - splice an RCU-protected list into an existing list.
- * @list: the RCU-protected list to splice
- * @head: the place in the list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
- *
- * @head can be RCU-read traversed concurrently with this function.
- *
- * Note that this function blocks.
- *
- * Important note: the caller must take whatever action is necessary to
- * prevent any other updates to @head. In principle, it is possible
- * to modify the list as soon as sync() begins execution.
- * If this sort of thing becomes necessary, an alternative version
- * based on call_rcu() could be created. But only if -really-
- * needed -- there is no shortage of RCU API members.
- */
-static inline void list_splice_init_rcu(struct list_head *list,
-					struct list_head *head,
-					void (*sync)(void))
-{
-	struct list_head *first = list->next;
-	struct list_head *last = list->prev;
-	struct list_head *at = head->next;
-
-	if (list_empty(head))
-		return;
-
-	/* "first" and "last" tracking list, so initialize it. */
-
-	INIT_LIST_HEAD(list);
-
-	/*
-	 * At this point, the list body still points to the source list.
-	 * Wait for any readers to finish using the list before splicing
-	 * the list body into the new list. Any new readers will see
-	 * an empty list.
-	 */
-
-	sync();
-
-	/*
-	 * Readers are finished with the source list, so perform splice.
-	 * The order is important if the new list is global and accessible
-	 * to concurrent RCU readers. Note that RCU readers are not
-	 * permitted to traverse the prev pointers without excluding
-	 * this function.
-	 */
-
-	last->next = at;
-	smp_wmb();
-	head->next = first;
-	first->prev = head;
-	at->prev = last;
-}
-
-/**
  * list_entry - get the struct for this entry
  * @ptr: the &struct list_head pointer.
  * @type: the type of the struct this is embedded in.
@@ -629,57 +465,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	     &pos->member != (head); \
 	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
 
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
-		prefetch(pos->next), pos != (head); \
-		pos = rcu_dereference(pos->next))
-
-#define __list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
-		pos != (head); \
-		pos = rcu_dereference(pos->next))
-
-/**
- * list_for_each_entry_rcu - iterate over rcu list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_entry_rcu(pos, head, member) \
-	for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
-		prefetch(pos->member.next), &pos->member != (head); \
-		pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
-
-
-/**
- * list_for_each_continue_rcu
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * Iterate over an rcu-protected list, continuing after current point.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_continue_rcu(pos, head) \
-	for ((pos) = rcu_dereference((pos)->next); \
-		prefetch((pos)->next), (pos) != (head); \
-		(pos) = rcu_dereference((pos)->next))
-
 /*
  * Double linked lists with a single pointer list head.
  * Mostly useful for hash tables where the two pointer list head is
@@ -730,31 +515,6 @@ static inline void hlist_del(struct hlist_node *n)
 	n->pprev = LIST_POISON2;
 }
 
-/**
- * hlist_del_rcu - deletes entry from hash list without re-initialization
- * @n: the element to delete from the hash list.
- *
- * Note: list_unhashed() on entry does not return true after this,
- * the entry is in an undefined state. It is useful for RCU based
- * lockfree traversal.
- *
- * In particular, it means that we can not poison the forward
- * pointers that may still be used for walking the hash list.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry().
- */
-static inline void hlist_del_rcu(struct hlist_node *n)
-{
-	__hlist_del(n);
-	n->pprev = LIST_POISON2;
-}
-
 static inline void hlist_del_init(struct hlist_node *n)
 {
 	if (!hlist_unhashed(n)) {
@@ -763,27 +523,6 @@ static inline void hlist_del_init(struct hlist_node *n)
 	}
 }
 
-/**
- * hlist_replace_rcu - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * The @old entry will be replaced with the @new entry atomically.
- */
-static inline void hlist_replace_rcu(struct hlist_node *old,
-					struct hlist_node *new)
-{
-	struct hlist_node *next = old->next;
-
-	new->next = next;
-	new->pprev = old->pprev;
-	smp_wmb();
-	if (next)
-		new->next->pprev = &new->next;
-	*new->pprev = new;
-	old->pprev = LIST_POISON2;
-}
-
 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 {
 	struct hlist_node *first = h->first;
@@ -794,38 +533,6 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 	n->pprev = &h->first;
 }
 
-
-/**
- * hlist_add_head_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the specified hlist,
- * while permitting racing traversals.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs. Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_add_head_rcu(struct hlist_node *n,
-					struct hlist_head *h)
-{
-	struct hlist_node *first = h->first;
-	n->next = first;
-	n->pprev = &h->first;
-	smp_wmb();
-	if (first)
-		first->pprev = &n->next;
-	h->first = n;
-}
-
 /* next must be != NULL */
 static inline void hlist_add_before(struct hlist_node *n,
 					struct hlist_node *next)
@@ -847,63 +554,6 @@ static inline void hlist_add_after(struct hlist_node *n,
 	next->next->pprev = &next->next;
 }
 
-/**
- * hlist_add_before_rcu
- * @n: the new element to add to the hash list.
- * @next: the existing element to add the new element before.
- *
- * Description:
- * Adds the specified element to the specified hlist
- * before the specified node while permitting racing traversals.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.
- */
-static inline void hlist_add_before_rcu(struct hlist_node *n,
-					struct hlist_node *next)
-{
-	n->pprev = next->pprev;
-	n->next = next;
-	smp_wmb();
-	next->pprev = &n->next;
-	*(n->pprev) = n;
-}
-
-/**
- * hlist_add_after_rcu
- * @prev: the existing element to add the new element after.
- * @n: the new element to add to the hash list.
- *
- * Description:
- * Adds the specified element to the specified hlist
- * after the specified node while permitting racing traversals.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.
- */
-static inline void hlist_add_after_rcu(struct hlist_node *prev,
-					struct hlist_node *n)
-{
-	n->next = prev->next;
-	n->pprev = &prev->next;
-	smp_wmb();
-	prev->next = n;
-	if (n->next)
-		n->next->pprev = &n->next;
-}
-
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
@@ -964,21 +614,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 	({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
 	pos = n)
 
-/**
- * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as hlist_add_head_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
-	for (pos = rcu_dereference((head)->first); \
-		pos && ({ prefetch(pos->next); 1;}) && \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-		pos = rcu_dereference(pos->next))
-
 #endif
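
The _rcu variants removed above differ from their plain list.h counterparts only in how a new entry is published. A sketch of the reordering hazard that the write barrier closes (illustrative only, not part of this patch):

	/*
	 * With a plain list_add(), nothing orders the stores that
	 * initialize the new entry against the store that links it into
	 * the list, so the CPU or compiler may publish first:
	 *
	 *	Writer (plain list_add)		Concurrent lockless reader
	 *	-----------------------		--------------------------
	 *	prev->next = new;		pos = prev->next; /* sees new */
	 *	new->next = next;		pos = pos->next;  /* garbage! */
	 *
	 * __list_add_rcu() closes this window by publishing after a
	 * smp_wmb() (rcu_assign_pointer() in the rculist.h version below),
	 * and readers must subscribe with rcu_dereference(), which also
	 * handles DEC Alpha's reordering of dependent loads.
	 */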
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index b3aa05baab8a..8c774905dcfe 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -151,7 +151,10 @@ extern struct lockdep_map rcu_lock_map;
 
 #define __synchronize_sched() synchronize_rcu()
 
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
 extern void __rcu_init(void);
+#define rcu_init_sched() do { } while (0)
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
 
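
Under classic RCU, preempt-disabled regions already hold off grace periods, so call_rcu_sched() can simply map onto call_rcu() and rcu_init_sched() needs no work. A minimal usage sketch (struct foo and its helpers are hypothetical names, not from this patch):

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static void foo_reclaim(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	/*
	 * Defer the kfree() until all currently executing preempt-disabled
	 * code sequences (including hardirq and NMI handlers) complete.
	 */
	static void foo_release(struct foo *fp)
	{
		call_rcu_sched(&fp->rcu, foo_reclaim);
	}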
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
new file mode 100644
index 000000000000..b0f39be08b6c
--- /dev/null
+++ b/include/linux/rculist.h
@@ -0,0 +1,373 @@
+#ifndef _LINUX_RCULIST_H
+#define _LINUX_RCULIST_H
+
+#ifdef __KERNEL__
+
+/*
+ * RCU-protected list version
+ */
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add_rcu(struct list_head *new,
+		struct list_head *prev, struct list_head *next)
+{
+	new->next = next;
+	new->prev = prev;
+	rcu_assign_pointer(prev->next, new);
+	next->prev = new;
+}
+
+/**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+	__list_add_rcu(new, head, head->next);
+}
+
+/**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_tail_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_tail_rcu(struct list_head *new,
+					struct list_head *head)
+{
+	__list_add_rcu(new, head->prev, head);
+}
+
+/**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * Note: list_empty() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_del_rcu()
+ * or list_add_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_rcu()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_del_rcu(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The @old entry will be replaced with the @new entry atomically.
+ * Note: @old should not be empty.
+ */
+static inline void list_replace_rcu(struct list_head *old,
+				struct list_head *new)
+{
+	new->next = old->next;
+	new->prev = old->prev;
+	rcu_assign_pointer(new->prev->next, new);
+	new->next->prev = new;
+	old->prev = LIST_POISON2;
+}
+
+/**
+ * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ *
+ * @head can be RCU-read traversed concurrently with this function.
+ *
+ * Note that this function blocks.
+ *
+ * Important note: the caller must take whatever action is necessary to
+ * prevent any other updates to @head. In principle, it is possible
+ * to modify the list as soon as sync() begins execution.
+ * If this sort of thing becomes necessary, an alternative version
+ * based on call_rcu() could be created. But only if -really-
+ * needed -- there is no shortage of RCU API members.
+ */
+static inline void list_splice_init_rcu(struct list_head *list,
+					struct list_head *head,
+					void (*sync)(void))
+{
+	struct list_head *first = list->next;
+	struct list_head *last = list->prev;
+	struct list_head *at = head->next;
+
+	if (list_empty(head))
+		return;
+
+	/* "first" and "last" tracking list, so initialize it. */
+
+	INIT_LIST_HEAD(list);
+
+	/*
+	 * At this point, the list body still points to the source list.
+	 * Wait for any readers to finish using the list before splicing
+	 * the list body into the new list. Any new readers will see
+	 * an empty list.
+	 */
+
+	sync();
+
+	/*
+	 * Readers are finished with the source list, so perform splice.
+	 * The order is important if the new list is global and accessible
+	 * to concurrent RCU readers. Note that RCU readers are not
+	 * permitted to traverse the prev pointers without excluding
+	 * this function.
+	 */
+
+	last->next = at;
+	rcu_assign_pointer(head->next, first);
+	first->prev = head;
+	at->prev = last;
+}
+
+/**
+ * list_for_each_rcu - iterate over an rcu-protected list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_rcu(pos, head) \
+	for (pos = rcu_dereference((head)->next); \
+		prefetch(pos->next), pos != (head); \
+		pos = rcu_dereference(pos->next))
+
+#define __list_for_each_rcu(pos, head) \
+	for (pos = rcu_dereference((head)->next); \
+		pos != (head); \
+		pos = rcu_dereference(pos->next))
+
+/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_entry_rcu(pos, head, member) \
+	for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
+		prefetch(pos->member.next), &pos->member != (head); \
+		pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
+
+
+/**
+ * list_for_each_continue_rcu
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Iterate over an rcu-protected list, continuing after current point.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_continue_rcu(pos, head) \
+	for ((pos) = rcu_dereference((pos)->next); \
+		prefetch((pos)->next), (pos) != (head); \
+		(pos) = rcu_dereference((pos)->next))
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The @old entry will be replaced with the @new entry atomically.
+ */
+static inline void hlist_replace_rcu(struct hlist_node *old,
+					struct hlist_node *new)
+{
+	struct hlist_node *next = old->next;
+
+	new->next = next;
+	new->pprev = old->pprev;
+	rcu_assign_pointer(*new->pprev, new);
+	if (next)
+		new->next->pprev = &new->next;
+	old->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+					struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+
+	n->next = first;
+	n->pprev = &h->first;
+	rcu_assign_pointer(h->first, n);
+	if (first)
+		first->pprev = &n->next;
+}
+
+/**
+ * hlist_add_before_rcu
+ * @n: the new element to add to the hash list.
+ * @next: the existing element to add the new element before.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * before the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_before_rcu(struct hlist_node *n,
+					struct hlist_node *next)
+{
+	n->pprev = next->pprev;
+	n->next = next;
+	rcu_assign_pointer(*(n->pprev), n);
+	next->pprev = &n->next;
+}
+
+/**
+ * hlist_add_after_rcu
+ * @prev: the existing element to add the new element after.
+ * @n: the new element to add to the hash list.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * after the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_after_rcu(struct hlist_node *prev,
+					struct hlist_node *n)
+{
+	n->next = prev->next;
+	n->pprev = &prev->next;
+	rcu_assign_pointer(prev->next, n);
+	if (n->next)
+		n->next->pprev = &n->next;
+}
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+	for (pos = rcu_dereference((head)->first); \
+		pos && ({ prefetch(pos->next); 1; }) && \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+		pos = rcu_dereference(pos->next))
+
+#endif	/* __KERNEL__ */
+#endif
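
Taken together, the update primitives in this new header publish with rcu_assign_pointer() and the traversal primitives subscribe with rcu_dereference(). A minimal usage sketch (struct item, item_list, and the helpers are hypothetical names, not part of this patch): readers traverse locklessly under rcu_read_lock(), while writers serialize among themselves with a lock and defer frees across a grace period:

	struct item {
		int key;
		struct list_head link;
		struct rcu_head rcu;
	};

	static LIST_HEAD(item_list);
	static DEFINE_SPINLOCK(item_lock);

	static bool item_exists(int key)	/* lockless reader */
	{
		struct item *it;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &item_list, link) {
			if (it->key == key) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}

	static void item_add(struct item *it)	/* writer */
	{
		spin_lock(&item_lock);
		list_add_rcu(&it->link, &item_list); /* publishes the entry */
		spin_unlock(&item_lock);
	}

	static void item_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}

	static void item_del(struct item *it)	/* writer */
	{
		spin_lock(&item_lock);
		list_del_rcu(&it->link);	/* readers may still walk past it */
		spin_unlock(&item_lock);
		call_rcu(&it->rcu, item_free_rcu); /* free after a grace period */
	}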
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d42dbec06083..e8b4039cfb2f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -40,6 +40,7 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 #include <linux/lockdep.h>
+#include <linux/completion.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -168,6 +169,27 @@ struct rcu_head {
 	(p) = (v); \
 })
 
+/* Infrastructure to implement the synchronize_() primitives. */
+
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
+extern void wakeme_after_rcu(struct rcu_head *head);
+
+#define synchronize_rcu_xxx(name, func) \
+void name(void) \
+{ \
+	struct rcu_synchronize rcu; \
+	\
+	init_completion(&rcu.completion); \
+	/* Will wake me after RCU finished. */ \
+	func(&rcu.head, wakeme_after_rcu); \
+	/* Wait for it. */ \
+	wait_for_completion(&rcu.completion); \
+}
+
 /**
  * synchronize_sched - block until all CPUs have exited any non-preemptive
  * kernel code sequences.
@@ -224,8 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head,
 /* Exported common interfaces */
 extern void synchronize_rcu(void);
 extern void rcu_barrier(void);
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
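
The synchronize_rcu_xxx() macro builds a blocking grace-period wait out of any callback-queueing primitive: it queues a callback that fires after a grace period, and that callback (wakeme_after_rcu()) completes a completion the caller sleeps on. For example, synchronize_rcu_xxx(synchronize_rcu, call_rcu) expands, modulo whitespace, to:

	void synchronize_rcu(void)
	{
		struct rcu_synchronize rcu;

		init_completion(&rcu.completion);
		/* Will wake me after RCU finished. */
		call_rcu(&rcu.head, wakeme_after_rcu);
		/* Wait for it. */
		wait_for_completion(&rcu.completion);
	}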
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 8a05c7e20bc4..f04b64eca636 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,10 +40,39 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-#define rcu_qsctr_inc(cpu)
+struct rcu_dyntick_sched {
+	int dynticks;
+	int dynticks_snap;
+	int sched_qs;
+	int sched_qs_snap;
+	int sched_dynticks_snap;
+};
+
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
+
+static inline void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+	rdssp->sched_qs++;
+}
 #define rcu_bh_qsctr_inc(cpu)
 #define call_rcu_bh(head, rcu) call_rcu(head, rcu)
 
+/**
+ * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full
+ * synchronize_sched()-style grace period elapses, in other words after
+ * all currently executing preempt-disabled sections of code (including
+ * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
+ * completed.
+ */
+extern void call_rcu_sched(struct rcu_head *head,
+			   void (*func)(struct rcu_head *head));
+
 extern void __rcu_read_lock(void) __acquires(RCU);
 extern void __rcu_read_unlock(void) __releases(RCU);
 extern int rcu_pending(int cpu);
@@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu);
 extern void __synchronize_sched(void);
 
 extern void __rcu_init(void);
+extern void rcu_init_sched(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
 extern long rcu_batches_completed(void);
@@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(long, dynticks_progress_counter);
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
 
 static inline void rcu_enter_nohz(void)
 {
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	__get_cpu_var(dynticks_progress_counter)++;
-	WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1);
 }
 
 static inline void rcu_exit_nohz(void)
 {
-	__get_cpu_var(dynticks_progress_counter)++;
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-	WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1));
 }
 
 #else /* CONFIG_NO_HZ */
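
The per-CPU rcu_dyntick_sched counters let the sched grace-period machinery treat dynticks-idle CPUs as quiescent without waking them: ->dynticks is odd while the CPU is active and even while it is idle, so a snapshot taken at the start of a grace period can later be compared against the current value. A simplified sketch of such a check (the function name and exact comparisons here are illustrative; the real logic lives in kernel/rcupreempt.c):

	static int rcu_sched_qs_still_needed(int cpu)
	{
		struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
		int curr = rdssp->dynticks;
		int snap = rdssp->sched_dynticks_snap;

		if ((curr & 0x1) == 0)
			return 0; /* CPU is dynticks-idle right now: quiescent. */
		if (curr != snap)
			return 0; /* CPU passed through idle since the snapshot. */
		return 1;         /* CPU stayed busy: still owes a quiescent state. */
	}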