Diffstat (limited to 'include/linux/list.h')
-rw-r--r--    include/linux/list.h    707
1 files changed, 707 insertions, 0 deletions
diff --git a/include/linux/list.h b/include/linux/list.h
new file mode 100644
index 000000000000..dd7cd54fa831
--- /dev/null
+++ b/include/linux/list.h
@@ -0,0 +1,707 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
        (ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
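
/*
 * Illustrative usage sketch (not part of the original header; "struct foo"
 * and the helper below are hypothetical): a struct list_head is embedded in
 * the containing object, and a list head is either declared statically with
 * LIST_HEAD() or set up at run time with INIT_LIST_HEAD(). Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
struct foo {
        int value;
        struct list_head node;          /* links this foo into a foo list */
};

static LIST_HEAD(foo_list);             /* head initialized at compile time */

static void foo_setup_head(struct list_head *head)
{
        INIT_LIST_HEAD(head);           /* head initialized at run time */
}
#endif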

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}
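
/*
 * Illustrative usage sketch (not part of the original header; it reuses the
 * hypothetical "struct foo" and foo_list from the earlier sketch):
 * list_add() pushes at the front (stack/LIFO behaviour), while
 * list_add_tail() appends at the back (queue/FIFO behaviour). Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static void foo_push(struct foo *f)
{
        list_add(&f->node, &foo_list);          /* newest entry first */
}

static void foo_enqueue(struct foo *f)
{
        list_add_tail(&f->node, &foo_list);     /* oldest entry first */
}
#endif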

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                                  struct list_head *prev,
                                  struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}
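
/*
 * Illustrative usage sketch (not part of the original header; foo_lock and
 * the helper are hypothetical, and spinlock primitives from
 * <linux/spinlock.h> are assumed): updaters serialize list_add_rcu() against
 * other list mutations with a lock, while readers may traverse concurrently
 * under rcu_read_lock(). Kept under "#if 0" so it is never compiled.
 */
#if 0
static spinlock_t foo_lock = SPIN_LOCK_UNLOCKED;

static void foo_insert_rcu(struct foo *f)
{
        spin_lock(&foo_lock);                   /* excludes other updaters */
        list_add_rcu(&f->node, &foo_list);      /* readers may already see f */
        spin_unlock(&foo_lock);
}
#endif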

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on @entry does not return true after this; the entry
 * is in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}
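
/*
 * Illustrative usage sketch (not part of the original header; the helper is
 * hypothetical): after list_del() the entry is no longer reachable from the
 * list and its next/prev pointers are poisoned, so the non-RCU case may free
 * it immediately. Kept under "#if 0" so it is never compiled.
 */
#if 0
static void foo_remove_and_free(struct foo *f)
{
        list_del(&f->node);     /* unlink; f->node.next/prev now poisoned */
        kfree(f);               /* safe here: no other references remain */
}
#endif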

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on @entry does not return true after this; the entry
 * is in an undefined state. It is useful for RCU based lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_kernel()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}
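
/*
 * Illustrative usage sketch (not part of the original header; foo_lock and
 * the helper are hypothetical): the updater unlinks the entry under a lock
 * with list_del_rcu(), then waits for a grace period (here via
 * synchronize_kernel(), as the comment above suggests) before freeing, so
 * that readers still walking the old pointers never see a freed entry.
 * Kept under "#if 0" so it is never compiled.
 */
#if 0
static void foo_remove_rcu(struct foo *f)
{
        spin_lock(&foo_lock);
        list_del_rcu(&f->node);         /* readers may still see f */
        spin_unlock(&foo_lock);

        synchronize_kernel();           /* wait out all current readers */
        kfree(f);                       /* now no reader can reference f */
}
#endif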

/**
 * list_replace_rcu - replace old entry by new one
 * @old: the element to be replaced
 * @new: the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
                                    struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        smp_wmb();
        new->next->prev = new;
        new->prev->next = new;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}
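
/*
 * Illustrative usage sketch (not part of the original header; the two list
 * heads and the helper are hypothetical): list_move()/list_move_tail()
 * combine the unlink and the re-add, e.g. to shift an entry from a pending
 * list to a done list. Kept under "#if 0" so it is never compiled.
 */
#if 0
static LIST_HEAD(foo_pending);
static LIST_HEAD(foo_done);

static void foo_complete(struct foo *f)
{
        /* take f off whatever list it is on and append it to foo_done */
        list_move_tail(&f->node, &foo_done);
}
#endif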

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test.
 *
 * Tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of still modifying either member.
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). E.g. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        first->prev = head;
        head->next = first;

        last->next = at;
        at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised.
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head);
                INIT_LIST_HEAD(list);
        }
}
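
/*
 * Illustrative usage sketch (not part of the original header; the helper is
 * hypothetical): a batch of entries collected on a private list is attached
 * to the global list in one step; list_splice_init() also reinitialises the
 * donor head so it can be reused. Kept under "#if 0" so it is never compiled.
 */
#if 0
static void foo_flush_batch(struct list_head *batch)
{
        /* move every entry on @batch onto foo_list, leaving @batch empty */
        list_splice_init(batch, &foo_list);
}
#endif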

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)
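
/*
 * Illustrative usage sketch (not part of the original header; the helper is
 * hypothetical and reuses the "struct foo" from the earlier sketches): given
 * a struct list_head pointer taken from a list, list_entry() recovers the
 * containing object. Kept under "#if 0" so it is never compiled.
 */
#if 0
static struct foo *foo_first(struct list_head *head)
{
        if (list_empty(head))
                return NULL;
        /* head->next is the first node; map it back to its struct foo */
        return list_entry(head->next, struct foo, node);
}
#endif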

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it is the
 * simplest possible list iteration code: no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
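
/*
 * Illustrative usage sketch (not part of the original header; the helper is
 * hypothetical): list_for_each_entry() hides the list_entry() step and hands
 * back the containing objects directly. Kept under "#if 0" so it is never
 * compiled.
 */
#if 0
static struct foo *foo_find(struct list_head *head, int value)
{
        struct foo *f;

        list_for_each_entry(f, head, node) {
                if (f->value == value)
                        return f;
        }
        return NULL;
}
#endif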

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *      list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_head within the struct.
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 *      continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
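
/*
 * Illustrative usage sketch (not part of the original header; the helper is
 * hypothetical): the _safe variant caches the next entry, so the current one
 * may be deleted (and freed) while iterating. Kept under "#if 0" so it is
 * never compiled.
 */
#if 0
static void foo_free_all(struct list_head *head)
{
        struct foo *f, *tmp;

        list_for_each_entry_safe(f, tmp, head, node) {
                list_del(&f->node);     /* safe: tmp already points past f */
                kfree(f);
        }
}
#endif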

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = rcu_dereference(pos->next))

#define __list_for_each_rcu(pos, head) \
        for (pos = (head)->next; pos != (head); \
                pos = rcu_dereference(pos->next))

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 *      against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = rcu_dereference(n), n = pos->next)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = rcu_dereference(list_entry(pos->member.next, \
                                              typeof(*pos), member)))
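
/*
 * Illustrative usage sketch (not part of the original header; the helper is
 * hypothetical): the read side only needs rcu_read_lock()/rcu_read_unlock()
 * around the traversal and may run concurrently with list_add_rcu() and
 * list_del_rcu() updaters. Kept under "#if 0" so it is never compiled.
 */
#if 0
static int foo_exists_rcu(int value)
{
        struct foo *f;
        int found = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(f, &foo_list, node) {
                if (f->value == value) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}
#endif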


/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 *      continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = (pos)->next; prefetch((pos)->next), (pos) != (head); \
                (pos) = rcu_dereference((pos)->next))

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on @n does not return true after this; the entry
 * is in an undefined state. It is useful for RCU based lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (n->pprev) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}
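
/*
 * Illustrative usage sketch (not part of the original header; "struct bar",
 * the table and the hash below are hypothetical): hlist_head keeps hash
 * buckets at half the size of a struct list_head, at the cost of O(1) access
 * to the tail. Kept under "#if 0" so it is never compiled.
 */
#if 0
#define BAR_HASH_BITS   6
#define BAR_HASH_SIZE   (1 << BAR_HASH_BITS)

struct bar {
        unsigned int key;
        struct hlist_node hash_node;    /* links this bar into one bucket */
};

static struct hlist_head bar_table[BAR_HASH_SIZE];

static void bar_hash_insert(struct bar *b)
{
        /* pick a bucket; BAR_HASH_SIZE is a power of two, so mask the key */
        struct hlist_head *bucket = &bar_table[b->key & (BAR_HASH_SIZE - 1)];

        hlist_add_head(&b->hash_node, bucket);
}
#endif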


/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)

#define hlist_for_each_rcu(pos, head) \
        for ((pos) = (head)->first; pos && ({ prefetch((pos)->next); 1; }); \
             (pos) = rcu_dereference((pos)->next))

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)
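
/*
 * Illustrative usage sketch (not part of the original header; it reuses the
 * hypothetical "struct bar" hash table from the previous sketch): a lookup
 * walks a single bucket with hlist_for_each_entry(), which needs both an
 * entry cursor and an hlist_node cursor. Kept under "#if 0" so it is never
 * compiled.
 */
#if 0
static struct bar *bar_hash_lookup(unsigned int key)
{
        struct hlist_head *bucket = &bar_table[key & (BAR_HASH_SIZE - 1)];
        struct hlist_node *pos;
        struct bar *b;

        hlist_for_each_entry(b, pos, bucket, hash_node) {
                if (b->key == key)
                        return b;
        }
        return NULL;
}
#endif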

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
        for (pos = (pos)->next; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
        for (; pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
        for (pos = (head)->first; \
             pos && ({ n = pos->next; 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = rcu_dereference(pos->next))

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif