Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--   include/linux/skbuff.h   175
1 file changed, 173 insertions, 2 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 909923717830..2725f4e5a9bf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -146,8 +146,14 @@ struct skb_shared_info {
 	unsigned short	gso_segs;
 	unsigned short	gso_type;
 	__be32		ip6_frag_id;
+#ifdef CONFIG_HAS_DMA
+	unsigned int	num_dma_maps;
+#endif
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
+#ifdef CONFIG_HAS_DMA
+	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
+#endif
 };

 /* We divide dataref into two halves.  The higher 16 bits hold references
@@ -353,6 +359,14 @@ struct sk_buff {

 #include <asm/system.h>

+#ifdef CONFIG_HAS_DMA
+#include <linux/dma-mapping.h>
+extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
+		       enum dma_data_direction dir);
+extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+			  enum dma_data_direction dir);
+#endif
+
 extern void kfree_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
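Usage sketch (not part of this patch): the new CONFIG_HAS_DMA fields and skb_dma_map()/skb_dma_unmap() declarations suggest a one-call mapping of the linear area plus all page fragments. The sketch below assumes skb_dma_map() returns 0 on success and stores the addresses in skb_shinfo(skb)->dma_maps[]; example_xmit(), example_tx_complete() and the NETDEV_TX_BUSY-on-failure policy are illustrative only.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int example_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct device *dev = netdev->dev.parent;	/* DMA-capable parent device */

	/* assumed: 0 on success, non-zero if any mapping failed */
	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return NETDEV_TX_BUSY;

	/* post TX descriptors from skb_shinfo(skb)->dma_maps[]: presumably
	 * entry 0 is the linear area, 1..nr_frags the page fragments */

	return NETDEV_TX_OK;
}

static void example_tx_complete(struct device *dev, struct sk_buff *skb)
{
	/* one call undoes every mapping set up by skb_dma_map() */
	skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
}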
@@ -369,6 +383,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, -1);
 }

+extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
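Usage sketch (not part of this patch): skb_recycle_check() looks intended for drivers that want to reuse a just-transmitted buffer on their receive ring. The semantics are assumed here (non-zero return means the skb may be reused for a receive of skb_size bytes); struct example_priv, rx_recycle and rx_skb_size are hypothetical driver fields.

#include <linux/skbuff.h>

struct example_priv {			/* hypothetical driver private data */
	struct sk_buff_head rx_recycle;
	int rx_skb_size;
};

static void example_tx_clean(struct example_priv *priv, struct sk_buff *skb)
{
	/* assumed: non-zero return means the buffer is clean enough to reuse */
	if (skb_recycle_check(skb, priv->rx_skb_size))
		__skb_queue_head(&priv->rx_recycle, skb);
	else
		dev_kfree_skb(skb);
}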
@@ -459,6 +475,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }

 /**
+ *	skb_queue_is_last - check if skb is the last entry in the queue
+ *	@list: queue head
+ *	@skb: buffer
+ *
+ *	Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+				     const struct sk_buff *skb)
+{
+	return (skb->next == (struct sk_buff *) list);
+}
+
+/**
+ *	skb_queue_next - return the next packet in the queue
+ *	@list: queue head
+ *	@skb: current buffer
+ *
+ *	Return the next packet in @list after @skb.  It is only valid to
+ *	call this if skb_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
+					     const struct sk_buff *skb)
+{
+	/* This BUG_ON may seem severe, but if we just return then we
+	 * are going to dereference garbage.
+	 */
+	BUG_ON(skb_queue_is_last(list, skb));
+	return skb->next;
+}
+
+/**
  *	skb_get - reference buffer
  *	@skb: buffer to reference
  *
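Usage sketch (not part of this patch): the two new helpers let callers walk forward from a known buffer without open-coding the "skb->next == head" sentinel check. count_from() is an illustrative name; the caller is assumed to own the queue (for example, to hold its lock) and @skb must already be on @list.

#include <linux/skbuff.h>

static unsigned int count_from(const struct sk_buff_head *list,
			       const struct sk_buff *skb)
{
	unsigned int n = 1;

	/* advance with skb_queue_next() until @skb is the last entry */
	while (!skb_queue_is_last(list, skb)) {
		skb = skb_queue_next(list, skb);
		n++;
	}
	return n;
}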
@@ -646,6 +693,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 	return list_->qlen;
 }

+/**
+ *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ *	@list: queue to initialize
+ *
+ *	This initializes only the list and queue length aspects of
+ *	an sk_buff_head object.  This allows to initialize the list
+ *	aspects of an sk_buff_head without reinitializing things like
+ *	the spinlock.  It can also be used for on-stack sk_buff_head
+ *	objects where the spinlock is known to not be used.
+ */
+static inline void __skb_queue_head_init(struct sk_buff_head *list)
+{
+	list->prev = list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+
 /*
  * This function creates a split out lock class for each invocation;
  * this is needed for now since a whole lot of users of the skb-queue
@@ -657,8 +720,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
-	list->prev = list->next = (struct sk_buff *)list;
-	list->qlen = 0;
+	__skb_queue_head_init(list);
 }

 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
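Usage sketch (not part of this patch): the lightweight init is aimed at on-stack queues whose spinlock is never taken. example_purge() is illustrative; it drains a shared, locked queue onto an on-stack list and then frees the buffers without holding the lock.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void example_purge(struct sk_buff_head *list)
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	__skb_queue_head_init(&tmp);		/* list/qlen only, no spin_lock_init() */

	/* move everything off the shared list while holding its lock ... */
	spin_lock_bh(&list->lock);
	while ((skb = __skb_dequeue(list)) != NULL)
		__skb_queue_tail(&tmp, skb);
	spin_unlock_bh(&list->lock);

	/* ... and free it outside the lock */
	while ((skb = __skb_dequeue(&tmp)) != NULL)
		kfree_skb(skb);
}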
@@ -685,6 +747,83 @@ static inline void __skb_insert(struct sk_buff *newsk,
 	list->qlen++;
 }

+static inline void __skb_queue_splice(const struct sk_buff_head *list,
+				      struct sk_buff *prev,
+				      struct sk_buff *next)
+{
+	struct sk_buff *first = list->next;
+	struct sk_buff *last = list->prev;
+
+	first->prev = prev;
+	prev->next = first;
+
+	last->next = next;
+	next->prev = last;
+}
+
+/**
+ *	skb_queue_splice - join two skb lists, this is designed for stacks
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ */
+static inline void skb_queue_splice(const struct sk_buff_head *list,
+				    struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
+		head->qlen += list->qlen;
+	}
+}
+
+/**
+ *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ *
+ *	The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_init(struct sk_buff_head *list,
+					 struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
+		head->qlen += list->qlen;
+		__skb_queue_head_init(list);
+	}
+}
+
+/**
+ *	skb_queue_splice_tail - join two skb lists, each list being a queue
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ */
+static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
+					 struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+		head->qlen += list->qlen;
+	}
+}
+
+/**
+ *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ *	@list: the new list to add
+ *	@head: the place to add it in the first list
+ *
+ *	Each of the lists is a queue.
+ *	The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
+					      struct sk_buff_head *head)
+{
+	if (!skb_queue_empty(list)) {
+		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+		head->qlen += list->qlen;
+		__skb_queue_head_init(list);
+	}
+}
+
 /**
  *	__skb_queue_after - queue a buffer at the list head
  *	@list: list to use
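Usage sketch (not part of this patch): the splice helpers join two lists in O(1), so a consumer can take over an entire locked input queue in one operation and process it lock-free. example_process() and example_handle() are illustrative names only.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void example_handle(struct sk_buff *skb)
{
	/* placeholder for per-packet processing */
	kfree_skb(skb);
}

static void example_process(struct sk_buff_head *input)
{
	struct sk_buff_head work;
	struct sk_buff *skb;

	__skb_queue_head_init(&work);		/* on-stack list, lock unused */

	/* constant-time hand-over: @input is left empty and re-initialised */
	spin_lock_bh(&input->lock);
	skb_queue_splice_tail_init(input, &work);
	spin_unlock_bh(&input->lock);

	while ((skb = __skb_dequeue(&work)) != NULL)
		example_handle(skb);
}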
@@ -829,6 +968,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 	skb_shinfo(skb)->nr_frags = i + 1;
 }

+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+			    int off, int size);
+
 #define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
 #define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))
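Usage sketch (not part of this patch): skb_add_rx_frag() looks aimed at page-based receive, where the payload lives in a page attached as a fragment rather than in the linear area. The sketch assumes the helper also updates skb->len, skb->data_len and skb->truesize; example_build_rx_skb() and the 128-byte linear allocation are illustrative choices.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>

static struct sk_buff *example_build_rx_skb(struct net_device *dev,
					     struct page *page,
					     unsigned int len)
{
	/* small linear area, e.g. for copied protocol headers */
	struct sk_buff *skb = netdev_alloc_skb(dev, 128);

	if (!skb) {
		put_page(page);
		return NULL;
	}

	/* hang the payload page off fragment 0 */
	skb_add_rx_frag(skb, 0, page, 0, len);
	return skb;
}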
@@ -1243,6 +1385,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }

+extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+
+/**
+ *	netdev_alloc_page - allocate a page for ps-rx on a specific device
+ *	@dev: network device to receive on
+ *
+ *	Allocate a new page node local to the specified device.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+static inline struct page *netdev_alloc_page(struct net_device *dev)
+{
+	return __netdev_alloc_page(dev, GFP_ATOMIC);
+}
+
+static inline void netdev_free_page(struct net_device *dev, struct page *page)
+{
+	__free_page(page);
+}
+
 /**
  *	skb_clone_writable - is the header of a clone writable
  *	@skb: buffer to check
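Usage sketch (not part of this patch): netdev_alloc_page()/netdev_free_page() pair with the packet-split ("ps-rx") receive path above, giving drivers node-local pages to post on the RX ring. struct example_rx_buf and example_refill() are hypothetical; the DMA handling shown is the usual pattern, not something this patch mandates.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct example_rx_buf {			/* hypothetical per-descriptor state */
	struct page *page;
	dma_addr_t dma;
};

static int example_refill(struct net_device *netdev, struct example_rx_buf *buf)
{
	buf->page = netdev_alloc_page(netdev);	/* node-local to the device */
	if (!buf->page)
		return -ENOMEM;

	buf->dma = dma_map_page(netdev->dev.parent, buf->page, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(netdev->dev.parent, buf->dma)) {
		netdev_free_page(netdev, buf->page);
		buf->page = NULL;
		return -ENOMEM;
	}
	return 0;
}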
@@ -1434,6 +1596,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 		     skb != (struct sk_buff *)(queue);				\
 		     skb = tmp, tmp = skb->next)

+#define skb_queue_walk_from(queue, skb)						\
+		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
+		     skb = skb->next)
+
+#define skb_queue_walk_from_safe(queue, skb, tmp)				\
+		for (tmp = skb->next;						\
+		     skb != (struct sk_buff *)(queue);				\
+		     skb = tmp, tmp = skb->next)
+
 #define skb_queue_reverse_walk(queue, skb) \
 		for (skb = (queue)->prev; \
 		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
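Usage sketch (not part of this patch): the _from variants resume a walk at a remembered position instead of the queue head. example_bytes_from() is an illustrative name; the caller is assumed to own the queue and @skb must already be on it.

#include <linux/skbuff.h>

static unsigned int example_bytes_from(struct sk_buff_head *queue,
				       struct sk_buff *skb)
{
	unsigned int bytes = 0;

	/* iteration starts at @skb itself and runs to the end of @queue */
	skb_queue_walk_from(queue, skb)
		bytes += skb->len;

	return bytes;
}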