path: root/include/linux/skbuff.h
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h  206
1 files changed, 51 insertions, 155 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bbd8d0027e2f..11fd9f2c4093 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -256,7 +256,10 @@ struct sk_buff {
 	ktime_t			tstamp;
 	struct net_device	*dev;
 
-	struct  dst_entry	*dst;
+	union {
+		struct  dst_entry	*dst;
+		struct  rtable		*rtable;
+	};
 	struct	sec_path	*sp;
 
 	/*
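For illustration only (not part of the patch): the anonymous union lets receive-path code read the attached route directly instead of casting the generic dst entry. The helper name example_skb_route below is hypothetical.

#include <linux/skbuff.h>
#include <net/route.h>

/* Hypothetical helper: return the route attached to a received packet.
 * Both union members alias the same storage, so this is equivalent to
 * the old (struct rtable *)skb->dst cast. */
static inline struct rtable *example_skb_route(const struct sk_buff *skb)
{
	return skb->rtable;
}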
@@ -310,7 +313,10 @@ struct sk_buff {
 	__u16			tc_verd;	/* traffic control verdict */
 #endif
 #endif
-	/* 2 byte hole */
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+	__u8			ndisc_nodetype:2;
+#endif
+	/* 14 bit hole */
 
 #ifdef CONFIG_NET_DMA
 	dma_cookie_t		dma_cookie;
@@ -657,11 +663,21 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 }
 
 /*
- * Insert an sk_buff at the start of a list.
+ * Insert an sk_buff on a list.
  *
  * The "__skb_xxxx()" functions are the non-atomic ones that
  * can only be called with interrupts disabled.
  */
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_insert(struct sk_buff *newsk,
+				struct sk_buff *prev, struct sk_buff *next,
+				struct sk_buff_head *list)
+{
+	newsk->next = next;
+	newsk->prev = prev;
+	next->prev  = prev->next = newsk;
+	list->qlen++;
+}
 
 /**
  *	__skb_queue_after - queue a buffer at the list head
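A hedged usage sketch for the relocated __skb_insert(): the function is non-atomic, so callers provide their own serialization. The function name and locking context below are assumptions, not code from the patch.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Assumed caller: splice @newsk in front of @next on a private queue,
 * taking the queue lock ourselves because __skb_insert() does not. */
static void example_insert_before(struct sk_buff_head *list,
				  struct sk_buff *next,
				  struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, next->prev, next, list);	/* prev <-> newsk <-> next */
	spin_unlock_irqrestore(&list->lock, flags);
}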
@@ -678,13 +694,17 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
 				     struct sk_buff *prev,
 				     struct sk_buff *newsk)
 {
-	struct sk_buff *next;
-	list->qlen++;
-
-	next = prev->next;
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+	__skb_insert(newsk, prev, prev->next, list);
+}
+
+extern void	skb_append(struct sk_buff *old, struct sk_buff *newsk,
+			   struct sk_buff_head *list);
+
+static inline void __skb_queue_before(struct sk_buff_head *list,
+				      struct sk_buff *next,
+				      struct sk_buff *newsk)
+{
+	__skb_insert(newsk, next->prev, next, list);
 }
 
 /**
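As a sketch of how the new __skb_queue_before() might be used (the sorting criterion and the helper name are made up): keep an already-locked private queue ordered by length by inserting in front of the first longer buffer.

#include <linux/skbuff.h>

/* Hypothetical: insert @newsk into a caller-locked queue ordered by len. */
static void example_insert_sorted(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *skb;

	skb_queue_walk(list, skb) {
		if (skb->len > newsk->len) {
			__skb_queue_before(list, skb, newsk);
			return;
		}
	}
	__skb_queue_tail(list, newsk);	/* nothing longer: append at the tail */
}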
@@ -718,66 +738,7 @@ extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				   struct sk_buff *newsk)
 {
-	struct sk_buff *prev, *next;
-
-	list->qlen++;
-	next = (struct sk_buff *)list;
-	prev = next->prev;
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
-}
-
-
-/**
- *	__skb_dequeue - remove from the head of the queue
- *	@list: list to dequeue from
- *
- *	Remove the head of the list. This function does not take any locks
- *	so must be used with appropriate locks held only. The head item is
- *	returned or %NULL if the list is empty.
- */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
-static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
-{
-	struct sk_buff *next, *prev, *result;
-
-	prev = (struct sk_buff *) list;
-	next = prev->next;
-	result = NULL;
-	if (next != prev) {
-		result	 = next;
-		next	 = next->next;
-		list->qlen--;
-		next->prev = prev;
-		prev->next = next;
-		result->next = result->prev = NULL;
-	}
-	return result;
-}
-
-
-/*
- * Insert a packet on a list.
- */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_insert(struct sk_buff *newsk,
-				struct sk_buff *prev, struct sk_buff *next,
-				struct sk_buff_head *list)
-{
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev  = prev->next = newsk;
-	list->qlen++;
-}
-
-/*
- * Place a packet after a given packet in a list.
- */
-extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
-	__skb_insert(newsk, old, old->next, list);
+	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 }
 
 /*
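The rewritten __skb_queue_tail() treats the list head itself as the sentinel it queues before; the cast works because struct sk_buff_head deliberately shares its first two members (next/prev) with struct sk_buff. A small hedged sketch of the same idiom, with an assumed helper name:

#include <linux/skbuff.h>

/* Hypothetical helper: true if @skb is the last real buffer on @list. */
static inline bool example_skb_is_last(const struct sk_buff_head *list,
				       const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *)list;
}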
@@ -797,8 +758,22 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	prev->next = next;
 }
 
-
-/* XXX: more streamlined implementation */
+/**
+ *	__skb_dequeue - remove from the head of the queue
+ *	@list: list to dequeue from
+ *
+ *	Remove the head of the list. This function does not take any locks
+ *	so must be used with appropriate locks held only. The head item is
+ *	returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+	struct sk_buff *skb = skb_peek(list);
+	if (skb)
+		__skb_unlink(skb, list);
+	return skb;
+}
 
 /**
  *	__skb_dequeue_tail - remove from the tail of the queue
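A hedged sketch of the simplified __skb_dequeue() in use (helper name assumed): because skb_peek() returns %NULL on an empty list, the loop below terminates naturally. The caller is expected to hold the queue lock or own the list outright.

#include <linux/skbuff.h>

/* Assumed helper: free every buffer still sitting on a caller-locked queue. */
static void example_drain(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}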
@@ -889,6 +864,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
  *	Add data to an sk_buff
  */
+extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
 	unsigned char *tmp = skb_tail_pointer(skb);
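With skb_put() now declared extern, the unchecked __skb_put() remains the inline fast path for callers that have already verified the tailroom. A sketch under that assumption (helper name and padding size are illustrative):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical: zero-pad the tail only when room is known to exist,
 * so the unchecked __skb_put() is safe here. */
static void example_zero_pad(struct sk_buff *skb, unsigned int pad)
{
	if (skb_tailroom(skb) >= pad)
		memset(__skb_put(skb, pad), 0, pad);
}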
@@ -898,26 +874,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 	return tmp;
 }
 
-/**
- *	skb_put - add data to a buffer
- *	@skb: buffer to use
- *	@len: amount of data to add
- *
- *	This function extends the used data area of the buffer. If this would
- *	exceed the total buffer size the kernel will panic. A pointer to the
- *	first byte of the extra data is returned.
- */
-static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
-{
-	unsigned char *tmp = skb_tail_pointer(skb);
-	SKB_LINEAR_ASSERT(skb);
-	skb->tail += len;
-	skb->len  += len;
-	if (unlikely(skb->tail > skb->end))
-		skb_over_panic(skb, len, current_text_addr());
-	return tmp;
-}
-
+extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
 	skb->data -= len;
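The kernel-doc removed above moves out of line together with the skb_put() body; the calling convention is unchanged. A hedged example of a typical caller (buffer size, headroom and helper name are assumptions):

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Assumed transmit-path helper: allocate a buffer, leave headroom for
 * lower-layer headers, then append @len bytes of payload via skb_put(). */
static struct sk_buff *example_build_payload(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + 64, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 64);				/* headroom for headers */
	memcpy(skb_put(skb, len), payload, len);	/* extend tail, copy data */
	return skb;
}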
@@ -925,24 +882,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 	return skb->data;
 }
 
-/**
- *	skb_push - add data to the start of a buffer
- *	@skb: buffer to use
- *	@len: amount of data to add
- *
- *	This function extends the used data area of the buffer at the buffer
- *	start. If this would exceed the total buffer headroom the kernel will
- *	panic. A pointer to the first byte of the extra data is returned.
- */
-static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
-{
-	skb->data -= len;
-	skb->len  += len;
-	if (unlikely(skb->data < skb->head))
-		skb_under_panic(skb, len, current_text_addr());
-	return skb->data;
-}
-
+extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
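skb_push() likewise keeps its semantics while moving out of line. A hedged sketch of prepending a header (the 8-byte size, header layout and helper name are assumptions):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical: prepend an 8-byte protocol header in front of the payload.
 * Checking headroom first avoids the panic path inside skb_push(). */
static int example_push_header(struct sk_buff *skb, const void *hdr)
{
	if (skb_headroom(skb) < 8)
		return -ENOMEM;
	memcpy(skb_push(skb, 8), hdr, 8);	/* data pointer moves back 8 bytes */
	return 0;
}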
@@ -950,21 +890,6 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 	return skb->data += len;
 }
 
-/**
- *	skb_pull - remove data from the start of a buffer
- *	@skb: buffer to use
- *	@len: amount of data to remove
- *
- *	This function removes data from the start of a buffer, returning
- *	the memory to the headroom. A pointer to the next data in the buffer
- *	is returned. Once the data has been pulled future pushes will overwrite
- *	the old data.
- */
-static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
-{
-	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
-}
-
 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
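The out-of-line skb_pull() still returns %NULL, leaving the buffer untouched, when fewer than @len bytes are present. A sketch of a receive-path caller (the 4-byte tag and helper name are invented for illustration):

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hypothetical: strip a 4-byte tag from the front of a received frame. */
static int example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))	/* ensure the bytes are in the linear area */
		return -EINVAL;
	skb_pull(skb, 4);
	return 0;
}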
@@ -1205,21 +1130,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 	skb_set_tail_pointer(skb, len);
 }
 
-/**
- *	skb_trim - remove end from a buffer
- *	@skb: buffer to alter
- *	@len: new length
- *
- *	Cut the length of a buffer down by removing data from the tail. If
- *	the buffer is already under the length specified it is not modified.
- *	The skb must be linear.
- */
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
-	if (skb->len > len)
-		__skb_trim(skb, len);
-}
-
+extern void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
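skb_trim() keeps its "no-op when already short enough" behaviour after being uninlined. A minimal sketch, assuming the real datagram length has been read from a protocol header (helper name invented):

#include <linux/skbuff.h>

/* Hypothetical: drop trailing link-layer padding once the true length is known. */
static void example_strip_padding(struct sk_buff *skb, unsigned int real_len)
{
	skb_trim(skb, real_len);	/* does nothing if skb->len <= real_len */
}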
@@ -1302,22 +1213,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 	return skb;
 }
 
-/**
- *	dev_alloc_skb - allocate an skbuff for receiving
- *	@length: length to allocate
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory. Although this function
- *	allocates memory it can be called from an interrupt.
- */
-static inline struct sk_buff *dev_alloc_skb(unsigned int length)
-{
-	return __dev_alloc_skb(length, GFP_ATOMIC);
-}
+extern struct sk_buff *dev_alloc_skb(unsigned int length);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 					  unsigned int length, gfp_t gfp_mask);
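dev_alloc_skb() also becomes a plain declaration here; per the removed kernel-doc it still allocates with GFP_ATOMIC, so it remains callable from interrupt context. A hedged sketch of a receive-path caller (frame-length handling and helper name are assumptions):

#include <linux/skbuff.h>

/* Assumed rx helper: allocate a receive buffer and align the IP header. */
static struct sk_buff *example_rx_alloc(unsigned int frame_len)
{
	struct sk_buff *skb = dev_alloc_skb(frame_len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}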