path: root/include/linux/skbuff.h
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	117
1 file changed, 72 insertions(+), 45 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 111f26b6e28b..b534a1be540a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -117,11 +117,11 @@ struct nf_conntrack {
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info {
 	atomic_t use;
+	unsigned int mask;
 	struct net_device *physindev;
 	struct net_device *physoutdev;
-	unsigned int mask;
 	unsigned long data[32 / sizeof(unsigned long)];
 };
 #endif
 
@@ -470,7 +470,8 @@ struct sk_buff {
 	__u8 wifi_acked_valid:1;
 	__u8 wifi_acked:1;
 	__u8 no_fcs:1;
-	/* 9/11 bit hole (depending on ndisc_nodetype presence) */
+	__u8 head_frag:1;
+	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #ifdef CONFIG_NET_DMA
@@ -560,9 +561,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 extern void kfree_skb(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
+extern struct kmem_cache *skbuff_head_cache;
+
+extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+			     bool *fragstolen, int *delta_truesize);
+
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
-extern struct sk_buff *build_skb(void *data);
+extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
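
build_skb() gains a frag_size argument, and the helpers used later in the series (kfree_skb_partial(), skb_try_coalesce()) are declared here along with the exported skbuff_head_cache. A minimal sketch of the extended build_skb() contract, assuming frag_size == 0 keeps the pre-patch behaviour of a kmalloc()'d head while non-zero describes a page-fragment head; my_build_from_kmalloc() is an illustrative helper, not kernel API:

#include <linux/skbuff.h>
#include <linux/slab.h>

static struct sk_buff *my_build_from_kmalloc(unsigned int size, gfp_t gfp)
{
	/* leave room for the struct skb_shared_info that build_skb()
	 * places at the end of the buffer */
	void *data = kmalloc(size +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			     gfp);
	struct sk_buff *skb;

	if (!data)
		return NULL;

	skb = build_skb(data, 0);	/* 0: head is kmalloc()'d, no frag */
	if (!skb)
		kfree(data);
	return skb;
}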
@@ -643,11 +650,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
 	return skb->head + skb->end;
 }
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+	return skb->end;
+}
 #else
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
 	return skb->end;
 }
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+	return skb->end - skb->head;
+}
 #endif
 
 /* Internal */
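
skb_end_offset() reports the head-buffer size as an offset under both NET_SKBUFF_DATA_USES_OFFSET configurations, so callers no longer open-code the pointer arithmetic (the skb_is_recycleable() hunk below is converted to it). An illustrative before/after, with head_buffer_size() being a hypothetical caller:

#include <linux/skbuff.h>

static unsigned int head_buffer_size(const struct sk_buff *skb)
{
	/* old spelling: skb_end_pointer(skb) - skb->head,
	 * whose cost depends on the config-dependent type of skb->end */
	return skb_end_offset(skb);	/* new, config-independent */
}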
@@ -881,10 +898,11 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
  */
 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
-	if (list == (struct sk_buff *)list_)
-		list = NULL;
-	return list;
+	struct sk_buff *skb = list_->next;
+
+	if (skb == (struct sk_buff *)list_)
+		skb = NULL;
+	return skb;
 }
 
 /**
@@ -900,6 +918,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
 					     const struct sk_buff_head *list_)
 {
 	struct sk_buff *next = skb->next;
+
 	if (next == (struct sk_buff *)list_)
 		next = NULL;
 	return next;
@@ -920,10 +939,12 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
  */
 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
-	if (list == (struct sk_buff *)list_)
-		list = NULL;
-	return list;
+	struct sk_buff *skb = list_->prev;
+
+	if (skb == (struct sk_buff *)list_)
+		skb = NULL;
+	return skb;
+
 }
 
 /**
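
The peek helpers now read list_->next and list_->prev directly, which works because struct sk_buff_head deliberately mirrors the leading next/prev members of struct sk_buff, and they use the conventional skb variable name instead of the confusing list. A usage sketch, names illustrative, assuming the caller holds the queue lock:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static unsigned int first_queued_len(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb = skb_peek(q);		/* NULL if the queue is empty */
	if (skb)
		len = skb->len;		/* skb stays on the queue */
	spin_unlock_irqrestore(&q->lock, flags);

	return len;
}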
@@ -1664,31 +1685,11 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-/**
- * __dev_alloc_skb - allocate an skbuff for receiving
- * @length: length to allocate
- * @gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory.
- */
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-					      gfp_t gfp_mask)
-{
-	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
-		skb_reserve(skb, NET_SKB_PAD);
-	return skb;
-}
-
-extern struct sk_buff *dev_alloc_skb(unsigned int length);
+extern void *netdev_alloc_frag(unsigned int fragsz);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-					  unsigned int length, gfp_t gfp_mask);
+					  unsigned int length,
+					  gfp_t gfp_mask);
 
 /**
  * netdev_alloc_skb - allocate an skbuff for rx on a specific device
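
__dev_alloc_skb() and dev_alloc_skb() lose their definitions here (they return as thin wrappers in the next hunk), and netdev_alloc_frag() exposes the page-fragment allocator backing __netdev_alloc_skb(). A hedged sketch of pairing it with build_skb(), roughly the pattern __netdev_alloc_skb() is expected to use internally; the sizing and the error path are assumptions, and alloc_rx_skb() is illustrative:

#include <linux/skbuff.h>
#include <linux/mm.h>

static struct sk_buff *alloc_rx_skb(unsigned int rx_len)
{
	unsigned int fragsz = SKB_DATA_ALIGN(rx_len + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	void *data = netdev_alloc_frag(fragsz);
	struct sk_buff *skb;

	if (!data)
		return NULL;

	skb = build_skb(data, fragsz);	/* marks skb->head_frag */
	if (!skb) {
		put_page(virt_to_head_page(data));
		return NULL;
	}
	skb_reserve(skb, NET_SKB_PAD);
	return skb;
}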
@@ -1704,11 +1705,25 @@ extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
  * allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
 					       unsigned int length)
 {
 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }
 
+/* legacy helper around __netdev_alloc_skb() */
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+					      gfp_t gfp_mask)
+{
+	return __netdev_alloc_skb(NULL, length, gfp_mask);
+}
+
+/* legacy helper around netdev_alloc_skb() */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+	return netdev_alloc_skb(NULL, length);
+}
+
+
 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
 					unsigned int length, gfp_t gfp)
 {
@@ -1881,8 +1896,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 {
 	int delta = 0;
 
-	if (headroom < NET_SKB_PAD)
-		headroom = NET_SKB_PAD;
 	if (headroom > skb_headroom(skb))
 		delta = headroom - skb_headroom(skb);
 
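
With the NET_SKB_PAD clamp gone, __skb_cow() reallocates only when the caller's actual requirement exceeds the available headroom, rather than silently rounding every request up. A usage sketch (push_encap_header() and hdr_len are illustrative):

#include <linux/skbuff.h>

static int push_encap_header(struct sk_buff *skb, unsigned int hdr_len)
{
	/* now requests exactly hdr_len, not max(hdr_len, NET_SKB_PAD) */
	int err = skb_cow_head(skb, hdr_len);

	if (err)
		return err;
	__skb_push(skb, hdr_len);
	return 0;
}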
@@ -1963,8 +1976,8 @@ static inline int skb_add_data(struct sk_buff *skb,
 	return -EFAULT;
 }
 
-static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-				   const struct page *page, int off)
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+				    const struct page *page, int off)
 {
 	if (i) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1972,7 +1985,7 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
 		return page == skb_frag_page(frag) &&
 		       off == frag->page_offset + skb_frag_size(frag);
 	}
-	return 0;
+	return false;
 }
 
 static inline int __skb_linearize(struct sk_buff *skb)
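
skb_can_coalesce() is converted from int to bool, matching what it actually answers. A sketch of the sendpage-style pattern it serves: extend the last fragment when the new page+offset continues it, otherwise add a fresh one (append_page() is illustrative):

#include <linux/skbuff.h>
#include <linux/mm.h>

static void append_page(struct sk_buff *skb, struct page *page,
			int off, int size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, off)) {
		/* the new data continues the last fragment: extend it */
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else {
		/* otherwise take a reference and add a new fragment */
		get_page(page);
		skb_fill_page_desc(skb, i, page, off, size);
	}
}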
@@ -2552,7 +2565,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
 		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
+	if (skb_end_offset(skb) < skb_size)
 		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
@@ -2560,5 +2573,19 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
 
 	return true;
 }
+
+/**
+ * skb_head_is_locked - Determine if the skb->head is locked down
+ * @skb: skb to check
+ *
+ * The head on skbs build around a head frag can be removed if they are
+ * not cloned. This function returns true if the skb head is locked down
+ * due to either being allocated via kmalloc, or by being a clone with
+ * multiple references to the head.
+ */
+static inline bool skb_head_is_locked(const struct sk_buff *skb)
+{
+	return !skb->head_frag || skb_cloned(skb);
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
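
skb_head_is_locked() ties the series together: a head can be donated only when it is a page fragment (head_frag set by build_skb()) and not shared with a clone. A hedged sketch of the intended consumer pattern for skb_try_coalesce() and kfree_skb_partial(); queue_coalesce() and the accounting comment are illustrative, not from this patch:

#include <linux/skbuff.h>

static bool queue_coalesce(struct sk_buff *to, struct sk_buff *from,
			   int *truesize_delta)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	*truesize_delta = delta;	/* e.g. receive-queue accounting */
	/* frees 'from'; if its head frag was stolen into 'to', only the
	 * sk_buff struct itself goes back to skbuff_head_cache */
	kfree_skb_partial(from, fragstolen);
	return true;
}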