Diffstat (limited to 'include/linux/skbuff.h')

-rw-r--r--  include/linux/skbuff.h  96
1 file changed, 51 insertions, 45 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe864885c1ed..ae86adee3746 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -30,6 +30,7 @@
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -87,7 +88,6 @@
  *	at device setup time.
  *	NETIF_F_HW_CSUM	- it is clever device, it is able to checksum
  *			  everything.
- *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
  *	NETIF_F_IP_CSUM	- device is dumb. It is able to csum only
  *			  TCP/UDP over IPv4. Sigh. Vendors like this
  *			  way by an unknown reason. Though, see comment above
@@ -128,13 +128,17 @@ struct sk_buff_head {
 
 struct sk_buff;
 
-/* To allow 64K frame to be packed as single skb without frag_list. Since
- * GRO uses frags we allocate at least 16 regardless of page size.
+/* To allow 64K frame to be packed as single skb without frag_list we
+ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
+ * buffers which do not start on a page boundary.
+ *
+ * Since GRO uses frags we allocate at least 16 regardless of page
+ * size.
  */
-#if (65536/PAGE_SIZE + 2) < 16
+#if (65536/PAGE_SIZE + 1) < 16
 #define MAX_SKB_FRAGS 16UL
 #else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
 
 typedef struct skb_frag_struct skb_frag_t;
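A quick check of the new bound in the hunk above: with 4K pages, 65536/4096 + 1 = 17, which is not less than 16, so MAX_SKB_FRAGS becomes 17 (the old "+ 2" formula gave 18); with 64K pages, 65536/65536 + 1 = 2, which is less than 16, so the 16-frag floor kept for GRO still applies.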
@@ -218,6 +222,9 @@ enum {
 
	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,
+
+	/* generate wifi status information (where possible) */
+	SKBTX_WIFI_STATUS = 1 << 5,
 };
 
 /*
@@ -235,15 +242,15 @@ struct ubuf_info {
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
-	unsigned short	nr_frags;
+	unsigned char	nr_frags;
+	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
-	__be32		ip6_frag_id;
-	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
+	__be32		ip6_frag_id;
 
	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
@@ -352,6 +359,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
  *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
  *		ports.
+ *	@wifi_acked_valid: wifi_acked was set
+ *	@wifi_acked: whether frame was acked on wifi or not
  *	@dma_cookie: a cookie to one of several possible DMA operations
  *		done by skb DMA functions
  *	@secmark: security marking
@@ -445,10 +454,11 @@ struct sk_buff {
 #endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
+	__u8			wifi_acked_valid:1;
+	__u8			wifi_acked:1;
+	/* 10/12 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);
 
-	/* 0/13 bit hole */
-
 #ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
 #endif
@@ -540,6 +550,7 @@ extern void consume_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
+extern struct sk_buff *build_skb(void *data);
 static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
 {
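The new build_skb() declaration above takes only a data pointer; its exact contract lives in net/core/skbuff.c, not in this header. The fragment below is only a hedged sketch of how a receive path might use it, with an invented helper name, assuming the caller hands over a buffer that already leaves room for struct skb_shared_info at its end.

#include <linux/skbuff.h>

/* Hypothetical RX sketch (not from this patch): wrap an already-filled
 * receive buffer in an skb without copying the payload.
 */
static struct sk_buff *example_wrap_rx_buffer(void *buf, unsigned int len)
{
	struct sk_buff *skb = build_skb(buf);

	if (!skb)
		return NULL;	/* buffer ownership stays with the caller */

	skb_put(skb, len);	/* account for the bytes already received */
	return skb;
}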
@@ -561,8 +572,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-				 gfp_t gfp_mask);
+extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
+				   int headroom, gfp_t gfp_mask);
+
 extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
@@ -1453,6 +1465,16 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
 }
 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
 
+static inline void skb_mac_header_rebuild(struct sk_buff *skb)
+{
+	if (skb_mac_header_was_set(skb)) {
+		const unsigned char *old_mac = skb_mac_header(skb);
+
+		skb_set_mac_header(skb, -skb->mac_len);
+		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+	}
+}
+
 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
 {
	return skb->csum_start - skb_headroom(skb);
@@ -1662,38 +1684,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 }
 
 /**
- * __netdev_alloc_page - allocate a page for ps-rx on a specific device
- * @dev: network device to receive on
- * @gfp_mask: alloc_pages_node mask
- *
- * Allocate a new page. dev currently unused.
- *
- * %NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- * netdev_alloc_page - allocate a page for ps-rx on a specific device
- * @dev: network device to receive on
- *
- * Allocate a new page. dev currently unused.
- *
- * %NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
-	return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
-	__free_page(page);
-}
-
-/**
  * skb_frag_page - retrieve the page refered to by a paged fragment
  * @frag: the paged fragment
  *
@@ -1824,6 +1814,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
				    frag->page_offset + offset, size, dir);
 }
 
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+					gfp_t gfp_mask)
+{
+	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
 /**
  * skb_clone_writable - is the header of a clone writable
  * @skb: buffer to check
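With the refactor above, pskb_copy() keeps its old behaviour through the new static inline, while callers can reach __pskb_copy() directly when the copy needs more headroom than the original skb had. A hedged sketch, with an invented helper name and a placeholder 16-byte figure that is not defined by this patch:

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Illustrative only: make a header copy of skb that has room to push
 * one more (hypothetical) 16-byte encapsulation header in front of it.
 */
static struct sk_buff *example_copy_with_extra_headroom(struct sk_buff *skb)
{
	return __pskb_copy(skb, skb_headroom(skb) + 16, GFP_ATOMIC);
}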
@@ -2105,7 +2101,8 @@ extern void skb_split(struct sk_buff *skb,
 extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb,
+				   netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
					int len, void *buffer)
@@ -2263,6 +2260,15 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
	sw_tx_timestamp(skb);
 }
 
+/**
+ * skb_complete_wifi_ack - deliver skb with wifi status
+ *
+ * @skb: the original outgoing packet
+ * @acked: ack status
+ *
+ */
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
+
 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
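The wifi-status pieces added by this patch (SKBTX_WIFI_STATUS in the tx_flags enum, the wifi_acked/wifi_acked_valid bits in struct sk_buff, and the skb_complete_wifi_ack() declaration above) fit together roughly as in the sketch below. This is a hedged illustration with invented function and parameter names, not code from the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical TX-completion path in a wireless driver: if the sender
 * asked for wifi status by setting SKBTX_WIFI_STATUS, report whether
 * the peer acked the frame via skb_complete_wifi_ack(), which consumes
 * the skb; otherwise just free it as usual.
 */
static void example_tx_complete(struct sk_buff *skb, bool peer_acked)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
		skb_complete_wifi_ack(skb, peer_acked);
	else
		dev_kfree_skb_any(skb);
}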