Diffstat (limited to 'include/linux/skbuff.h')
 include/linux/skbuff.h | 262 ++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 242 insertions(+), 20 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8bd383caa363..6a6b352326d7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
+#include <linux/dma-mapping.h>
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -45,6 +46,11 @@
 #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
 #define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
 
+/* return minimum truesize of one skb containing X bytes of data */
+#define SKB_TRUESIZE(X) ((X) +						\
+			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
+			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 /* A. Checksumming of received packets by device.
  *
  * NONE: device failed to checksum this packet.
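
SKB_TRUESIZE() replaces open-coded truesize arithmetic. A minimal sketch of the intended kind of use, assuming a struct sock *sk doing receive-buffer accounting (not taken from this patch):

	/* Charge the minimum truesize of a 1500-byte frame against the
	 * socket's receive budget instead of using a magic constant. */
	if (atomic_read(&sk->sk_rmem_alloc) + SKB_TRUESIZE(1500) > sk->sk_rcvbuf)
		return -ENOMEM;
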
@@ -134,7 +140,9 @@ struct sk_buff;
 typedef struct skb_frag_struct skb_frag_t;
 
 struct skb_frag_struct {
-	struct page *page;
+	struct {
+		struct page *p;
+	} page;
 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
 	__u32 page_offset;
 	__u32 size;
@@ -144,6 +152,26 @@ struct skb_frag_struct {
 #endif
 };
 
+static inline unsigned int skb_frag_size(const skb_frag_t *frag)
+{
+	return frag->size;
+}
+
+static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
+{
+	frag->size = size;
+}
+
+static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
+{
+	frag->size += delta;
+}
+
+static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+	frag->size -= delta;
+}
+
 #define HAVE_HW_TIME_STAMP
 
 /**
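
These accessors wrap frag->size so the field's representation can change without touching every caller. A minimal sketch of a fragment walk using them (illustrative only; the skb_pagelen() hunk below is the in-tree equivalent):

	static unsigned int total_frag_bytes(const struct sk_buff *skb)
	{
		unsigned int bytes = 0;
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			bytes += skb_frag_size(&skb_shinfo(skb)->frags[i]);
		return bytes;
	}
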
@@ -322,6 +350,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@queue_mapping: Queue mapping for multiqueue devices
  *	@ndisc_nodetype: router type (from link layer)
  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
+ *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
+ *		ports.
  *	@dma_cookie: a cookie to one of several possible DMA operations
  *		done by skb DMA functions
  *	@secmark: security marking
@@ -414,6 +444,7 @@ struct sk_buff {
 	__u8			ndisc_nodetype:2;
 #endif
 	__u8			ooo_okay:1;
+	__u8			l4_rxhash:1;
 	kmemcheck_bitfield_end(flags2);
 
 	/* 0/13 bit hole */
@@ -521,6 +552,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 }
 
+extern void skb_recycle(struct sk_buff *skb);
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
@@ -573,11 +605,11 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 				    unsigned int to, struct ts_config *config,
 				    struct ts_state *state);
 
-extern __u32 __skb_get_rxhash(struct sk_buff *skb);
+extern void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
 	if (!skb->rxhash)
-		skb->rxhash = __skb_get_rxhash(skb);
+		__skb_get_rxhash(skb);
 
 	return skb->rxhash;
 }
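
__skb_get_rxhash() now writes the hash into skb->rxhash itself (setting l4_rxhash when the hash covers transport ports), so the wrapper only triggers the computation on first use. A hypothetical consumer scaling the cached hash to a queue index:

	/* Map the 32-bit rxhash onto [0, nr_queues) without a modulo. */
	static u16 pick_rx_queue(struct sk_buff *skb, u32 nr_queues)
	{
		return (u16)(((u64)skb_get_rxhash(skb) * nr_queues) >> 32);
	}
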
@@ -823,9 +855,9 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
  *	The reference count is not incremented and the reference is therefore
  *	volatile. Use with caution.
  */
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((struct sk_buff *)list_)->next;
+	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
 	if (list == (struct sk_buff *)list_)
 		list = NULL;
 	return list;
@@ -844,9 +876,9 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
  *	The reference count is not incremented and the reference is therefore
  *	volatile. Use with caution.
  */
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
 	if (list == (struct sk_buff *)list_)
 		list = NULL;
 	return list;
@@ -1123,18 +1155,51 @@ static inline int skb_pagelen(const struct sk_buff *skb)
 	int i, len = 0;
 
 	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
-		len += skb_shinfo(skb)->frags[i].size;
+		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 	return len + skb_headlen(skb);
 }
 
-static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
-				      struct page *page, int off, int size)
+/**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data within @page
+ * @size: the length of the data
+ *
+ * Initialises the @i'th fragment of @skb to point to @size bytes at
+ * offset @off within @page.
+ *
+ * Does not take any additional reference on the fragment.
+ */
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+					struct page *page, int off, int size)
 {
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-	frag->page		  = page;
+	frag->page.p		  = page;
 	frag->page_offset	  = off;
-	frag->size		  = size;
+	skb_frag_size_set(frag, size);
+}
+
+/**
+ * skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data within @page
+ * @size: the length of the data
+ *
+ * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
+ * @skb to point to @size bytes at offset @off within @page. In
+ * addition updates @skb such that @i is the last fragment.
+ *
+ * Does not take any additional reference on the fragment.
+ */
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+				      struct page *page, int off, int size)
+{
+	__skb_fill_page_desc(skb, i, page, off, size);
 	skb_shinfo(skb)->nr_frags = i + 1;
 }
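
skb_fill_page_desc() remains the helper for callers that also want nr_frags advanced; __skb_fill_page_desc() serves callers managing nr_frags themselves. A minimal sketch of attaching one received page as fragment 0 (the length/truesize bookkeeping stays the caller's job, as in real drivers):

	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len      += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;
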
@@ -1629,6 +1694,137 @@ static inline void netdev_free_page(struct net_device *dev, struct page *page)
 }
 
 /**
+ * skb_frag_page - retrieve the page referred to by a paged fragment
+ * @frag: the paged fragment
+ *
+ * Returns the &struct page associated with @frag.
+ */
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
+{
+	return frag->page.p;
+}
+
+/**
+ * __skb_frag_ref - take an additional reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Takes an additional reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_ref(skb_frag_t *frag)
+{
+	get_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_ref - take an additional reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset.
+ *
+ * Takes an additional reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
+{
+	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * __skb_frag_unref - release a reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Releases a reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_unref(skb_frag_t *frag)
+{
+	put_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_unref - release a reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset
+ *
+ * Releases a reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
+{
+	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * skb_frag_address - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. The page must already
+ * be mapped.
+ */
+static inline void *skb_frag_address(const skb_frag_t *frag)
+{
+	return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+
+/**
+ * skb_frag_address_safe - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. Checks that the page
+ * is mapped and returns %NULL otherwise.
+ */
+static inline void *skb_frag_address_safe(const skb_frag_t *frag)
+{
+	void *ptr = page_address(skb_frag_page(frag));
+	if (unlikely(!ptr))
+		return NULL;
+
+	return ptr + frag->page_offset;
+}
+
+/**
+ * __skb_frag_set_page - sets the page contained in a paged fragment
+ * @frag: the paged fragment
+ * @page: the page to set
+ *
+ * Sets the fragment @frag to contain @page.
+ */
+static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
+{
+	frag->page.p = page;
+}
+
+/**
+ * skb_frag_set_page - sets the page contained in a paged fragment of an skb
+ * @skb: the buffer
+ * @f: the fragment offset
+ * @page: the page to set
+ *
+ * Sets the @f'th fragment of @skb to contain @page.
+ */
+static inline void skb_frag_set_page(struct sk_buff *skb, int f,
+				     struct page *page)
+{
+	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
+}
+
+/**
+ * skb_frag_dma_map - maps a paged fragment via the DMA API
+ * @dev: the device to map the fragment to
+ * @frag: the paged fragment to map
+ * @offset: the offset within the fragment (starting at the
+ *          fragment's own offset)
+ * @size: the number of bytes to map
+ * @dir: the direction of the mapping (%PCI_DMA_*)
+ *
+ * Maps the page associated with @frag to @dev.
+ */
+static inline dma_addr_t skb_frag_dma_map(struct device *dev,
+					  const skb_frag_t *frag,
+					  size_t offset, size_t size,
+					  enum dma_data_direction dir)
+{
+	return dma_map_page(dev, skb_frag_page(frag),
+			    frag->page_offset + offset, size, dir);
+}
+
+/**
  * skb_clone_writable - is the header of a clone writable
  * @skb: buffer to check
  * @len: length up to which to write
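
skb_frag_dma_map() folds the fragment's own page_offset into the mapping, so drivers pass only an offset relative to the fragment. A sketch of a TX path mapping every fragment, assuming dev is the relevant struct device; the unmap_failed label and surrounding error unwinding are elided/hypothetical:

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr;

		addr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
					DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr))
			goto unmap_failed;
		/* hand (addr, skb_frag_size(frag)) to the hardware ring */
	}
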
@@ -1636,7 +1832,7 @@ static inline void netdev_free_page(struct net_device *dev, struct page *page)
  *	Returns true if modifying the header part of the cloned buffer
  *	does not require the data to be copied.
  */
-static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
+static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
 {
 	return !skb_header_cloned(skb) &&
 	       skb_headroom(skb) + len <= skb->hdr_len;
@@ -1730,13 +1926,13 @@ static inline int skb_add_data(struct sk_buff *skb,
 }
 
 static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-				   struct page *page, int off)
+				   const struct page *page, int off)
 {
 	if (i) {
-		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
 
-		return page == frag->page &&
-		       off == frag->page_offset + frag->size;
+		return page == skb_frag_page(frag) &&
+		       off == frag->page_offset + skb_frag_size(frag);
 	}
 	return 0;
 }
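
skb_can_coalesce() reports whether data at (page, off) directly continues the last fragment. Sendpage-style paths use it to grow that fragment instead of consuming a new slot; a minimal sketch, with i the current fragment count and copy the byte count being appended:

	if (skb_can_coalesce(skb, i, page, offset)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, copy);
	}
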
@@ -2020,8 +2216,13 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
 /**
  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
  *
- * @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps
+ * PHY drivers may accept clones of transmitted packets for
+ * timestamping via their phy_driver.txtstamp method. These drivers
+ * must call this function to return the skb back to the stack, with
+ * or without a timestamp.
+ *
+ * @skb: clone of the original outgoing packet
+ * @hwtstamps: hardware time stamps, may be NULL if not available
  *
  */
void skb_complete_tx_timestamp(struct sk_buff *skb,
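
A hypothetical PHY-driver path built on this contract might look as follows once (or if) the hardware timestamp arrives; priv->tx_tstamp_ns is an invented field:

	struct skb_shared_hwtstamps hwts;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = ns_to_ktime(priv->tx_tstamp_ns);
	skb_complete_tx_timestamp(skb, &hwts);

	/* or, when no timestamp could be produced: */
	skb_complete_tx_timestamp(skb, NULL);
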
@@ -2257,7 +2458,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
 	 * wanted then gso_type will be set. */
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
 	    unlikely(shinfo->gso_type == 0)) {
 		__skb_warn_lro_forwarding(skb);
@@ -2281,7 +2483,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
  * use this helper, to document places where we make this assertion.
  */
-static inline void skb_checksum_none_assert(struct sk_buff *skb)
+static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 {
 #ifdef DEBUG
 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
@@ -2290,5 +2492,25 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+{
+	if (irqs_disabled())
+		return false;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return false;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_pointer(skb) - skb->head < skb_size)
+		return false;
+
+	if (skb_shared(skb) || skb_cloned(skb))
+		return false;
+
+	return true;
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
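
Paired with skb_recycle() declared earlier, this check lets a driver's TX-completion path reuse a buffer for its RX ring. A sketch under assumed driver details (rx_buf_size and refill_rx_ring() are hypothetical):

	if (skb_is_recycleable(skb, rx_buf_size)) {
		skb_recycle(skb);
		refill_rx_ring(priv, skb);
	} else {
		dev_kfree_skb_any(skb);
	}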