path: root/include/linux/skbuff.h
author     Joseph Gasparakis <joseph.gasparakis@intel.com>   2012-12-07 09:14:14 -0500
committer  David S. Miller <davem@davemloft.net>             2012-12-09 00:20:28 -0500
commit     6a674e9c75b17e7a88ff15b3c2e269eed54f7cfb (patch)
tree       cff48ec155b5c7d8a595d66cc7dc0216a91f9ec3 /include/linux/skbuff.h
parent     9ecb9aabaf634677c77af467f4e3028b09d7bcda (diff)
net: Add support for hardware-offloaded encapsulation
This patch adds kernel support for offloading Tx and Rx checksumming of encapsulated packets (such as VXLAN and IP GRE) to the NIC.

For Tx encapsulation offload, the NIC driver needs to set the appropriate bits in netdev->hw_enc_features. The protocol driver has to set the skb->encapsulation bit and populate the inner headers, so the NIC driver can use those inner headers to calculate the checksum in hardware.

For Rx encapsulation offload, the NIC driver again needs to set the skb->encapsulation flag, and also set skb->ip_summed to CHECKSUM_UNNECESSARY. In that case the protocol driver should push the decapsulated packet up the stack, again with CHECKSUM_UNNECESSARY. In either case, the protocol driver should set the skb->encapsulation flag back to zero. Finally, the protocol driver should have the NETIF_F_RXCSUM flag set in its features.

Signed-off-by: Joseph Gasparakis <joseph.gasparakis@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
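[Editor's note] The Tx flow described above amounts to only a few lines in the tunnel (protocol) driver. The sketch below is illustrative, assuming a hypothetical helper called from a tunnel's xmit path; the skb fields and the skb_reset_inner_headers()/skb_reset_network_header()/skb_set_transport_header() helpers are from this patch and the existing skbuff.h API, while the function name and call site are assumptions.

#include <linux/skbuff.h>

/* Hypothetical tunnel-driver helper, called while skb->network_header and
 * skb->transport_header still point at the inner packet (the outer
 * encapsulation headers have not been pushed yet).
 */
static void tunnel_mark_inner_headers(struct sk_buff *skb)
{
        /* Snapshot the inner offsets so the NIC driver can find them later */
        skb_reset_inner_headers(skb);

        /* Tell the NIC driver this skb carries an encapsulated packet whose
         * inner headers are valid for checksum offload.
         */
        skb->encapsulation = 1;

        /* The tunnel code would now push its outer headers (e.g. VXLAN/UDP/IP
         * or GRE/IP) and repoint network_header/transport_header at them via
         * skb_push(), skb_reset_network_header() and skb_set_transport_header().
         */
}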
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 95
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f2af494330ab..320e976d5ab8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -376,6 +376,8 @@ typedef unsigned char *sk_buff_data_t;
  * @mark: Generic packet mark
  * @dropcount: total number of sk_receive_queue overflows
  * @vlan_tci: vlan tag control information
+ * @inner_transport_header: Inner transport layer header (encapsulation)
+ * @inner_network_header: Network layer header (encapsulation)
  * @transport_header: Transport layer header
  * @network_header: Network layer header
  * @mac_header: Link layer header
@@ -471,7 +473,13 @@ struct sk_buff {
         __u8                    wifi_acked:1;
         __u8                    no_fcs:1;
         __u8                    head_frag:1;
-        /* 8/10 bit hole (depending on ndisc_nodetype presence) */
+        /* Encapsulation protocol and NIC drivers should use
+         * this flag to indicate to each other if the skb contains
+         * encapsulated packet or not and maybe use the inner packet
+         * headers if needed
+         */
+        __u8                    encapsulation:1;
+        /* 7/9 bit hole (depending on ndisc_nodetype presence) */
         kmemcheck_bitfield_end(flags2);
 
 #ifdef CONFIG_NET_DMA
@@ -486,6 +494,8 @@ struct sk_buff {
                 __u32           avail_size;
         };
 
+        sk_buff_data_t          inner_transport_header;
+        sk_buff_data_t          inner_network_header;
         sk_buff_data_t          transport_header;
         sk_buff_data_t          network_header;
         sk_buff_data_t          mac_header;
@@ -1435,12 +1445,53 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
         skb->tail += len;
 }
 
+static inline void skb_reset_inner_headers(struct sk_buff *skb)
+{
+        skb->inner_network_header = skb->network_header;
+        skb->inner_transport_header = skb->transport_header;
+}
+
 static inline void skb_reset_mac_len(struct sk_buff *skb)
 {
         skb->mac_len = skb->network_header - skb->mac_header;
 }
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
+static inline unsigned char *skb_inner_transport_header(const struct sk_buff
+                                                         *skb)
+{
+        return skb->head + skb->inner_transport_header;
+}
+
+static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
+{
+        skb->inner_transport_header = skb->data - skb->head;
+}
+
+static inline void skb_set_inner_transport_header(struct sk_buff *skb,
+                                                  const int offset)
+{
+        skb_reset_inner_transport_header(skb);
+        skb->inner_transport_header += offset;
+}
+
+static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
+{
+        return skb->head + skb->inner_network_header;
+}
+
+static inline void skb_reset_inner_network_header(struct sk_buff *skb)
+{
+        skb->inner_network_header = skb->data - skb->head;
+}
+
+static inline void skb_set_inner_network_header(struct sk_buff *skb,
+                                                const int offset)
+{
+        skb_reset_inner_network_header(skb);
+        skb->inner_network_header += offset;
+}
+
 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
 {
         return skb->head + skb->transport_header;
@@ -1496,6 +1547,38 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
 }
 
 #else /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline unsigned char *skb_inner_transport_header(const struct sk_buff
+                                                         *skb)
+{
+        return skb->inner_transport_header;
+}
+
+static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
+{
+        skb->inner_transport_header = skb->data;
+}
+
+static inline void skb_set_inner_transport_header(struct sk_buff *skb,
+                                                  const int offset)
+{
+        skb->inner_transport_header = skb->data + offset;
+}
+
+static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
+{
+        return skb->inner_network_header;
+}
+
+static inline void skb_reset_inner_network_header(struct sk_buff *skb)
+{
+        skb->inner_network_header = skb->data;
+}
+
+static inline void skb_set_inner_network_header(struct sk_buff *skb,
+                                                const int offset)
+{
+        skb->inner_network_header = skb->data + offset;
+}
 
 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
 {
@@ -1574,11 +1657,21 @@ static inline u32 skb_network_header_len(const struct sk_buff *skb)
         return skb->transport_header - skb->network_header;
 }
 
+static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+{
+        return skb->inner_transport_header - skb->inner_network_header;
+}
+
 static inline int skb_network_offset(const struct sk_buff *skb)
 {
         return skb_network_header(skb) - skb->data;
 }
 
+static inline int skb_inner_network_offset(const struct sk_buff *skb)
+{
+        return skb_inner_network_header(skb) - skb->data;
+}
+
 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 {
         return pskb_may_pull(skb, skb_network_offset(skb) + len);
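
[Editor's note] On the consuming side, a NIC driver whose hardware can checksum the inner packet would switch between the outer accessors and the inner accessors added above based on skb->encapsulation. The sketch below is an assumption about how such a driver might pick its offsets; the function name and the way the offsets reach a Tx descriptor are hypothetical, and only the skb helpers come from this patch and the existing skbuff.h API.

#include <linux/skbuff.h>

/* Hypothetical helper in a NIC driver's Tx path: pick the L3/L4 offsets the
 * hardware checksum engine should use.  Feeding them into a descriptor is
 * device specific and not shown.
 */
static void nic_tx_csum_offsets(const struct sk_buff *skb,
                                unsigned int *l3_off, unsigned int *l4_off)
{
        if (skb->encapsulation) {
                /* The protocol driver recorded the inner header positions,
                 * so checksum the encapsulated (inner) packet.
                 */
                *l3_off = skb_inner_network_offset(skb);
                *l4_off = skb_inner_transport_header(skb) - skb->data;
        } else {
                /* Plain, non-tunneled packet: use the outer headers. */
                *l3_off = skb_network_offset(skb);
                *l4_off = skb_transport_offset(skb);
        }
}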