author		David S. Miller <davem@davemloft.net>	2016-02-18 23:47:04 -0500
committer	David S. Miller <davem@davemloft.net>	2016-02-18 23:47:04 -0500
commit		d289cbed9d55a94e44c0ff2c9803e1d68e4b57fe (patch)
tree		4c0133a234f5ea4a6932d3a81f3ea6c8eb0bd8d4
parent		376471a7b6e93067cb8a0ce5e57e8bd6071eebdd (diff)
parent		ffcc55c0c2a85835a4ac080bc1053c3a277b88e2 (diff)
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-02-18

This series contains updates to i40e and i40evf only.

Alex Duyck provides all the patches in the series to update and fix the
drivers. Fixed the driver to drop the outer checksum offload on UDP
tunnels, since the upper levels of the stack never requested such an
offload and it results in possible errors. Updates the TSO function to
just use u64 values, so we do not have to end up casting u32 values. In
the TSO path, factored out the L4 header offsets, allowing us to ignore
the L4 header offsets when dealing with the L3 checksum and length
update. Consolidates all of the spots where we were updating either the
TCP or IP checksums in the TSO and checksum path into the TSO function.
Fixed two issues by adding support for IPv4 encapsulated in IPv6: the
first issue was that ip_hdr(skb)->protocol was being used to test for
the outer transport protocol, which breaks IPv6 support; the second was
that we cleared the flag for v4 going to v6, but we did not take care
of tx_flags going the other way. Added support for IPv6 extension
headers in setting up the Tx checksum. Added exception handling to the
Tx checksum path so that we can handle cases of TSO where the frame is
bad, or Tx checksum where we did not recognize a protocol. Fixed a
number of issues to make certain that we are using the correct
protocols when parsing both the inner and outer headers of a frame that
is mixed between IPv4 and IPv6 for inner and outer. Updated the feature
flags to reflect the newly enabled/added features.

Sorry, no witty patch descriptions this time around, probably should
let Mitch help in writing patch descriptions for Alex. :-)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
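[Editor's note] The checksum consolidation described above leans on a
one's-complement identity: a pseudo-header checksum that was folded with
the payload length included becomes the zero-length variant the TSO
hardware wants once you add the complement of that length and refold.
A minimal userspace sketch of just that arithmetic follows; it is
illustrative only, not the driver code, which applies the same math to
live skb header fields:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* pseudo-header words: source/dest address halves plus protocol */
	uint32_t base = 0xc0a8 + 0x0001 + 0xc0a8 + 0x0002 + 6 /* TCP */;
	uint16_t len = 1400;	/* L4 length the stack folded in */

	uint16_t with_len = csum_fold16(base + len);
	/* adding ~len (0xffff - len) cancels len modulo 0xffff */
	uint16_t without_len = csum_fold16((uint32_t)with_len + (uint16_t)~len);

	/* both print 0x815a: the length has been removed from the sum */
	printf("recomputed=%#x adjusted=%#x\n", csum_fold16(base), without_len);
	return 0;
}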
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_main.c	28
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_txrx.c	404
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_txrx.h	2
-rw-r--r--	drivers/net/ethernet/intel/i40evf/i40e_txrx.c	360
-rw-r--r--	drivers/net/ethernet/intel/i40evf/i40e_txrx.h	2
-rw-r--r--	drivers/net/ethernet/intel/i40evf/i40evf_main.c	23
6 files changed, 433 insertions, 386 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 16e5e0b81bd0..2f2b2d714f63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7474,8 +7474,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 		tx_ring->dcb_tc = 0;
 		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
 			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
-		if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
-			tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
 		vsi->tx_rings[i] = tx_ring;
 
 		rx_ring = &tx_ring[1];
@@ -8628,9 +8626,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 	u8 next_idx;
 	u8 idx;
 
-	if (sa_family == AF_INET6)
-		return;
-
 	idx = i40e_get_udp_port_idx(pf, port);
 
 	/* Check if port already exists */
@@ -8670,9 +8665,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 	struct i40e_pf *pf = vsi->back;
 	u8 idx;
 
-	if (sa_family == AF_INET6)
-		return;
-
 	idx = i40e_get_udp_port_idx(pf, port);
 
 	/* Check if port already exists */
@@ -8709,9 +8701,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
 	if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
 		return;
 
-	if (sa_family == AF_INET6)
-		return;
-
 	idx = i40e_get_udp_port_idx(pf, port);
 
 	/* Check if port already exists */
@@ -8753,9 +8742,6 @@ static void i40e_del_geneve_port(struct net_device *netdev,
 	struct i40e_pf *pf = vsi->back;
 	u8 idx;
 
-	if (sa_family == AF_INET6)
-		return;
-
 	if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
 		return;
 
@@ -9046,10 +9032,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	np = netdev_priv(netdev);
 	np->vsi = vsi;
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM |
-				   NETIF_F_GSO_UDP_TUNNEL |
-				   NETIF_F_GSO_GRE |
-				   NETIF_F_TSO |
+				   NETIF_F_IPV6_CSUM |
+				   NETIF_F_TSO |
+				   NETIF_F_TSO6 |
+				   NETIF_F_TSO_ECN |
+				   NETIF_F_GSO_GRE |
+				   NETIF_F_GSO_UDP_TUNNEL |
+				   NETIF_F_GSO_UDP_TUNNEL_CSUM |
 				   0;
 
 	netdev->features = NETIF_F_SG |
@@ -9071,6 +9061,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 
 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
 		netdev->features |= NETIF_F_NTUPLE;
+	if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 65f2fd80aa79..1d3afa7dda18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1389,16 +1389,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    u16 rx_ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-	bool ipv4 = false, ipv6 = false;
-	bool ipv4_tunnel, ipv6_tunnel;
-	__wsum rx_udp_csum;
-	struct iphdr *iph;
-	__sum16 csum;
-
-	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+	bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
 
 	skb->ip_summed = CHECKSUM_NONE;
 
@@ -1414,12 +1405,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (!(decoded.known && decoded.outer_ip))
 		return;
 
-	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
-		ipv4 = true;
-	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
-		ipv6 = true;
+	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
 
 	if (ipv4 &&
 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1443,37 +1432,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
-	/* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check
-	 * it in the driver, hardware does not do it for us.
-	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
-	 * so the total length of IPv4 header is IHL*4 bytes
-	 * The UDP_0 bit *may* bet set if the *inner* header is UDP
-	 */
-	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
-	    (ipv4_tunnel)) {
-		skb->transport_header = skb->mac_header +
-					sizeof(struct ethhdr) +
-					(ip_hdr(skb)->ihl * 4);
-
-		/* Add 4 bytes for VLAN tagged packets */
-		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
-					  skb->protocol == htons(ETH_P_8021AD))
-					  ? VLAN_HLEN : 0;
-
-		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
-		    (udp_hdr(skb)->check != 0)) {
-			rx_udp_csum = udp_csum(skb);
-			iph = ip_hdr(skb);
-			csum = csum_tcpudp_magic(
-					iph->saddr, iph->daddr,
-					(skb->len - skb_transport_offset(skb)),
-					IPPROTO_UDP, rx_udp_csum);
-
-			if (udp_hdr(skb)->check != csum)
-				goto checksum_fail;
-
-		} /* else its GRE and so no outer UDP header */
-	}
+	/* The hardware supported by this driver does not validate outer
+	 * checksums for tunneled VXLAN or GENEVE frames. I don't agree
+	 * with it but the specification states that you "MAY validate", it
+	 * doesn't make it a hard requirement so if we have validated the
+	 * inner checksum report CHECKSUM_UNNECESSARY.
+	 */
+
+	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb->csum_level = ipv4_tunnel || ipv6_tunnel;
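[Editor's note] For context on the last two lines of the hunk above:
csum_level counts how many checksums beyond the first the stack may
treat as already verified, so a tunneled frame whose inner checksum was
validated reports level 1. A stand-in sketch of that bookkeeping (fake
struct; the real fields live in struct sk_buff):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the two sk_buff fields set at the end of the hunk */
struct fake_skb {
	int ip_summed;		/* 1 == CHECKSUM_UNNECESSARY */
	int csum_level;		/* checksums verified == csum_level + 1 */
};

int main(void)
{
	bool ipv4_tunnel = true, ipv6_tunnel = false;	/* from the ptype */
	struct fake_skb skb = { .ip_summed = 1 };

	/* an encapsulated frame carries an outer and an inner checksum;
	 * only vouch for both when the ptype said this was a tunnel */
	skb.csum_level = ipv4_tunnel || ipv6_tunnel;
	printf("checksums reported verified: %d\n", skb.csum_level + 1);
	return 0;
}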
@@ -2061,10 +2030,9 @@ tx_only:
  * @tx_ring: ring to add programming descriptor to
  * @skb: send buffer
  * @tx_flags: send tx flags
- * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, __be16 protocol)
+		     u32 tx_flags)
 {
 	struct i40e_filter_program_desc *fdir_desc;
 	struct i40e_pf *pf = tx_ring->vsi->back;
@@ -2076,6 +2044,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	struct tcphdr *th;
 	unsigned int hlen;
 	u32 flex_ptype, dtype_cmd;
+	int l4_proto;
 	u16 i;
 
 	/* make sure ATR is enabled */
@@ -2089,36 +2058,28 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (!tx_ring->atr_sample_rate)
 		return;
 
+	/* Currently only IPv4/IPv6 with TCP is supported */
 	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
 		return;
 
-	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) {
-		/* snag network header to get L4 type and address */
-		hdr.network = skb_network_header(skb);
+	/* snag network header to get L4 type and address */
+	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
+		      skb_inner_network_header(skb) : skb_network_header(skb);
 
-		/* Currently only IPv4/IPv6 with TCP is supported
-		 * access ihl as u8 to avoid unaligned access on ia64
-		 */
-		if (tx_flags & I40E_TX_FLAGS_IPV4)
-			hlen = (hdr.network[0] & 0x0F) << 2;
-		else if (protocol == htons(ETH_P_IPV6))
-			hlen = sizeof(struct ipv6hdr);
-		else
-			return;
+	/* Note: tx_flags gets modified to reflect inner protocols in
+	 * tx_enable_csum function if encap is enabled.
+	 */
+	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+		/* access ihl as u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+		l4_proto = hdr.ipv4->protocol;
 	} else {
-		hdr.network = skb_inner_network_header(skb);
-		hlen = skb_inner_network_header_len(skb);
+		hlen = hdr.network - skb->data;
+		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+		hlen -= hdr.network - skb->data;
 	}
 
-	/* Currently only IPv4/IPv6 with TCP is supported
-	 * Note: tx_flags gets modified to reflect inner protocols in
-	 * tx_enable_csum function if encap is enabled.
-	 */
-	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
-	    (hdr.ipv4->protocol != IPPROTO_TCP))
-		return;
-	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
-		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
+	if (l4_proto != IPPROTO_TCP)
 		return;
 
 	th = (struct tcphdr *)(hdr.network + hlen);
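[Editor's note] The ipv6_find_hdr() call above walks the IPv6
extension-header chain, returning the final protocol while updating the
offset argument, which is why the code can subtract the network-header
offset afterwards to recover hlen. A self-contained userspace
approximation of that walk (simplified: no fragment or AH handling,
helper name invented for this sketch):

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_HOP	0	/* hop-by-hop options */
#define NEXTHDR_TCP	6
#define NEXTHDR_ROUTING	43
#define NEXTHDR_DEST	60	/* destination options */

/* Return the L4 protocol and store its offset; -1 on truncation. */
static int find_l4(const uint8_t *pkt, int len, int *offset)
{
	int off = 40;			/* fixed IPv6 header size */
	uint8_t nexthdr = pkt[6];	/* Next Header field */

	while (nexthdr == NEXTHDR_HOP || nexthdr == NEXTHDR_ROUTING ||
	       nexthdr == NEXTHDR_DEST) {
		if (off + 8 > len)
			return -1;
		nexthdr = pkt[off];		/* byte 0: next header */
		off += (pkt[off + 1] + 1) * 8;	/* byte 1: length in 8s */
	}
	*offset = off;
	return nexthdr;
}

int main(void)
{
	uint8_t pkt[128] = {0};
	int off;

	pkt[6] = NEXTHDR_HOP;	/* IPv6 header chains to hop-by-hop */
	pkt[40] = NEXTHDR_TCP;	/* which in turn chains to TCP */
	pkt[41] = 0;		/* (0 + 1) * 8 == 8-byte extension */

	int proto = find_l4(pkt, sizeof(pkt), &off);
	printf("l4_proto=%d l4_offset=%d\n", proto, off);	/* 6, 48 */
	return 0;
}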
@@ -2155,7 +2116,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
 		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
-	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
@@ -2295,11 +2256,18 @@ out:
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
 {
-	u32 cd_cmd, cd_tso_len, cd_mss;
-	struct ipv6hdr *ipv6h;
-	struct tcphdr *tcph;
-	struct iphdr *iph;
-	u32 l4len;
+	u64 cd_cmd, cd_tso_len, cd_mss;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2312,35 +2280,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
-	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
-	if (iph->version == 4) {
-		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-						 0, IPPROTO_TCP, 0);
-	} else if (ipv6h->version == 6) {
-		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
-		ipv6h->payload_len = 0;
-		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
-					       0, IPPROTO_TCP, 0);
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+			/* determine offset of outer transport header */
+			l4_offset = l4.hdr - skb->data;
+
+			/* remove payload length from outer checksum */
+			paylen = (__force u16)l4.udp->check;
+			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+			l4.udp->check = ~csum_fold((__force __wsum)paylen);
+		}
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		/* initialize inner IP header fields */
+		if (ip.v4->version == 4) {
+			ip.v4->tot_len = 0;
+			ip.v4->check = 0;
+		} else {
+			ip.v6->payload_len = 0;
+		}
 	}
 
-	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
-	*hdr_len = (skb->encapsulation
-		    ? (skb_inner_transport_header(skb) - skb->data)
-		    : skb_transport_offset(skb)) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* remove payload length from inner checksum */
+	paylen = (__force u16)l4.tcp->check;
+	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+	l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
 	/* find the field values */
 	cd_cmd = I40E_TX_CTX_DESC_TSO;
 	cd_tso_len = skb->len - *hdr_len;
 	cd_mss = skb_shinfo(skb)->gso_size;
-	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
-				((u64)cd_tso_len <<
-				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
 	return 1;
 }
 
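[Editor's note] One payoff of moving i40e_tso() to u64 locals is visible
in the final OR above: with u32 locals every shifted term needed an
explicit (u64) cast, since shifting a 32-bit value by more than 31 bits
is undefined in C. A standalone comparison (shift names mirror the
driver's; the exact bit positions are assumed here for illustration):

#include <stdint.h>
#include <stdio.h>

/* assumed QW1 bit positions, for illustration only */
#define QW1_CMD_SHIFT		4
#define QW1_TSO_LEN_SHIFT	30
#define QW1_MSS_SHIFT		50

int main(void)
{
	uint32_t cmd32 = 1, mss32 = 1460;
	uint64_t cmd64 = 1, mss64 = 1460;

	/* a u32 shifted by 50 would be undefined/truncated ... */
	uint64_t casted = ((uint64_t)cmd32 << QW1_CMD_SHIFT) |
			  ((uint64_t)mss32 << QW1_MSS_SHIFT); /* casts required */
	/* ... while u64 locals need no cast at each use site */
	uint64_t plain = (cmd64 << QW1_CMD_SHIFT) | (mss64 << QW1_MSS_SHIFT);

	printf("%d\n", casted == plain);	/* 1 */
	return 0;
}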
@@ -2395,129 +2388,154 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * @tx_ring: Tx descriptor ring
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
 {
-	struct ipv6hdr *this_ipv6_hdr;
-	unsigned int this_tcp_hdrlen;
-	struct iphdr *this_ip_hdr;
-	u32 network_hdr_len;
-	u8 l4_hdr = 0;
-	struct udphdr *oudph = NULL;
-	struct iphdr *oiph = NULL;
-	u32 l4_tunnel = 0;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	unsigned char *exthdr;
+	u32 offset, cmd = 0, tunnel = 0;
+	__be16 frag_off;
+	u8 l4_proto = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* compute outer L2 header size */
+	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	if (skb->encapsulation) {
-		switch (ip_hdr(skb)->protocol) {
+		/* define outer network header type */
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+				  I40E_TX_CTX_EXT_IP_IPV4 :
+				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+			l4_proto = ip.v4->protocol;
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
+		/* define outer transport */
+		switch (l4_proto) {
 		case IPPROTO_UDP:
-			oudph = udp_hdr(skb);
-			oiph = ip_hdr(skb);
-			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
 			break;
 		case IPPROTO_GRE:
-			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
 			break;
 		default:
-			return;
-		}
-		network_hdr_len = skb_inner_network_header_len(skb);
-		this_ip_hdr = inner_ip_hdr(skb);
-		this_ipv6_hdr = inner_ipv6_hdr(skb);
-		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
-
-		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-			if (*tx_flags & I40E_TX_FLAGS_TSO) {
-				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-				ip_hdr(skb)->check = 0;
-			} else {
-				*cd_tunneling |=
-					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-			}
-		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
 			if (*tx_flags & I40E_TX_FLAGS_TSO)
-				ip_hdr(skb)->check = 0;
-		}
-
-		/* Now set the ctx descriptor fields */
-		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-				   l4_tunnel |
-				   ((skb_inner_network_offset(skb) -
-					skb_transport_offset(skb)) >> 1) <<
-				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-		if (this_ip_hdr->version == 6) {
-			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+				return -1;
+
+			skb_checksum_help(skb);
+			return 0;
+		}
+
+		/* compute tunnel header size */
+		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+		/* indicate if we need to offload outer UDP header */
+		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+		/* record tunnel offload values */
+		*cd_tunneling |= tunnel;
+
+		/* switch L4 header pointer from outer to inner */
+		l4.hdr = skb_inner_transport_header(skb);
+		l4_proto = 0;
+
+		/* reset type as we transition from outer to inner headers */
+		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			*tx_flags |= I40E_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
-		}
-		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
-		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
-		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
-			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
-					oiph->daddr,
-					(skb->len - skb_transport_offset(skb)),
-					IPPROTO_UDP, 0);
-			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
-		}
-	} else {
-		network_hdr_len = skb_network_header_len(skb);
-		this_ip_hdr = ip_hdr(skb);
-		this_ipv6_hdr = ipv6_hdr(skb);
-		this_tcp_hdrlen = tcp_hdrlen(skb);
 	}
 
 	/* Enable IP checksum offloads */
 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-		l4_hdr = this_ip_hdr->protocol;
+		l4_proto = ip.v4->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (*tx_flags & I40E_TX_FLAGS_TSO) {
-			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
-			this_ip_hdr->check = 0;
-		} else {
-			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
-		}
-		/* Now set the td_offset for IP header length */
-		*td_offset = (network_hdr_len >> 2) <<
-			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+		       I40E_TX_DESC_CMD_IIPT_IPV4;
 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-		l4_hdr = this_ipv6_hdr->nexthdr;
-		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-		/* Now set the td_offset for IP header length */
-		*td_offset = (network_hdr_len >> 2) <<
-			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data,
+					 &l4_proto, &frag_off);
 	}
-	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
-	*td_offset |= (skb_network_offset(skb) >> 1) <<
-		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* compute inner L3 header size */
+	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
 
 	/* Enable L4 checksum offloads */
-	switch (l4_hdr) {
+	switch (l4_proto) {
 	case IPPROTO_TCP:
 		/* enable checksum offloads */
-		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (this_tcp_hdrlen >> 2) <<
-			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case IPPROTO_SCTP:
 		/* enable SCTP checksum offload */
-		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
-			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct sctphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case IPPROTO_UDP:
 		/* enable UDP checksum offload */
-		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct udphdr) >> 2) <<
-			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct udphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	default:
-		break;
+		if (*tx_flags & I40E_TX_FLAGS_TSO)
+			return -1;
+		skb_checksum_help(skb);
+		return 0;
 	}
+
+	*td_cmd |= cmd;
+	*td_offset |= offset;
+
+	return 1;
 }
 
 /**
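[Editor's note] The offset word assembled by the rewritten function
packs three header lengths in different units, as the removed comment
spelled out: MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte dwords.
A small sketch of that packing with assumed shift values (the real
constants live in the driver headers):

#include <stdint.h>
#include <stdio.h>

/* assumed field positions within the descriptor offset word */
#define MACLEN_SHIFT	0	/* units of 2 bytes */
#define IPLEN_SHIFT	7	/* units of 4 bytes */
#define L4LEN_SHIFT	14	/* units of 4 bytes */

int main(void)
{
	uint32_t mac_hdr = 14, ip_hdr = 20, tcp_hdr = 20;	/* bytes */

	uint32_t offset = (mac_hdr / 2) << MACLEN_SHIFT |
			  (ip_hdr / 4) << IPLEN_SHIFT |
			  (tcp_hdr / 4) << L4LEN_SHIFT;

	printf("offset=0x%05x\n", offset);	/* 0x14287 */
	return 0;
}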
@@ -2954,12 +2972,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
 	/* Always offload the checksum, since it's in the data descriptor */
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		tx_flags |= I40E_TX_FLAGS_CSUM;
-
-		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
-				    tx_ring, &cd_tunneling);
-	}
+	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+				  tx_ring, &cd_tunneling);
+	if (tso < 0)
+		goto out_drop;
 
 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);
@@ -2968,7 +2984,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	 *
 	 * NOTE: this must always be directly before the data descriptor.
 	 */
-	i40e_atr(tx_ring, skb, tx_flags, protocol);
+	i40e_atr(tx_ring, skb, tx_flags);
 
 	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 		    td_cmd, td_offset);
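[Editor's note] Both i40e_tso() and the reworked i40e_tx_enable_csum()
now share a three-way return convention this hot path relies on:
negative means drop the frame, 0 means no offload was configured,
positive means descriptor offload state was written. A compact sketch
of the caller-side pattern (hypothetical helper names):

#include <stdio.h>

/* hypothetical offload helper: <0 drop, 0 no offload, >0 offload set up */
static int enable_offload(int frame_ok)
{
	if (!frame_ok)
		return -1;	/* malformed: caller must drop */
	return 1;		/* offload descriptor fields written */
}

static const char *xmit(int frame_ok)
{
	int ret = enable_offload(frame_ok);

	if (ret < 0)
		return "out_drop";	/* mirrors the goto in the hunk */
	return "transmit";
}

int main(void)
{
	printf("%s %s\n", xmit(1), xmit(0));	/* transmit out_drop */
	return 0;
}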
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 3acc9244134d..fde5f42524fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -153,7 +153,6 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		BIT(0)
 #define I40E_TX_FLAGS_HW_VLAN		BIT(1)
 #define I40E_TX_FLAGS_SW_VLAN		BIT(2)
 #define I40E_TX_FLAGS_TSO		BIT(3)
@@ -277,7 +276,6 @@ struct i40e_ring {
 
 	u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
-#define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
 #define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
 	/* stats structs */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index fb6cd7e5d3be..6d66fcdc6122 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -861,16 +861,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    u16 rx_ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-	bool ipv4 = false, ipv6 = false;
-	bool ipv4_tunnel, ipv6_tunnel;
-	__wsum rx_udp_csum;
-	struct iphdr *iph;
-	__sum16 csum;
-
-	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+	bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
 
 	skb->ip_summed = CHECKSUM_NONE;
 
@@ -886,12 +877,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (!(decoded.known && decoded.outer_ip))
 		return;
 
-	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
-		ipv4 = true;
-	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
-		ipv6 = true;
+	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
 
 	if (ipv4 &&
 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -915,36 +904,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
-	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
-	 * it in the driver, hardware does not do it for us.
-	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
-	 * so the total length of IPv4 header is IHL*4 bytes
-	 * The UDP_0 bit *may* bet set if the *inner* header is UDP
-	 */
-	if (ipv4_tunnel) {
-		skb->transport_header = skb->mac_header +
-					sizeof(struct ethhdr) +
-					(ip_hdr(skb)->ihl * 4);
-
-		/* Add 4 bytes for VLAN tagged packets */
-		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
-					  skb->protocol == htons(ETH_P_8021AD))
-					  ? VLAN_HLEN : 0;
-
-		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
-		    (udp_hdr(skb)->check != 0)) {
-			rx_udp_csum = udp_csum(skb);
-			iph = ip_hdr(skb);
-			csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
-						 (skb->len -
-						  skb_transport_offset(skb)),
-						 IPPROTO_UDP, rx_udp_csum);
-
-			if (udp_hdr(skb)->check != csum)
-				goto checksum_fail;
-
-		} /* else its GRE and so no outer UDP header */
-	}
+	/* The hardware supported by this driver does not validate outer
+	 * checksums for tunneled VXLAN or GENEVE frames. I don't agree
+	 * with it but the specification states that you "MAY validate", it
+	 * doesn't make it a hard requirement so if we have validated the
+	 * inner checksum report CHECKSUM_UNNECESSARY.
+	 */
+
+	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb->csum_level = ipv4_tunnel || ipv6_tunnel;
@@ -1554,11 +1524,18 @@ out:
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
 {
-	u32 cd_cmd, cd_tso_len, cd_mss;
-	struct ipv6hdr *ipv6h;
-	struct tcphdr *tcph;
-	struct iphdr *iph;
-	u32 l4len;
+	u64 cd_cmd, cd_tso_len, cd_mss;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1571,35 +1548,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
-	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
-	if (iph->version == 4) {
-		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-						 0, IPPROTO_TCP, 0);
-	} else if (ipv6h->version == 6) {
-		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
-		ipv6h->payload_len = 0;
-		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
-					       0, IPPROTO_TCP, 0);
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+			/* determine offset of outer transport header */
+			l4_offset = l4.hdr - skb->data;
+
+			/* remove payload length from outer checksum */
+			paylen = (__force u16)l4.udp->check;
+			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+			l4.udp->check = ~csum_fold((__force __wsum)paylen);
+		}
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		/* initialize inner IP header fields */
+		if (ip.v4->version == 4) {
+			ip.v4->tot_len = 0;
+			ip.v4->check = 0;
+		} else {
+			ip.v6->payload_len = 0;
+		}
 	}
 
-	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
-	*hdr_len = (skb->encapsulation
-		    ? (skb_inner_transport_header(skb) - skb->data)
-		    : skb_transport_offset(skb)) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* remove payload length from inner checksum */
+	paylen = (__force u16)l4.tcp->check;
+	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+	l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
 	/* find the field values */
 	cd_cmd = I40E_TX_CTX_DESC_TSO;
 	cd_tso_len = skb->len - *hdr_len;
 	cd_mss = skb_shinfo(skb)->gso_size;
-	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
-				((u64)cd_tso_len <<
-				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
 	return 1;
 }
 
@@ -1609,129 +1611,157 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
 {
-	struct ipv6hdr *this_ipv6_hdr;
-	unsigned int this_tcp_hdrlen;
-	struct iphdr *this_ip_hdr;
-	u32 network_hdr_len;
-	u8 l4_hdr = 0;
-	struct udphdr *oudph;
-	struct iphdr *oiph;
-	u32 l4_tunnel = 0;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	unsigned char *exthdr;
+	u32 offset, cmd = 0, tunnel = 0;
+	__be16 frag_off;
+	u8 l4_proto = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* compute outer L2 header size */
+	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	if (skb->encapsulation) {
-		switch (ip_hdr(skb)->protocol) {
+		/* define outer network header type */
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+				  I40E_TX_CTX_EXT_IP_IPV4 :
+				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+			l4_proto = ip.v4->protocol;
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
+		/* define outer transport */
+		switch (l4_proto) {
 		case IPPROTO_UDP:
-			oudph = udp_hdr(skb);
-			oiph = ip_hdr(skb);
-			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+			break;
+		case IPPROTO_GRE:
+			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
-			return;
-		}
-		network_hdr_len = skb_inner_network_header_len(skb);
-		this_ip_hdr = inner_ip_hdr(skb);
-		this_ipv6_hdr = inner_ipv6_hdr(skb);
-		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
-
-		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-			if (*tx_flags & I40E_TX_FLAGS_TSO) {
-				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-				ip_hdr(skb)->check = 0;
-			} else {
-				*cd_tunneling |=
-					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-			}
-		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
 			if (*tx_flags & I40E_TX_FLAGS_TSO)
-				ip_hdr(skb)->check = 0;
-		}
-
-		/* Now set the ctx descriptor fields */
-		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-				   l4_tunnel |
-				   ((skb_inner_network_offset(skb) -
-					skb_transport_offset(skb)) >> 1) <<
-				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-		if (this_ip_hdr->version == 6) {
-			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+				return -1;
+
+			skb_checksum_help(skb);
+			return 0;
+		}
+
+		/* compute tunnel header size */
+		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+		/* indicate if we need to offload outer UDP header */
+		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+		/* record tunnel offload values */
+		*cd_tunneling |= tunnel;
+
+		/* switch L4 header pointer from outer to inner */
+		l4.hdr = skb_inner_transport_header(skb);
+		l4_proto = 0;
+
+		/* reset type as we transition from outer to inner headers */
+		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			*tx_flags |= I40E_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
-		}
-
-		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
-		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
-		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
-			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
-					oiph->daddr,
-					(skb->len - skb_transport_offset(skb)),
-					IPPROTO_UDP, 0);
-			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
-		}
-	} else {
-		network_hdr_len = skb_network_header_len(skb);
-		this_ip_hdr = ip_hdr(skb);
-		this_ipv6_hdr = ipv6_hdr(skb);
-		this_tcp_hdrlen = tcp_hdrlen(skb);
 	}
 
 	/* Enable IP checksum offloads */
 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-		l4_hdr = this_ip_hdr->protocol;
+		l4_proto = ip.v4->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (*tx_flags & I40E_TX_FLAGS_TSO) {
-			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
-			this_ip_hdr->check = 0;
-		} else {
-			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
-		}
-		/* Now set the td_offset for IP header length */
-		*td_offset = (network_hdr_len >> 2) <<
-			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+		       I40E_TX_DESC_CMD_IIPT_IPV4;
 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-		l4_hdr = this_ipv6_hdr->nexthdr;
-		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-		/* Now set the td_offset for IP header length */
-		*td_offset = (network_hdr_len >> 2) <<
-			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data,
+					 &l4_proto, &frag_off);
 	}
-	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
-	*td_offset |= (skb_network_offset(skb) >> 1) <<
-		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* compute inner L3 header size */
+	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
 
 	/* Enable L4 checksum offloads */
-	switch (l4_hdr) {
+	switch (l4_proto) {
 	case IPPROTO_TCP:
 		/* enable checksum offloads */
-		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (this_tcp_hdrlen >> 2) <<
-			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case IPPROTO_SCTP:
 		/* enable SCTP checksum offload */
-		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
-			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct sctphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case IPPROTO_UDP:
 		/* enable UDP checksum offload */
-		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct udphdr) >> 2) <<
-			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct udphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	default:
-		break;
+		if (*tx_flags & I40E_TX_FLAGS_TSO)
+			return -1;
+		skb_checksum_help(skb);
+		return 0;
 	}
+
+	*td_cmd |= cmd;
+	*td_offset |= offset;
+
+	return 1;
 }
 
 /**
@@ -2147,12 +2177,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
 	/* Always offload the checksum, since it's in the data descriptor */
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		tx_flags |= I40E_TX_FLAGS_CSUM;
-
-		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
-				    tx_ring, &cd_tunneling);
-	}
+	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+				  tx_ring, &cd_tunneling);
+	if (tso < 0)
+		goto out_drop;
 
 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 81c96619287b..6ea8701cf066 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -153,7 +153,6 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		BIT(0)
 #define I40E_TX_FLAGS_HW_VLAN		BIT(1)
 #define I40E_TX_FLAGS_SW_VLAN		BIT(2)
 #define I40E_TX_FLAGS_TSO		BIT(3)
@@ -275,7 +274,6 @@ struct i40e_ring {
 
 	u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
-#define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
 
 	/* stats structs */
 	struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 41369a30dfb8..3396fe32cc6d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2337,9 +2337,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
 					 NETIF_F_IPV6_CSUM |
 					 NETIF_F_TSO |
 					 NETIF_F_TSO6 |
+					 NETIF_F_TSO_ECN |
+					 NETIF_F_GSO_GRE |
+					 NETIF_F_GSO_UDP_TUNNEL |
 					 NETIF_F_RXCSUM |
 					 NETIF_F_GRO;
 
+	netdev->hw_enc_features |= NETIF_F_IP_CSUM |
+				   NETIF_F_IPV6_CSUM |
+				   NETIF_F_TSO |
+				   NETIF_F_TSO6 |
+				   NETIF_F_TSO_ECN |
+				   NETIF_F_GSO_GRE |
+				   NETIF_F_GSO_UDP_TUNNEL |
+				   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
+		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features;
 	netdev->hw_features &= ~NETIF_F_RXCSUM;
@@ -2478,6 +2493,10 @@ static void i40evf_init_task(struct work_struct *work)
 	default:
 		goto err_alloc;
 	}
+
+	if (hw->mac.type == I40E_MAC_X722_VF)
+		adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;
+
 	if (i40evf_process_config(adapter))
 		goto err_alloc;
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
@@ -2519,10 +2538,6 @@ static void i40evf_init_task(struct work_struct *work)
 		goto err_sw_init;
 	i40evf_map_rings_to_vectors(adapter);
 	if (adapter->vf_res->vf_offload_flags &
-	    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
-		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
-
-	if (adapter->vf_res->vf_offload_flags &
 	    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
 		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
 