aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/igb/igb_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <aduyck@mirantis.com>2016-04-14 17:19:38 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2016-05-13 18:26:37 -0400
commite10715d3e9618901c5ef820a92e6a8e6548b43d3 (patch)
treef6aa6d407abe1526e7eda2b63d35f25503f5dfc9 /drivers/net/ethernet/intel/igb/igb_main.c
parent942c711206d1e0cd3dffc591829cbcbb9bcc0b1b (diff)
igb/igbvf: Add support for GSO partial
This patch adds support for partial GSO segmentation in the case of tunnels. Specifically with this change the driver an perform segmentation as long as the frame either has IPv6 inner headers, or we are allowed to mangle the IP IDs on the inner header. This is needed because we will not be modifying any fields from the start of the start of the outer transport header to the start of the inner transport header as we are treating them like they are just a block of IP options. Signed-off-by: Alexander Duyck <aduyck@mirantis.com> Tested-by: Aaron Brown <aaron.f.brown@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c137
1 file changed, 96 insertions, 41 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cab306934462..21727692bef6 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2087,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2087 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); 2087 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2088} 2088}
2089 2089
2090#define IGB_MAX_MAC_HDR_LEN 127
2091#define IGB_MAX_NETWORK_HDR_LEN 511
2092
2093static netdev_features_t
2094igb_features_check(struct sk_buff *skb, struct net_device *dev,
2095 netdev_features_t features)
2096{
2097 unsigned int network_hdr_len, mac_hdr_len;
2098
2099 /* Make certain the headers can be described by a context descriptor */
2100 mac_hdr_len = skb_network_header(skb) - skb->data;
2101 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2102 return features & ~(NETIF_F_HW_CSUM |
2103 NETIF_F_SCTP_CRC |
2104 NETIF_F_HW_VLAN_CTAG_TX |
2105 NETIF_F_TSO |
2106 NETIF_F_TSO6);
2107
2108 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2109 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2110 return features & ~(NETIF_F_HW_CSUM |
2111 NETIF_F_SCTP_CRC |
2112 NETIF_F_TSO |
2113 NETIF_F_TSO6);
2114
2115 /* We can only support IPV4 TSO in tunnels if we can mangle the
2116 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2117 */
2118 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2119 features &= ~NETIF_F_TSO;
2120
2121 return features;
2122}
2123
2090static const struct net_device_ops igb_netdev_ops = { 2124static const struct net_device_ops igb_netdev_ops = {
2091 .ndo_open = igb_open, 2125 .ndo_open = igb_open,
2092 .ndo_stop = igb_close, 2126 .ndo_stop = igb_close,
@@ -2111,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = {
2111 .ndo_fix_features = igb_fix_features, 2145 .ndo_fix_features = igb_fix_features,
2112 .ndo_set_features = igb_set_features, 2146 .ndo_set_features = igb_set_features,
2113 .ndo_fdb_add = igb_ndo_fdb_add, 2147 .ndo_fdb_add = igb_ndo_fdb_add,
2114 .ndo_features_check = passthru_features_check, 2148 .ndo_features_check = igb_features_check,
2115}; 2149};
2116 2150
2117/** 2151/**
@@ -2377,38 +2411,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2377 NETIF_F_TSO6 | 2411 NETIF_F_TSO6 |
2378 NETIF_F_RXHASH | 2412 NETIF_F_RXHASH |
2379 NETIF_F_RXCSUM | 2413 NETIF_F_RXCSUM |
2380 NETIF_F_HW_CSUM | 2414 NETIF_F_HW_CSUM;
2381 NETIF_F_HW_VLAN_CTAG_RX |
2382 NETIF_F_HW_VLAN_CTAG_TX;
2383 2415
2384 if (hw->mac.type >= e1000_82576) 2416 if (hw->mac.type >= e1000_82576)
2385 netdev->features |= NETIF_F_SCTP_CRC; 2417 netdev->features |= NETIF_F_SCTP_CRC;
2386 2418
2419#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
2420 NETIF_F_GSO_GRE_CSUM | \
2421 NETIF_F_GSO_IPIP | \
2422 NETIF_F_GSO_SIT | \
2423 NETIF_F_GSO_UDP_TUNNEL | \
2424 NETIF_F_GSO_UDP_TUNNEL_CSUM)
2425
2426 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
2427 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
2428
2387 /* copy netdev features into list of user selectable features */ 2429 /* copy netdev features into list of user selectable features */
2388 netdev->hw_features |= netdev->features; 2430 netdev->hw_features |= netdev->features |
2389 netdev->hw_features |= NETIF_F_RXALL; 2431 NETIF_F_HW_VLAN_CTAG_RX |
2432 NETIF_F_HW_VLAN_CTAG_TX |
2433 NETIF_F_RXALL;
2390 2434
2391 if (hw->mac.type >= e1000_i350) 2435 if (hw->mac.type >= e1000_i350)
2392 netdev->hw_features |= NETIF_F_NTUPLE; 2436 netdev->hw_features |= NETIF_F_NTUPLE;
2393 2437
2394 /* set this bit last since it cannot be part of hw_features */ 2438 if (pci_using_dac)
2395 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2439 netdev->features |= NETIF_F_HIGHDMA;
2396
2397 netdev->vlan_features |= NETIF_F_SG |
2398 NETIF_F_TSO |
2399 NETIF_F_TSO6 |
2400 NETIF_F_HW_CSUM |
2401 NETIF_F_SCTP_CRC;
2402 2440
2441 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
2403 netdev->mpls_features |= NETIF_F_HW_CSUM; 2442 netdev->mpls_features |= NETIF_F_HW_CSUM;
2404 netdev->hw_enc_features |= NETIF_F_HW_CSUM; 2443 netdev->hw_enc_features |= netdev->vlan_features;
2405 2444
2406 netdev->priv_flags |= IFF_SUPP_NOFCS; 2445 /* set this bit last since it cannot be part of vlan_features */
2446 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
2447 NETIF_F_HW_VLAN_CTAG_RX |
2448 NETIF_F_HW_VLAN_CTAG_TX;
2407 2449
2408 if (pci_using_dac) { 2450 netdev->priv_flags |= IFF_SUPP_NOFCS;
2409 netdev->features |= NETIF_F_HIGHDMA;
2410 netdev->vlan_features |= NETIF_F_HIGHDMA;
2411 }
2412 2451
2413 netdev->priv_flags |= IFF_UNICAST_FLT; 2452 netdev->priv_flags |= IFF_UNICAST_FLT;
2414 2453
@@ -4842,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring,
4842 struct igb_tx_buffer *first, 4881 struct igb_tx_buffer *first,
4843 u8 *hdr_len) 4882 u8 *hdr_len)
4844{ 4883{
4884 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
4845 struct sk_buff *skb = first->skb; 4885 struct sk_buff *skb = first->skb;
4846 u32 vlan_macip_lens, type_tucmd; 4886 union {
4847 u32 mss_l4len_idx, l4len; 4887 struct iphdr *v4;
4888 struct ipv6hdr *v6;
4889 unsigned char *hdr;
4890 } ip;
4891 union {
4892 struct tcphdr *tcp;
4893 unsigned char *hdr;
4894 } l4;
4895 u32 paylen, l4_offset;
4848 int err; 4896 int err;
4849 4897
4850 if (skb->ip_summed != CHECKSUM_PARTIAL) 4898 if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4857,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring,
4857 if (err < 0) 4905 if (err < 0)
4858 return err; 4906 return err;
4859 4907
4908 ip.hdr = skb_network_header(skb);
4909 l4.hdr = skb_checksum_start(skb);
4910
4860 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4911 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4861 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 4912 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
4862 4913
4863 if (first->protocol == htons(ETH_P_IP)) { 4914 /* initialize outer IP header fields */
4864 struct iphdr *iph = ip_hdr(skb); 4915 if (ip.v4->version == 4) {
4865 iph->tot_len = 0; 4916 /* IP header will have to cancel out any data that
4866 iph->check = 0; 4917 * is not a part of the outer IP header
4867 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 4918 */
4868 iph->daddr, 0, 4919 ip.v4->check = csum_fold(csum_add(lco_csum(skb),
4869 IPPROTO_TCP, 4920 csum_unfold(l4.tcp->check)));
4870 0);
4871 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 4921 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4922
4923 ip.v4->tot_len = 0;
4872 first->tx_flags |= IGB_TX_FLAGS_TSO | 4924 first->tx_flags |= IGB_TX_FLAGS_TSO |
4873 IGB_TX_FLAGS_CSUM | 4925 IGB_TX_FLAGS_CSUM |
4874 IGB_TX_FLAGS_IPV4; 4926 IGB_TX_FLAGS_IPV4;
4875 } else if (skb_is_gso_v6(skb)) { 4927 } else {
4876 ipv6_hdr(skb)->payload_len = 0; 4928 ip.v6->payload_len = 0;
4877 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4878 &ipv6_hdr(skb)->daddr,
4879 0, IPPROTO_TCP, 0);
4880 first->tx_flags |= IGB_TX_FLAGS_TSO | 4929 first->tx_flags |= IGB_TX_FLAGS_TSO |
4881 IGB_TX_FLAGS_CSUM; 4930 IGB_TX_FLAGS_CSUM;
4882 } 4931 }
4883 4932
4884 /* compute header lengths */ 4933 /* determine offset of inner transport header */
4885 l4len = tcp_hdrlen(skb); 4934 l4_offset = l4.hdr - skb->data;
4886 *hdr_len = skb_transport_offset(skb) + l4len; 4935
4936 /* compute length of segmentation header */
4937 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
4938
4939 /* remove payload length from inner checksum */
4940 paylen = skb->len - l4_offset;
4941 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
4887 4942
4888 /* update gso size and bytecount with header size */ 4943 /* update gso size and bytecount with header size */
4889 first->gso_segs = skb_shinfo(skb)->gso_segs; 4944 first->gso_segs = skb_shinfo(skb)->gso_segs;
4890 first->bytecount += (first->gso_segs - 1) * *hdr_len; 4945 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4891 4946
4892 /* MSS L4LEN IDX */ 4947 /* MSS L4LEN IDX */
4893 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT; 4948 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
4894 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; 4949 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
4895 4950
4896 /* VLAN MACLEN IPLEN */ 4951 /* VLAN MACLEN IPLEN */
4897 vlan_macip_lens = skb_network_header_len(skb); 4952 vlan_macip_lens = l4.hdr - ip.hdr;
4898 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; 4953 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
4899 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; 4954 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
4900 4955
4901 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); 4956 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);