Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c  451
1 file changed, 448 insertions, 3 deletions
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f8d692f6aa4f..34fa0e60a780 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter)
  */
 static void igc_setup_mrqc(struct igc_adapter *adapter)
 {
+	struct igc_hw *hw = &adapter->hw;
+	u32 j, num_rx_queues;
+	u32 mrqc, rxcsum;
+	u32 rss_key[10];
+
+	netdev_rss_key_fill(rss_key, sizeof(rss_key));
+	for (j = 0; j < 10; j++)
+		wr32(IGC_RSSRK(j), rss_key[j]);
+
+	num_rx_queues = adapter->rss_queues;
+
+	if (adapter->rss_indir_tbl_init != num_rx_queues) {
+		for (j = 0; j < IGC_RETA_SIZE; j++)
+			adapter->rss_indir_tbl[j] =
+				(j * num_rx_queues) / IGC_RETA_SIZE;
+		adapter->rss_indir_tbl_init = num_rx_queues;
+	}
+	igc_write_rss_indir_tbl(adapter);
+
+	/* Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(IGC_RXCSUM);
+	rxcsum |= IGC_RXCSUM_PCSD;
+
+	/* Enable Receive Checksum Offload for SCTP */
+	rxcsum |= IGC_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(IGC_RXCSUM, rxcsum);
+
+	/* Generate RSS hash based on packet types, TCP/UDP
+	 * port numbers and/or IPv4/v6 src and dst addresses
+	 */
+	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
+	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
+	       IGC_MRQC_RSS_FIELD_IPV6 |
+	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
+	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
+
+	wr32(IGC_MRQC, mrqc);
 }
 
 /**
@@ -890,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 	/* Make sure there is space in the ring for the next send. */
 	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 		writel(i, tx_ring->tail);
 	}
 
@@ -1145,7 +1194,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
 	/* Determine available headroom for copy */
 	headlen = size;
 	if (headlen > IGC_RX_HDR_LEN)
-		headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
 
 	/* align pull length to size of long to optimize memcpy performance */
 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -1733,12 +1782,200 @@ void igc_up(struct igc_adapter *adapter)
  * igc_update_stats - Update the board statistics counters
  * @adapter: board private structure
  */
-static void igc_update_stats(struct igc_adapter *adapter)
+void igc_update_stats(struct igc_adapter *adapter)
 {
+	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+	struct pci_dev *pdev = adapter->pdev;
+	struct igc_hw *hw = &adapter->hw;
+	u64 _bytes, _packets;
+	u64 bytes, packets;
+	unsigned int start;
+	u32 mpc;
+	int i;
+
+	/* Prevent stats update while adapter is being reset, or if the pci
+	 * connection is down.
+	 */
+	if (adapter->link_speed == 0)
+		return;
+	if (pci_channel_offline(pdev))
+		return;
+
+	packets = 0;
+	bytes = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igc_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(IGC_RQDPC(i));
+
+		if (hw->mac.type >= igc_i225)
+			wr32(IGC_RQDPC(i), 0);
+
+		if (rqdpc) {
+			ring->rx_stats.drops += rqdpc;
+			net_stats->rx_fifo_errors += rqdpc;
+		}
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+			_bytes = ring->rx_stats.bytes;
+			_packets = ring->rx_stats.packets;
+		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
+	}
+
+	net_stats->rx_bytes = bytes;
+	net_stats->rx_packets = packets;
+
+	packets = 0;
+	bytes = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *ring = adapter->tx_ring[i];
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+			_bytes = ring->tx_stats.bytes;
+			_packets = ring->tx_stats.packets;
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
+	}
+	net_stats->tx_bytes = bytes;
+	net_stats->tx_packets = packets;
+	rcu_read_unlock();
+
+	/* read stats registers */
+	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
+	adapter->stats.gprc += rd32(IGC_GPRC);
+	adapter->stats.gorc += rd32(IGC_GORCL);
+	rd32(IGC_GORCH); /* clear GORCL */
+	adapter->stats.bprc += rd32(IGC_BPRC);
+	adapter->stats.mprc += rd32(IGC_MPRC);
+	adapter->stats.roc += rd32(IGC_ROC);
+
+	adapter->stats.prc64 += rd32(IGC_PRC64);
+	adapter->stats.prc127 += rd32(IGC_PRC127);
+	adapter->stats.prc255 += rd32(IGC_PRC255);
+	adapter->stats.prc511 += rd32(IGC_PRC511);
+	adapter->stats.prc1023 += rd32(IGC_PRC1023);
+	adapter->stats.prc1522 += rd32(IGC_PRC1522);
+	adapter->stats.symerrs += rd32(IGC_SYMERRS);
+	adapter->stats.sec += rd32(IGC_SEC);
+
+	mpc = rd32(IGC_MPC);
+	adapter->stats.mpc += mpc;
+	net_stats->rx_fifo_errors += mpc;
+	adapter->stats.scc += rd32(IGC_SCC);
+	adapter->stats.ecol += rd32(IGC_ECOL);
+	adapter->stats.mcc += rd32(IGC_MCC);
+	adapter->stats.latecol += rd32(IGC_LATECOL);
+	adapter->stats.dc += rd32(IGC_DC);
+	adapter->stats.rlec += rd32(IGC_RLEC);
+	adapter->stats.xonrxc += rd32(IGC_XONRXC);
+	adapter->stats.xontxc += rd32(IGC_XONTXC);
+	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
+	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
+	adapter->stats.fcruc += rd32(IGC_FCRUC);
+	adapter->stats.gptc += rd32(IGC_GPTC);
+	adapter->stats.gotc += rd32(IGC_GOTCL);
+	rd32(IGC_GOTCH); /* clear GOTCL */
+	adapter->stats.rnbc += rd32(IGC_RNBC);
+	adapter->stats.ruc += rd32(IGC_RUC);
+	adapter->stats.rfc += rd32(IGC_RFC);
+	adapter->stats.rjc += rd32(IGC_RJC);
+	adapter->stats.tor += rd32(IGC_TORH);
+	adapter->stats.tot += rd32(IGC_TOTH);
+	adapter->stats.tpr += rd32(IGC_TPR);
+
+	adapter->stats.ptc64 += rd32(IGC_PTC64);
+	adapter->stats.ptc127 += rd32(IGC_PTC127);
+	adapter->stats.ptc255 += rd32(IGC_PTC255);
+	adapter->stats.ptc511 += rd32(IGC_PTC511);
+	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
+	adapter->stats.ptc1522 += rd32(IGC_PTC1522);
+
+	adapter->stats.mptc += rd32(IGC_MPTC);
+	adapter->stats.bptc += rd32(IGC_BPTC);
+
+	adapter->stats.tpt += rd32(IGC_TPT);
+	adapter->stats.colc += rd32(IGC_COLC);
+
+	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
+
+	adapter->stats.tsctc += rd32(IGC_TSCTC);
+	adapter->stats.tsctfc += rd32(IGC_TSCTFC);
+
+	adapter->stats.iac += rd32(IGC_IAC);
+	adapter->stats.icrxoc += rd32(IGC_ICRXOC);
+	adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
+	adapter->stats.icrxatc += rd32(IGC_ICRXATC);
+	adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
+	adapter->stats.ictxatc += rd32(IGC_ICTXATC);
+	adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
+	adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
+	adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);
+
+	/* Fill out the OS statistics structure */
+	net_stats->multicast = adapter->stats.mprc;
+	net_stats->collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/* RLEC on some newer hardware can be incorrect so build
+	 * our own version based on RUC and ROC
+	 */
+	net_stats->rx_errors = adapter->stats.rxerrc +
+			       adapter->stats.crcerrs + adapter->stats.algnerrc +
+			       adapter->stats.ruc + adapter->stats.roc +
+			       adapter->stats.cexterr;
+	net_stats->rx_length_errors = adapter->stats.ruc +
+				      adapter->stats.roc;
+	net_stats->rx_crc_errors = adapter->stats.crcerrs;
+	net_stats->rx_frame_errors = adapter->stats.algnerrc;
+	net_stats->rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	net_stats->tx_errors = adapter->stats.ecol +
+			       adapter->stats.latecol;
+	net_stats->tx_aborted_errors = adapter->stats.ecol;
+	net_stats->tx_window_errors = adapter->stats.latecol;
+	net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	/* Management Stats */
+	adapter->stats.mgptc += rd32(IGC_MGTPTC);
+	adapter->stats.mgprc += rd32(IGC_MGTPRC);
+	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
 }
 
 static void igc_nfc_filter_exit(struct igc_adapter *adapter)
 {
+	struct igc_nfc_filter *rule;
+
+	spin_lock(&adapter->nfc_lock);
+
+	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+		igc_erase_filter(adapter, rule);
+
+	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+		igc_erase_filter(adapter, rule);
+
+	spin_unlock(&adapter->nfc_lock);
+}
+
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+	struct igc_nfc_filter *rule;
+
+	spin_lock(&adapter->nfc_lock);
+
+	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+		igc_add_filter(adapter, rule);
+
+	spin_unlock(&adapter->nfc_lock);
 }
 
 /**
@@ -1885,6 +2122,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev)
 	return &netdev->stats;
 }
 
+static netdev_features_t igc_fix_features(struct net_device *netdev,
+					  netdev_features_t features)
+{
+	/* Since there is no support for separate Rx/Tx vlan accel
+	 * enable/disable make sure Tx flag is always in same state as Rx.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+	return features;
+}
+
+static int igc_set_features(struct net_device *netdev,
+			    netdev_features_t features)
+{
+	netdev_features_t changed = netdev->features ^ features;
+	struct igc_adapter *adapter = netdev_priv(netdev);
+
+	/* Add VLAN support */
+	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
+		return 0;
+
+	if (!(features & NETIF_F_NTUPLE)) {
+		struct hlist_node *node2;
+		struct igc_nfc_filter *rule;
+
+		spin_lock(&adapter->nfc_lock);
+		hlist_for_each_entry_safe(rule, node2,
+					  &adapter->nfc_filter_list, nfc_node) {
+			igc_erase_filter(adapter, rule);
+			hlist_del(&rule->nfc_node);
+			kfree(rule);
+		}
+		spin_unlock(&adapter->nfc_lock);
+		adapter->nfc_filter_count = 0;
+	}
+
+	netdev->features = features;
+
+	if (netif_running(netdev))
+		igc_reinit_locked(adapter);
+	else
+		igc_reset(adapter);
+
+	return 1;
+}
+
+static netdev_features_t
+igc_features_check(struct sk_buff *skb, struct net_device *dev,
+		   netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPv4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
 /**
  * igc_configure - configure the hardware for RX and TX
  * @adapter: private board structure
@@ -1901,6 +2218,7 @@ static void igc_configure(struct igc_adapter *adapter)
 	igc_setup_mrqc(adapter);
 	igc_setup_rctl(adapter);
 
+	igc_nfc_filter_restore(adapter);
 	igc_configure_tx(adapter);
 	igc_configure_rx(adapter);
 
@@ -1962,6 +2280,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter)
 	igc_rar_set_index(adapter, 0);
 }
 
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to only
+ * override the other configurations, for example the queue to steer
+ * traffic.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+				      const u8 *addr, const u8 flags)
+{
+	if (!(entry->state & IGC_MAC_STATE_IN_USE))
+		return true;
+
+	if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+	    (flags & IGC_MAC_STATE_SRC_ADDR))
+		return false;
+
+	if (!ether_addr_equal(addr, entry->addr))
+		return false;
+
+	return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
+ * 'flags' is used to indicate what kind of match is made, match is by
+ * default for the destination address, if matching by source address
+ * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
+				    const u8 *addr, const u8 queue,
+				    const u8 flags)
+{
+	struct igc_hw *hw = &adapter->hw;
+	int rar_entries = hw->mac.rar_entry_count;
+	int i;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	/* Search for the first empty entry in the MAC table.
+	 * Do not touch entries at the end of the table reserved for the VF MAC
+	 * addresses.
+	 */
+	for (i = 0; i < rar_entries; i++) {
+		if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+					       addr, flags))
+			continue;
+
+		ether_addr_copy(adapter->mac_table[i].addr, addr);
+		adapter->mac_table[i].queue = queue;
+		adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
+
+		igc_rar_set_index(adapter, i);
+		return i;
+	}
+
+	return -ENOSPC;
+}
+
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+				const u8 *addr, u8 queue, u8 flags)
+{
+	return igc_add_mac_filter_flags(adapter, addr, queue,
+					IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue', 'flags' is used to indicate what kind of match need to be
+ * removed, match is by default for the destination address, if
+ * matching by source address is to be removed the flag
+ * IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
+				    const u8 *addr, const u8 queue,
+				    const u8 flags)
+{
+	struct igc_hw *hw = &adapter->hw;
+	int rar_entries = hw->mac.rar_entry_count;
+	int i;
+
+	if (is_zero_ether_addr(addr))
+		return -EINVAL;
+
+	/* Search for matching entry in the MAC table based on given address
+	 * and queue. Do not touch entries at the end of the table reserved
+	 * for the VF MAC addresses.
+	 */
+	for (i = 0; i < rar_entries; i++) {
+		if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+			continue;
+		if ((adapter->mac_table[i].state & flags) != flags)
+			continue;
+		if (adapter->mac_table[i].queue != queue)
+			continue;
+		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+			continue;
+
+		/* When a filter for the default address is "deleted",
+		 * we return it to its initial configuration
+		 */
+		if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+			adapter->mac_table[i].state =
+				IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+		} else {
+			adapter->mac_table[i].state = 0;
+			adapter->mac_table[i].queue = 0;
+			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+		}
+
+		igc_rar_set_index(adapter, i);
+		return 0;
+	}
+
+	return -ENOENT;
+}
+
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+				const u8 *addr, u8 queue, u8 flags)
+{
+	return igc_del_mac_filter_flags(adapter, addr, queue,
+					IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
 /**
  * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
@@ -3429,6 +3868,9 @@ static const struct net_device_ops igc_netdev_ops = {
 	.ndo_set_mac_address	= igc_set_mac,
 	.ndo_change_mtu		= igc_change_mtu,
 	.ndo_get_stats		= igc_get_stats,
+	.ndo_fix_features	= igc_fix_features,
+	.ndo_set_features	= igc_set_features,
+	.ndo_features_check	= igc_features_check,
 };
 
 /* PCIe configuration access */
@@ -3658,6 +4100,9 @@ static int igc_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_sw_init;
 
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= NETIF_F_NTUPLE;
+
 	/* MTU range: 68 - 9216 */
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;