author		David S. Miller <davem@davemloft.net>	2015-02-08 01:48:50 -0500
committer	David S. Miller <davem@davemloft.net>	2015-02-08 01:48:50 -0500
commit		d78f802f18ed100c53f331cd59791cd82ccb9438 (patch)
tree		5adce44da093cf2bb808538bbc076855708f3484
parent		57ee062e4baa9532e5a3835a2d185590a3fc8bd3 (diff)
parent		5b7f000ff94c77031a628f87b788b1a032f2d4d9 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-02-05

This series contains updates to fm10k, ixgbe and ixgbevf.

Matthew fixes an issue where fm10k does not properly drop the upper-most four bits of the VLAN ID due to type promotion, so resolve the issue by not masking off the bits, but by throwing an error if the VLAN ID is out-of-bounds. Then cleans up two cases where variables were not being used, but were being set, so just remove the unused variables.

Don cleans up sparse errors in the x550 family file for ixgbe. Fixed up a redundant setting of the default value for set_rxpba, which was done twice accidentally. Cleaned up the probe routine to remove a redundant attempt to identify the PHY, which could lead to a panic on x550. Added support for VXLAN receive checksum offload in x550 hardware. Added the Ethertype Anti-spoofing feature for affected devices.

Emil enables ixgbe and ixgbevf to allow multiple queues in SRIOV mode. Adds RSS support for x550 per VF. Fixed up a couple of issues introduced in commit 2b509c0cd292 ("ixgbe: cleanup ixgbe_ndo_set_vf_vlan"), fixed setting of the VLAN inside ixgbe_enable_port_vlan() and disable the "hide VLAN" bit in PFQDE when port VLAN is disabled. Cleaned up the setting of vlan_features by enabling all features at once. Fixed the ordering of the shutdown patch so that we attempt to shutdown the rings more gracefully. We shutdown the main Rx filter in the case of Rx and we set the carrier_off state in the case of Tx so that packets stop being delivered from outside the driver. Then we shutdown interrupts and NAPI, then finally stop the rings from performing DMA and clean them. Added code to allow for Tx hang checking to provide more robust debug info in the event of a transmit unit hang in ixgbevf. Cleaned up ixgbevf logic dealing with link up/down by breaking down the link detection and up/down events into separate functions, similar to how these events are handled in other drivers. Combined the ixgbevf reset and watchdog tasks into a single task so that we can avoid multiple schedules of the reset task when we have a reset event needed due to either the mailbox going down or transmit packets being present on a link down.

v2: Fixed up patch #03 of the series to remove the variable type change based on feedback from David Laight
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/intel/Kconfig			 11
-rw-r--r--	drivers/net/ethernet/intel/fm10k/fm10k_mbx.c		  5
-rw-r--r--	drivers/net/ethernet/intel/fm10k/fm10k_pf.c		  7
-rw-r--r--	drivers/net/ethernet/intel/fm10k/fm10k_ptp.c		  3
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h		  3
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c		112
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c		 16
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_type.h		 12
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c		  3
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c		 90
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf.h		 36
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	495
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/regs.h		 10
13 files changed, 584 insertions(+), 219 deletions(-)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 4d61ef50b465..f4ff465584a0 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -192,6 +192,17 @@ config IXGBE
 	  To compile this driver as a module, choose M here. The module
 	  will be called ixgbe.
 
+config IXGBE_VXLAN
+	bool "Virtual eXtensible Local Area Network Support"
+	default n
+	depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
+	---help---
+	  This allows one to create VXLAN virtual interfaces that provide
+	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+	  to tunnel virtual network infrastructure in virtualized environments.
+	  Say Y here if you want to use Virtual eXtensible Local Area Network
+	  (VXLAN) in the driver.
+
 config IXGBE_HWMON
 	bool "Intel(R) 10GbE PCI Express adapters HWMON support"
 	default y
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 14a4ea795c01..9f5457c9e627 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1194,12 +1194,11 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
 {
 	const enum fm10k_mbx_state state = mbx->state;
 	const u32 *hdr = &mbx->mbx_hdr;
-	u16 head, tail;
+	u16 head;
 	s32 err;
 
-	/* we will need to pull all of the fields for verification */
+	/* we will need to pull the header field for verification */
 	head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
-	tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
 
 	/* We should not be receiving disconnect if Rx is incomplete */
 	if (mbx->pushed)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 275423d4f777..7e4711958e46 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -330,13 +330,10 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
 	struct fm10k_mac_update mac_update;
 	u32 msg[5];
 
-	/* if glort is not valid return error */
-	if (!fm10k_glort_valid_pf(hw, glort))
+	/* if glort or vlan are not valid return error */
+	if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
 		return FM10K_ERR_PARAM;
 
-	/* drop upper 4 bits of VLAN ID */
-	vid = (vid << 4) >> 4;
-
 	/* record fields */
 	mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
 					   ((u32)mac[3] << 16) |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 7822809436a3..d966044e017a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -57,7 +57,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
 	struct sk_buff_head *list = &interface->ts_tx_skb_queue;
 	struct sk_buff *clone;
 	unsigned long flags;
-	__le16 dglort;
 
 	/* create clone for us to return on the Tx path */
 	clone = skb_clone_sk(skb);
@@ -65,8 +64,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
 		return;
 
 	FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
-	dglort = FM10K_CB(clone)->fi.w.dglort;
-
 	spin_lock_irqsave(&list->lock, flags);
 
 	/* attempt to locate any buffers with the same dglort,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 38fc64cf5dca..7dcbbec09a70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -76,6 +76,8 @@
 #define IXGBE_MAX_RXD 4096
 #define IXGBE_MIN_RXD 64
 
+#define IXGBE_ETH_P_LLDP 0x88CC
+
 /* flow control */
 #define IXGBE_MIN_FCRTL 0x40
 #define IXGBE_MAX_FCRTL 0x7FF80
@@ -753,6 +755,7 @@ struct ixgbe_adapter {
 	u32 timer_event_accumulator;
 	u32 vferr_refcount;
 	struct ixgbe_mac_addr *mac_table;
+	u16 vxlan_port;
 	struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
 	struct hwmon_buff *ixgbe_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e9e3a1eb9a97..70cc4c5c0a01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,6 +50,7 @@
 #include <linux/if_bridge.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <net/vxlan.h>
 
 #ifdef CONFIG_OF
 #include <linux/of_net.h>
@@ -1396,12 +1397,23 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 				     union ixgbe_adv_rx_desc *rx_desc,
 				     struct sk_buff *skb)
 {
+	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+	__le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+	bool encap_pkt = false;
+
 	skb_checksum_none_assert(skb);
 
 	/* Rx csum disabled */
 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
 		return;
 
+	if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
+	    (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
+		encap_pkt = true;
+		skb->encapsulation = 1;
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+
 	/* if IP and error */
 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
 	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
@@ -1413,8 +1425,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 		return;
 
 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
-		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-
 		/*
 		 * 82599 errata, UDP frames with a 0 checksum can be marked as
 		 * checksum errors.
@@ -1429,6 +1439,17 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 
 	/* It must be a TCP or UDP packet with a valid checksum */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (encap_pkt) {
+		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
+			return;
+
+		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
+			ring->rx_stats.csum_err++;
+			return;
+		}
+		/* If we checked the outer header let the stack know */
+		skb->csum_level = 1;
+	}
 }
 
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -3564,10 +3585,24 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
 					  adapter->num_vfs);
+
+	/* Ensure LLDP is set for Ethertype Antispoofing if we will be
+	 * calling set_ethertype_anti_spoofing for each VF in loop below
+	 */
+	if (hw->mac.ops.set_ethertype_anti_spoofing)
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+				(IXGBE_ETQF_FILTER_EN | /* enable filter */
+				 IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
+				 IXGBE_ETH_P_LLDP)); /* LLDP eth type */
+
 	/* For VFs that have spoof checking turned off */
 	for (i = 0; i < adapter->num_vfs; i++) {
 		if (!adapter->vfinfo[i].spoofchk_enabled)
 			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
+
+		/* enable ethertype anti spoofing if hw supports it */
+		if (hw->mac.ops.set_ethertype_anti_spoofing)
+			hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
 	}
 }
 
@@ -5627,6 +5662,10 @@ static int ixgbe_open(struct net_device *netdev)
 
 	ixgbe_up_complete(adapter);
 
+#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+	vxlan_get_rx_port(netdev);
+
+#endif
 	return 0;
 
 err_set_queues:
@@ -7771,6 +7810,64 @@ static int ixgbe_set_features(struct net_device *netdev,
 	return 0;
 }
 
+/**
+ * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @dev: The port's netdev
+ * @sa_family: Socket Family that VXLAN is notifiying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+				 __be16 port)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u16 new_port = ntohs(port);
+
+	if (sa_family == AF_INET6)
+		return;
+
+	if (adapter->vxlan_port == new_port) {
+		netdev_info(dev, "Port %d already offloaded\n", new_port);
+		return;
+	}
+
+	if (adapter->vxlan_port) {
+		netdev_info(dev,
+			    "Hit Max num of UDP ports, not adding port %d\n",
+			    new_port);
+		return;
+	}
+
+	adapter->vxlan_port = new_port;
+	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+}
+
+/**
+ * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @dev: The port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+				 __be16 port)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u16 new_port = ntohs(port);
+
+	if (sa_family == AF_INET6)
+		return;
+
+	if (adapter->vxlan_port != new_port) {
+		netdev_info(dev, "Port %d was not found, not deleting\n",
+			    new_port);
+		return;
+	}
+
+	adapter->vxlan_port = 0;
+	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+}
+
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			     struct net_device *dev,
 			     const unsigned char *addr, u16 vid,
@@ -7982,6 +8079,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
 	.ndo_dfwd_add_station = ixgbe_fwd_add,
 	.ndo_dfwd_del_station = ixgbe_fwd_del,
+	.ndo_add_vxlan_port = ixgbe_add_vxlan_port,
+	.ndo_del_vxlan_port = ixgbe_del_vxlan_port,
 };
 
 /**
@@ -8339,6 +8438,15 @@ skip_sriov:
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		netdev->hw_enc_features |= NETIF_F_RXCSUM;
+		break;
+	default:
+		break;
+	}
+
 #ifdef CONFIG_IXGBE_DCB
 	netdev->dcbnl_ops = &dcbnl_ops;
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c76ba90ecc6e..7f37fe7269a7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -101,9 +101,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 		adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
 	}
 
-	/* We do not support RSS w/ SR-IOV */
-	adapter->ring_feature[RING_F_RSS].limit = 1;
-
 	/* Disable RSC when in SR-IOV mode */
 	adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 			     IXGBE_FLAG2_RSC_ENABLED);
@@ -1097,14 +1094,12 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
 				  u16 vlan, u8 qos)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int err = 0;
+	int err;
 
-	if (adapter->vfinfo[vf].pf_vlan)
-		err = ixgbe_set_vf_vlan(adapter, false,
-					adapter->vfinfo[vf].pf_vlan,
-					vf);
+	err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
 	if (err)
 		goto out;
+
 	ixgbe_set_vmvir(adapter, vlan, qos, vf);
 	ixgbe_set_vmolr(hw, vf, false);
 	if (adapter->vfinfo[vf].spoofchk_enabled)
@@ -1143,6 +1138,11 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
 		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
 	if (adapter->vfinfo[vf].vlan_count)
 		adapter->vfinfo[vf].vlan_count--;
+
+	/* disable hide VLAN on X550 */
+	if (hw->mac.type >= ixgbe_mac_X550)
+		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
+
 	adapter->vfinfo[vf].pf_vlan = 0;
 	adapter->vfinfo[vf].pf_qos = 0;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index d101b25dc4b6..fc5ecee56ca8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -378,6 +378,8 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_SPOOF_MACAS_MASK 0xFF
 #define IXGBE_SPOOF_VLANAS_MASK 0xFF00
 #define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
 #define IXGBE_PFVFSPOOF_REG_COUNT 8
 
 #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
@@ -399,6 +401,7 @@ struct ixgbe_thermal_sensor_data {
 
 #define IXGBE_WUPL 0x05900
 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
 #define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
 #define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
 						       * Filter Table */
@@ -1540,6 +1543,7 @@ enum {
 #define IXGBE_MAX_ETQF_FILTERS 8
 #define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
 #define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
 #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
 #define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
@@ -1565,6 +1569,9 @@ enum {
 #define IXGBE_ETQF_FILTER_FCOE 2
 #define IXGBE_ETQF_FILTER_1588 3
 #define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_LLDP 5
+#define IXGBE_ETQF_FILTER_LACP 6
+
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
 #define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -2122,6 +2129,7 @@ enum {
 #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
 #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
 #define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
 #define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
 #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
 #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
@@ -2139,6 +2147,7 @@ enum {
 #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
 #define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
 #define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
 #define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
 #define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
 #define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
@@ -2227,6 +2236,8 @@ enum {
 #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
 #define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
 #define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
 #define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
@@ -3056,6 +3067,7 @@ struct ixgbe_mac_operations {
 	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
 	s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
 	s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+	void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
 	/* DMA Coalescing */
 	s32 (*dmac_config)(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index ba54ff07b438..49395420c9b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -55,9 +55,6 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
 
-	/* Call PHY identify routine to get the phy type */
-	ixgbe_identify_phy_generic(hw);
-
 	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
 	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
 	mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index ffdd1231f419..50bf81908dd6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -80,7 +80,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
  * Initializes the EEPROM parameters ixgbe_eeprom_info within the
  * ixgbe_hw struct in order to set up EEPROM access.
  **/
-s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
 {
 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 	u32 eec;
@@ -110,8 +110,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
  * @device_type: 3 bit device type
  * @phy_data: Pointer to read data from the register
  **/
-s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-				u32 device_type, u32 *data)
+static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+				       u32 device_type, u32 *data)
 {
 	u32 i, command, error;
 
@@ -158,7 +158,8 @@ s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
  *
  * Reads a 16 bit word from the EEPROM using the hostif.
  **/
-s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+					  u16 *data)
 {
 	s32 status;
 	struct ixgbe_hic_read_shadow_ram buffer;
@@ -193,8 +194,8 @@ s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
  *
  * Reads a 16 bit word(s) from the EEPROM using the hostif.
  **/
-s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-				     u16 offset, u16 words, u16 *data)
+static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+					    u16 offset, u16 words, u16 *data)
 {
 	struct ixgbe_hic_read_shadow_ram buffer;
 	u32 current_word = 0;
@@ -331,7 +332,8 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
  *
  * Returns a negative error code on error, or the 16-bit checksum
  **/
-s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+				    u32 buffer_size)
 {
 	u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
 	u16 *local_buffer;
@@ -407,7 +409,7 @@ s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
  *
  * Returns a negative error code on error, or the 16-bit checksum
  **/
-s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
 {
 	return ixgbe_calc_checksum_X550(hw, NULL, 0);
 }
@@ -419,7 +421,7 @@ s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
  *
  * Reads a 16 bit word from the EEPROM using the hostif.
  **/
-s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
 	s32 status = 0;
 
@@ -440,7 +442,8 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
  * Performs checksum calculation and validates the EEPROM checksum. If the
  * caller does not need checksum_val, the value can be NULL.
  **/
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+					       u16 *checksum_val)
 {
 	s32 status;
 	u16 checksum;
@@ -489,7 +492,8 @@ s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
  *
  * Write a 16 bit word to the EEPROM using the hostif.
  **/
-s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+					   u16 data)
 {
 	s32 status;
 	struct ixgbe_hic_write_shadow_ram buffer;
@@ -517,7 +521,7 @@ s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
  *
  * Write a 16 bit word to the EEPROM using the hostif.
  **/
-s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
 	s32 status = 0;
 
@@ -537,7 +541,7 @@ s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
  *
  * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
  **/
-s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
 {
 	s32 status = 0;
 	union ixgbe_hic_hdr2 buffer;
@@ -560,7 +564,7 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
  * checksum and updates the EEPROM and instructs the hardware to update
  * the flash.
  **/
-s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
 {
 	s32 status;
 	u16 checksum = 0;
@@ -600,8 +604,9 @@ s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
  *
  * Write a 16 bit word(s) to the EEPROM using the hostif.
  **/
-s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-				      u16 offset, u16 words, u16 *data)
+static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+					     u16 offset, u16 words,
+					     u16 *data)
 {
 	s32 status = 0;
 	u32 i = 0;
@@ -630,7 +635,7 @@ s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
 /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
  * @hw: pointer to hardware structure
  **/
-void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
 
@@ -647,7 +652,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
 /** ixgbe_setup_sfp_modules_X550em - Setup SFP module
  * @hw: pointer to hardware structure
  */
-s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
 {
 	bool setup_linear;
 	u16 reg_slice, edc_mode;
@@ -703,9 +708,9 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
  * @speed: pointer to link speed
  * @autoneg: true when autoneg or autotry is enabled
  **/
-s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
-				       ixgbe_link_speed *speed,
-				       bool *autoneg)
+static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+					      ixgbe_link_speed *speed,
+					      bool *autoneg)
 {
 	/* SFP */
 	if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -740,8 +745,8 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
  * @device_type: 3 bit device type
  * @data: Data to write to the register
  **/
-s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-				 u32 device_type, u32 data)
+static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+					u32 device_type, u32 data)
 {
 	u32 i, command, error;
 
@@ -904,7 +909,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
  *
  * Configures the integrated KX4 PHY.
  **/
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
 {
 	s32 status;
 	u32 reg_val;
@@ -942,7 +947,7 @@ s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
  *
  * Configures the integrated KR PHY.
  **/
-s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
 {
 	s32 status;
 	u32 reg_val;
@@ -987,7 +992,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
  * A return of a non-zero value indicates an error, and the base driver should
  * not report link up.
  **/
-s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
 {
 	u32 status;
 	u16 lasi, autoneg_status, speed;
@@ -1049,7 +1054,7 @@ s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
  * set during init_shared_code because the PHY/SFP type was
  * not known. Perform the SFP init if necessary.
  **/
-s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
 {
 	struct ixgbe_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1102,7 +1107,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
  * Returns the media type (fiber, copper, backplane)
  *
  */
-enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
 {
 	enum ixgbe_media_type media_type;
 
@@ -1129,7 +1134,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
 /** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
  ** @hw: pointer to hardware structure
  **/
-s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
 {
 	u32 status;
 	u16 reg;
@@ -1202,7 +1207,7 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
  ** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
  ** reset.
  **/
-s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
 {
 	ixgbe_link_speed link_speed;
 	s32 status;
@@ -1295,6 +1300,28 @@ mac_reset_top:
 	return status;
 }
 
+/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+ *	anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
+					    int vf)
+{
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+	u32 pfvfspoof;
+
+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+	if (enable)
+		pfvfspoof |= (1 << vf_target_shift);
+	else
+		pfvfspoof &= ~(1 << vf_target_shift);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
 #define X550_COMMON_MAC \
 	.init_hw = &ixgbe_init_hw_generic, \
 	.start_hw = &ixgbe_start_hw_X540, \
@@ -1329,6 +1356,8 @@ mac_reset_top:
 	.init_uta_tables = &ixgbe_init_uta_tables_generic, \
 	.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
 	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
+	.set_ethertype_anti_spoofing = \
+				&ixgbe_set_ethertype_anti_spoofing_X550, \
 	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
 	.release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
 	.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
@@ -1345,7 +1374,6 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
 	.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
 	.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
 	.setup_link = &ixgbe_setup_mac_link_X540,
-	.set_rxpba = &ixgbe_set_rxpba_generic,
 	.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
 	.setup_sfp = NULL,
 };
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8c44ab25f3fa..3a9b356dff01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -43,6 +43,13 @@
 #define BP_EXTENDED_STATS
 #endif
 
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -85,6 +92,18 @@ struct ixgbevf_rx_queue_stats {
 	u64 csum_err;
 };
 
+enum ixgbevf_ring_state_t {
+	__IXGBEVF_TX_DETECT_HANG,
+	__IXGBEVF_HANG_CHECK_ARMED,
+};
+
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
@@ -101,7 +120,7 @@ struct ixgbevf_ring {
 		struct ixgbevf_tx_buffer *tx_buffer_info;
 		struct ixgbevf_rx_buffer *rx_buffer_info;
 	};
-
+	unsigned long state;
 	struct ixgbevf_stats stats;
 	struct u64_stats_sync syncp;
 	union {
@@ -124,6 +143,7 @@ struct ixgbevf_ring {
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
+#define IXGBEVF_MAX_RSS_QUEUES 2
 
 #define IXGBEVF_DEFAULT_TXD 1024
 #define IXGBEVF_DEFAULT_RXD 512
@@ -347,8 +367,6 @@ struct ixgbevf_adapter {
 	/* this field must be first, see ixgbevf_process_skb_fields */
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
-	struct timer_list watchdog_timer;
-	struct work_struct reset_task;
 	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
 	/* Interrupt Throttle Rate */
@@ -378,8 +396,7 @@ struct ixgbevf_adapter {
 	 * thus the additional *_CAPABLE flags.
 	 */
 	u32 flags;
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
-
+#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
 #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
 
 	struct msix_entry *msix_entries;
@@ -415,9 +432,11 @@ struct ixgbevf_adapter {
 	u32 link_speed;
 	bool link_up;
 
-	spinlock_t mbx_lock;
+	struct timer_list service_timer;
+	struct work_struct service_task;
 
-	struct work_struct watchdog_task;
+	spinlock_t mbx_lock;
+	unsigned long last_reset;
 };
 
423enum ixbgevf_state_t { 442enum ixbgevf_state_t {
@@ -426,7 +445,8 @@ enum ixbgevf_state_t {
426 __IXGBEVF_DOWN, 445 __IXGBEVF_DOWN,
427 __IXGBEVF_DISABLED, 446 __IXGBEVF_DISABLED,
428 __IXGBEVF_REMOVING, 447 __IXGBEVF_REMOVING,
429 __IXGBEVF_WORK_INIT, 448 __IXGBEVF_SERVICE_SCHED,
449 __IXGBEVF_SERVICE_INITED,
430}; 450};
431 451
432enum ixgbevf_boards { 452enum ixgbevf_boards {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index fe2e10f40df8..4186981e562d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -98,6 +98,23 @@ static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
+static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
+{
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
+	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
+		schedule_work(&adapter->service_task);
+}
+
+static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
+{
+	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
+}
+
 /* forward decls */
 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
@@ -111,8 +128,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
 		return;
 	hw->hw_addr = NULL;
 	dev_err(&adapter->pdev->dev, "Adapter removed\n");
-	if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
-		schedule_work(&adapter->watchdog_task);
+	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
+		ixgbevf_service_event_schedule(adapter);
 }
 
 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -199,14 +216,72 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
 	/* tx_buffer must be completely set up in the transmit path */
 }
 
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
+{
+	return ring->stats.packets;
+}
+
+static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
+	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
+{
+	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/* Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang.
+	 */
+	if ((tx_done_old == tx_done) && tx_pending) {
+		/* make sure it is true for two checks in a row */
+		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
+					&tx_ring->state);
+	}
+	/* reset the countdown */
+	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
+
+	/* update completed stats and continue */
+	tx_ring->tx_stats.tx_done_old = tx_done;
+
+	return false;
+}
+
+static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
+{
+	/* Do the reset outside of interrupt context */
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+		ixgbevf_service_event_schedule(adapter);
+	}
+}
 
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-static void ixgbevf_tx_timeout(struct net_device *netdev);
+	ixgbevf_tx_timeout_reset(adapter);
+}
 
 /**
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
@@ -311,6 +386,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
 
+	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
+		struct ixgbe_hw *hw = &adapter->hw;
+		union ixgbe_adv_tx_desc *eop_desc;
+
+		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
+
+		pr_err("Detected Tx Unit Hang\n"
+		       "  Tx Queue             <%d>\n"
+		       "  TDH, TDT             <%x>, <%x>\n"
+		       "  next_to_use          <%x>\n"
+		       "  next_to_clean        <%x>\n"
+		       "tx_buffer_info[next_to_clean]\n"
+		       "  next_to_watch        <%p>\n"
+		       "  eop_desc->wb.status  <%x>\n"
+		       "  time_stamp           <%lx>\n"
+		       "  jiffies              <%lx>\n",
+		       tx_ring->queue_index,
+		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
+		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
+		       tx_ring->next_to_use, i,
+		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
+		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);
+
+		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+		/* schedule immediate reset if we believe we hung */
+		ixgbevf_tx_timeout_reset(adapter);
+
+		return true;
+	}
+
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
 		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -1158,9 +1264,7 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 
 	hw->mac.get_link_status = 1;
 
-	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-		mod_timer(&adapter->watchdog_timer, jiffies);
+	ixgbevf_service_event_schedule(adapter);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1479,6 +1583,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (1 << 8) | /* HTHRESH = 1 */
 		  32;	     /* PTHRESH = 32 */
 
+	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
+
 	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
 
 	/* poll to verify queue is enabled */
@@ -1584,6 +1690,39 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
 					 reg_idx);
 }
 
+static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vfmrqc = 0, vfreta = 0;
+	u32 rss_key[10];
+	u16 rss_i = adapter->num_rx_queues;
+	int i, j;
+
+	/* Fill out hash function seeds */
+	netdev_rss_key_fill(rss_key, sizeof(rss_key));
+	for (i = 0; i < 10; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+
+	/* Fill out redirection table */
+	for (i = 0, j = 0; i < 64; i++, j++) {
+		if (j == rss_i)
+			j = 0;
+		vfreta = (vfreta << 8) | (j * 0x1);
+		if ((i & 3) == 3)
+			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+	}
+
+	/* Perform hash on these packet types */
+	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
+		  IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
+		  IXGBE_VFMRQC_RSS_FIELD_IPV6 |
+		  IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
+
+	vfmrqc |= IXGBE_VFMRQC_RSSEN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
+}
+
 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 				      struct ixgbevf_ring *ring)
 {
@@ -1640,6 +1779,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	ixgbevf_setup_psrtype(adapter);
+	if (hw->mac.type >= ixgbe_mac_X550_vf)
+		ixgbevf_setup_vfmrqc(adapter);
 
 	/* notify the PF of our intent to use this size of frame */
 	ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1794,7 +1935,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	unsigned int def_q = 0;
 	unsigned int num_tcs = 0;
-	unsigned int num_rx_queues = 1;
+	unsigned int num_rx_queues = adapter->num_rx_queues;
+	unsigned int num_tx_queues = adapter->num_tx_queues;
 	int err;
 
 	spin_lock_bh(&adapter->mbx_lock);
@@ -1808,6 +1950,9 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 		return err;
 
 	if (num_tcs > 1) {
+		/* we need only one Tx queue */
+		num_tx_queues = 1;
+
 		/* update default Tx ring register index */
 		adapter->tx_ring[0]->reg_idx = def_q;
 
@@ -1816,7 +1961,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 	}
 
 	/* if we have a bad config abort request queue reset */
-	if (adapter->num_rx_queues != num_rx_queues) {
+	if ((adapter->num_rx_queues != num_rx_queues) ||
+	    (adapter->num_tx_queues != num_tx_queues)) {
 		/* force mailbox timeout to prevent further messages */
 		hw->mbx.timeout = 0;
 
@@ -1917,6 +2063,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
         clear_bit(__IXGBEVF_DOWN, &adapter->state);
         ixgbevf_napi_enable_all(adapter);
 
+        /* clear any pending interrupts, may auto mask */
+        IXGBE_READ_REG(hw, IXGBE_VTEICR);
+        ixgbevf_irq_enable(adapter);
+
         /* enable transmits */
         netif_tx_start_all_queues(netdev);
 
@@ -1924,21 +2074,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
         ixgbevf_init_last_counter_stats(adapter);
 
         hw->mac.get_link_status = 1;
-        mod_timer(&adapter->watchdog_timer, jiffies);
+        mod_timer(&adapter->service_timer, jiffies);
 }
 
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
-        struct ixgbe_hw *hw = &adapter->hw;
-
         ixgbevf_configure(adapter);
 
         ixgbevf_up_complete(adapter);
-
-        /* clear any pending interrupts, may auto mask */
-        IXGBE_READ_REG(hw, IXGBE_VTEICR);
-
-        ixgbevf_irq_enable(adapter);
 }
 
 /**
@@ -2045,22 +2188,19 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
         for (i = 0; i < adapter->num_rx_queues; i++)
                 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
-        netif_tx_disable(netdev);
-
-        msleep(10);
+        usleep_range(10000, 20000);
 
         netif_tx_stop_all_queues(netdev);
 
+        /* call carrier off first to avoid false dev_watchdog timeouts */
+        netif_carrier_off(netdev);
+        netif_tx_disable(netdev);
+
         ixgbevf_irq_disable(adapter);
 
         ixgbevf_napi_disable_all(adapter);
 
-        del_timer_sync(&adapter->watchdog_timer);
-        /* can't call flush scheduled work here because it can deadlock
-         * if linkwatch_event tries to acquire the rtnl_lock which we are
-         * holding */
-        while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
-                msleep(1);
+        del_timer_sync(&adapter->service_timer);
 
         /* disable transmits in the hardware now that interrupts are off */
         for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2070,8 +2210,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
                                 IXGBE_TXDCTL_SWFLSH);
         }
 
-        netif_carrier_off(netdev);
-
         if (!pci_channel_offline(adapter->pdev))
                 ixgbevf_reset(adapter);
 
@@ -2110,6 +2248,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
                        netdev->addr_len);
         }
+
+        adapter->last_reset = jiffies;
 }
 
 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
@@ -2181,8 +2321,19 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
                 return;
 
         /* we need as many queues as traffic classes */
-        if (num_tcs > 1)
+        if (num_tcs > 1) {
                 adapter->num_rx_queues = num_tcs;
+        } else {
+                u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
+
+                switch (hw->api_version) {
+                case ixgbe_mbox_api_11:
+                        adapter->num_rx_queues = rss;
+                        adapter->num_tx_queues = rss;
+                default:
+                        break;
+                }
+        }
 }
 
 /**
@@ -2552,7 +2703,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
         struct ixgbe_hw *hw = &adapter->hw;
         int i;
 
-        if (!adapter->link_up)
+        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+            test_bit(__IXGBEVF_RESETTING, &adapter->state))
                 return;
 
         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
@@ -2576,79 +2728,176 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 }
 
 /**
- * ixgbevf_watchdog - Timer Call-back
+ * ixgbevf_service_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
-static void ixgbevf_watchdog(unsigned long data)
+static void ixgbevf_service_timer(unsigned long data)
 {
         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+
+        /* Reset the timer */
+        mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
+
+        ixgbevf_service_event_schedule(adapter);
+}
+
+static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+        if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+                return;
+
+        adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
+
+        /* If we're already down or resetting, just bail */
+        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+            test_bit(__IXGBEVF_RESETTING, &adapter->state))
+                return;
+
+        adapter->tx_timeout_count++;
+
+        ixgbevf_reinit_locked(adapter);
+}
+
+/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter - pointer to the device adapter structure
+ *
+ * This function serves two purposes. First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring. Secondly it sets the
+ * bits needed to check for TX hangs. As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
+{
         struct ixgbe_hw *hw = &adapter->hw;
         u32 eics = 0;
         int i;
 
-        /*
-         * Do the watchdog outside of interrupt context due to the lovely
-         * delays that some of the newer hardware requires
-         */
+        /* If we're down or resetting, just bail */
+        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+            test_bit(__IXGBEVF_RESETTING, &adapter->state))
+                return;
 
-        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
-                goto watchdog_short_circuit;
+        /* Force detection of hung controller */
+        if (netif_carrier_ok(adapter->netdev)) {
+                for (i = 0; i < adapter->num_tx_queues; i++)
+                        set_check_for_tx_hang(adapter->tx_ring[i]);
+        }
 
         /* get one bit for every active tx/rx interrupt vector */
         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+
                 if (qv->rx.ring || qv->tx.ring)
                         eics |= 1 << i;
         }
 
+        /* Cause software interrupt to ensure rings are cleaned */
         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
+}
 
-watchdog_short_circuit:
-        schedule_work(&adapter->watchdog_task);
+/**
+ * ixgbevf_watchdog_update_link - update the link status
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        u32 link_speed = adapter->link_speed;
+        bool link_up = adapter->link_up;
+        s32 err;
+
+        spin_lock_bh(&adapter->mbx_lock);
+
+        err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+        spin_unlock_bh(&adapter->mbx_lock);
+
+        /* if check for link returns error we will need to reset */
+        if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
+                adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+                link_up = false;
+        }
+
+        adapter->link_up = link_up;
+        adapter->link_speed = link_speed;
 }
 
 /**
- * ixgbevf_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
+ * ixgbevf_watchdog_link_is_up - update netif_carrier status and
+ * print link up message
+ * @adapter - pointer to the device adapter structure
  **/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
 {
-        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+        struct net_device *netdev = adapter->netdev;
 
-        /* Do the reset outside of interrupt context */
-        schedule_work(&adapter->reset_task);
+        /* only continue if link was previously down */
+        if (netif_carrier_ok(netdev))
+                return;
+
+        dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
+                 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+                 "10 Gbps" :
+                 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
+                 "1 Gbps" :
+                 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
+                 "100 Mbps" :
+                 "unknown speed");
+
+        netif_carrier_on(netdev);
 }
 
-static void ixgbevf_reset_task(struct work_struct *work)
+/**
+ * ixgbevf_watchdog_link_is_down - update netif_carrier status and
+ * print link down message
+ * @adapter - pointer to the adapter structure
+ **/
+static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
 {
-        struct ixgbevf_adapter *adapter;
-        adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+        struct net_device *netdev = adapter->netdev;
 
-        /* If we're already down or resetting, just bail */
+        adapter->link_speed = 0;
+
+        /* only continue if link was up previously */
+        if (!netif_carrier_ok(netdev))
+                return;
+
+        dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+
+        netif_carrier_off(netdev);
+}
+
+/**
+ * ixgbevf_watchdog_subtask - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
+{
+        /* if interface is down do nothing */
         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
-            test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
             test_bit(__IXGBEVF_RESETTING, &adapter->state))
                 return;
 
-        adapter->tx_timeout_count++;
+        ixgbevf_watchdog_update_link(adapter);
 
-        ixgbevf_reinit_locked(adapter);
+        if (adapter->link_up)
+                ixgbevf_watchdog_link_is_up(adapter);
+        else
+                ixgbevf_watchdog_link_is_down(adapter);
+
+        ixgbevf_update_stats(adapter);
 }
 
 /**
- * ixgbevf_watchdog_task - worker thread to bring link up
+ * ixgbevf_service_task - manages and runs subtasks
  * @work: pointer to work_struct containing our data
  **/
-static void ixgbevf_watchdog_task(struct work_struct *work)
+static void ixgbevf_service_task(struct work_struct *work)
 {
         struct ixgbevf_adapter *adapter = container_of(work,
                                                        struct ixgbevf_adapter,
-                                                       watchdog_task);
-        struct net_device *netdev = adapter->netdev;
+                                                       service_task);
         struct ixgbe_hw *hw = &adapter->hw;
-        u32 link_speed = adapter->link_speed;
-        bool link_up = adapter->link_up;
-        s32 need_reset;
 
         if (IXGBE_REMOVED(hw->hw_addr)) {
                 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
@@ -2658,73 +2907,13 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
                 }
                 return;
         }
-        ixgbevf_queue_reset_subtask(adapter);
-
-        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
-
-        /*
-         * Always check the link on the watchdog because we have
-         * no LSC interrupt
-         */
-        spin_lock_bh(&adapter->mbx_lock);
-
-        need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-
-        spin_unlock_bh(&adapter->mbx_lock);
-
-        if (need_reset) {
-                adapter->link_up = link_up;
-                adapter->link_speed = link_speed;
-                netif_carrier_off(netdev);
-                netif_tx_stop_all_queues(netdev);
-                schedule_work(&adapter->reset_task);
-                goto pf_has_reset;
-        }
-        adapter->link_up = link_up;
-        adapter->link_speed = link_speed;
-
-        if (link_up) {
-                if (!netif_carrier_ok(netdev)) {
-                        char *link_speed_string;
-                        switch (link_speed) {
-                        case IXGBE_LINK_SPEED_10GB_FULL:
-                                link_speed_string = "10 Gbps";
-                                break;
-                        case IXGBE_LINK_SPEED_1GB_FULL:
-                                link_speed_string = "1 Gbps";
-                                break;
-                        case IXGBE_LINK_SPEED_100_FULL:
-                                link_speed_string = "100 Mbps";
-                                break;
-                        default:
-                                link_speed_string = "unknown speed";
-                                break;
-                        }
-                        dev_info(&adapter->pdev->dev,
-                                 "NIC Link is Up, %s\n", link_speed_string);
-                        netif_carrier_on(netdev);
-                        netif_tx_wake_all_queues(netdev);
-                }
-        } else {
-                adapter->link_up = false;
-                adapter->link_speed = 0;
-                if (netif_carrier_ok(netdev)) {
-                        dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
-                        netif_carrier_off(netdev);
-                        netif_tx_stop_all_queues(netdev);
-                }
-        }
 
-        ixgbevf_update_stats(adapter);
-
-pf_has_reset:
-        /* Reset the timer */
-        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-                mod_timer(&adapter->watchdog_timer,
-                          round_jiffies(jiffies + (2 * HZ)));
+        ixgbevf_queue_reset_subtask(adapter);
+        ixgbevf_reset_subtask(adapter);
+        ixgbevf_watchdog_subtask(adapter);
+        ixgbevf_check_hang_subtask(adapter);
 
-        adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+        ixgbevf_service_event_complete(adapter);
 }
 
 /**
@@ -2944,10 +3133,6 @@ static int ixgbevf_open(struct net_device *netdev)
         if (!adapter->num_msix_vectors)
                 return -ENOMEM;
 
-        /* disallow open during test */
-        if (test_bit(__IXGBEVF_TESTING, &adapter->state))
-                return -EBUSY;
-
         if (hw->adapter_stopped) {
                 ixgbevf_reset(adapter);
                 /* if adapter is still stopped then PF isn't up and
@@ -2960,6 +3145,12 @@ static int ixgbevf_open(struct net_device *netdev)
                 }
         }
 
+        /* disallow open during test */
+        if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+                return -EBUSY;
+
+        netif_carrier_off(netdev);
+
         /* allocate transmit descriptors */
         err = ixgbevf_setup_all_tx_resources(adapter);
         if (err)
@@ -2979,15 +3170,11 @@ static int ixgbevf_open(struct net_device *netdev)
          */
         ixgbevf_map_rings_to_vectors(adapter);
 
-        ixgbevf_up_complete(adapter);
-
-        /* clear any pending interrupts, may auto mask */
-        IXGBE_READ_REG(hw, IXGBE_VTEICR);
         err = ixgbevf_request_irq(adapter);
         if (err)
                 goto err_req_irq;
 
-        ixgbevf_irq_enable(adapter);
+        ixgbevf_up_complete(adapter);
 
         return 0;
 
@@ -3822,28 +4009,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                             NETIF_F_HW_VLAN_CTAG_RX |
                             NETIF_F_HW_VLAN_CTAG_FILTER;
 
-        netdev->vlan_features |= NETIF_F_TSO;
-        netdev->vlan_features |= NETIF_F_TSO6;
-        netdev->vlan_features |= NETIF_F_IP_CSUM;
-        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-        netdev->vlan_features |= NETIF_F_SG;
+        netdev->vlan_features |= NETIF_F_TSO |
+                                 NETIF_F_TSO6 |
+                                 NETIF_F_IP_CSUM |
+                                 NETIF_F_IPV6_CSUM |
+                                 NETIF_F_SG;
 
         if (pci_using_dac)
                 netdev->features |= NETIF_F_HIGHDMA;
 
         netdev->priv_flags |= IFF_UNICAST_FLT;
 
-        init_timer(&adapter->watchdog_timer);
-        adapter->watchdog_timer.function = ixgbevf_watchdog;
-        adapter->watchdog_timer.data = (unsigned long)adapter;
-
         if (IXGBE_REMOVED(hw->hw_addr)) {
                 err = -EIO;
                 goto err_sw_init;
         }
-        INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
-        INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
-        set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
+
+        setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
+                    (unsigned long)adapter);
+
+        INIT_WORK(&adapter->service_task, ixgbevf_service_task);
+        set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
+        clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
 
         err = ixgbevf_init_interrupt_scheme(adapter);
         if (err)
@@ -3917,11 +4104,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
         adapter = netdev_priv(netdev);
 
         set_bit(__IXGBEVF_REMOVING, &adapter->state);
-
-        del_timer_sync(&adapter->watchdog_timer);
-
-        cancel_work_sync(&adapter->reset_task);
-        cancel_work_sync(&adapter->watchdog_task);
+        cancel_work_sync(&adapter->service_task);
 
         if (netdev->reg_state == NETREG_REGISTERED)
                 unregister_netdev(netdev);
@@ -3955,7 +4138,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
         struct net_device *netdev = pci_get_drvdata(pdev);
         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-        if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+        if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
                 return PCI_ERS_RESULT_DISCONNECT;
 
         rtnl_lock();
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 09dd8f698bea..3e712fd6e695 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -69,6 +69,16 @@
 #define IXGBE_VFGOTC_LSB 0x02020
 #define IXGBE_VFGOTC_MSB 0x02024
 #define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFMRQC 0x3000
+#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
+
+/* VFMRQC bits */
+#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
 
 #define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
 