aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-03-19 23:55:48 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-19 23:55:48 -0400
commit20248162f221184790b598a88763a87170b0f08e (patch)
tree81e79217b8bc763008afca67c8f229b9c0c08e1e
parent28bdc499d647124fa5844453d35e6f5d1b3810dc (diff)
parent5b346dc97567270a5c0f02a390a1d1bb65237cea (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says: ==================== Intel Wired LAN Driver Updates This series contains updates to i40e, i40evf, e1000e, ixgbe and ixgbevf. Mitch adds support for the VF link state ndo which allows the PF driver to control the virtual link state of the VF devices. Added support for viewing and modifying RSS hash options and RSS hash look-up table programming through ethtool for i40evf. Fixed complaint about the use of min() where min_t() should be used in i40evf. Anjali adds support for ethtool -k option for NTUPLE control for i40e. Elizabeth cleans up and refactors i40e_open() to separate out the VSI code into its own i40e_vsi_open(). Jesse enables the hardware feature head write back to avoid updating the descriptor ring by marking each descriptor with a DD bit and instead writes a memory location with an update to where the driver should clean up to in i40e and i40evf. Reduces context descriptors for i40e/i40evf since we do not need context descriptors for every packet, only for TSO or timesync. Dan Carpenter fixes a potential array underflow in i40e_vc_process_vf_msg(). Dave fixes an e1000e hardware unit hang where the check for pending Tx work when link is lost was mistakenly moved to be done only when link is first detected to be lost. Fixed a problem with poor network performance on certain silicon in e1000e when configured for 100M HDX link speed. Carolyn adds register defines needed for time sync functions and the code to call the updated defines. Jacob adds the ixgbe function for writing PCI config word and checks whether the adapter has been removed first. Mark adds the bit __IXGBEVF_REMOVING to indicate that the module is being removed because the __IXGBEVF_DOWN bit had been overloaded for this purpose, but leads to trouble. ixgbevf_down function can now prevent multiple executions by doing test_and_set_bit on __IXGBEVF_DOWN. 
v2: - dropped patch Mitch's patch "i40evf: Support RSS option in ethtool" based on feedback from Ben Hutchings so that Mitch can re-work the patch solution v3: - removed unnecessary parenthesis in patch 1 based on feedback from David Miller - changed a macro to get the next queue to a function in patch 2 based on feedback from David Miller - added blank lines after variable declaration and code in two functions in patch 6 based on feedback from David Miller ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c42
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c89
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c91
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c29
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h70
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c18
16 files changed, 408 insertions, 71 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index eafad410e59a..6bd1832e3f3e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1090,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
1090 adapter->tx_hang_recheck = true; 1090 adapter->tx_hang_recheck = true;
1091 return; 1091 return;
1092 } 1092 }
1093 /* Real hang detected */
1094 adapter->tx_hang_recheck = false; 1093 adapter->tx_hang_recheck = false;
1094
1095 if (er32(TDH(0)) == er32(TDT(0))) {
1096 e_dbg("false hang detected, ignoring\n");
1097 return;
1098 }
1099
1100 /* Real hang detected */
1095 netif_stop_queue(netdev); 1101 netif_stop_queue(netdev);
1096 1102
1097 e1e_rphy(hw, MII_BMSR, &phy_status); 1103 e1e_rphy(hw, MII_BMSR, &phy_status);
@@ -1121,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
1121 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), 1127 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1122 phy_status, phy_1000t_status, phy_ext_status, pci_status); 1128 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1123 1129
1130 e1000e_dump(adapter);
1131
1124 /* Suggest workaround for known h/w issue */ 1132 /* Suggest workaround for known h/w issue */
1125 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) 1133 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1126 e_err("Try turning off Tx pause (flow control) via ethtool\n"); 1134 e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -2890,7 +2898,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2890 struct e1000_hw *hw = &adapter->hw; 2898 struct e1000_hw *hw = &adapter->hw;
2891 struct e1000_ring *tx_ring = adapter->tx_ring; 2899 struct e1000_ring *tx_ring = adapter->tx_ring;
2892 u64 tdba; 2900 u64 tdba;
2893 u32 tdlen, tarc; 2901 u32 tdlen, tctl, tarc;
2894 2902
2895 /* Setup the HW Tx Head and Tail descriptor pointers */ 2903 /* Setup the HW Tx Head and Tail descriptor pointers */
2896 tdba = tx_ring->dma; 2904 tdba = tx_ring->dma;
@@ -2927,6 +2935,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2927 /* erratum work around: set txdctl the same for both queues */ 2935 /* erratum work around: set txdctl the same for both queues */
2928 ew32(TXDCTL(1), er32(TXDCTL(0))); 2936 ew32(TXDCTL(1), er32(TXDCTL(0)));
2929 2937
2938 /* Program the Transmit Control Register */
2939 tctl = er32(TCTL);
2940 tctl &= ~E1000_TCTL_CT;
2941 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2942 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2943
2930 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2944 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2931 tarc = er32(TARC(0)); 2945 tarc = er32(TARC(0));
2932 /* set the speed mode bit, we'll clear it if we're not at 2946 /* set the speed mode bit, we'll clear it if we're not at
@@ -2957,6 +2971,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2957 /* enable Report Status bit */ 2971 /* enable Report Status bit */
2958 adapter->txd_cmd |= E1000_TXD_CMD_RS; 2972 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2959 2973
2974 ew32(TCTL, tctl);
2975
2960 hw->mac.ops.config_collision_dist(hw); 2976 hw->mac.ops.config_collision_dist(hw);
2961} 2977}
2962 2978
@@ -4798,6 +4814,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4798 4814
4799 if (adapter->phy_hang_count > 1) { 4815 if (adapter->phy_hang_count > 1) {
4800 adapter->phy_hang_count = 0; 4816 adapter->phy_hang_count = 0;
4817 e_dbg("PHY appears hung - resetting\n");
4801 schedule_work(&adapter->reset_task); 4818 schedule_work(&adapter->reset_task);
4802 } 4819 }
4803} 4820}
@@ -4956,15 +4973,11 @@ static void e1000_watchdog_task(struct work_struct *work)
4956 mod_timer(&adapter->phy_info_timer, 4973 mod_timer(&adapter->phy_info_timer,
4957 round_jiffies(jiffies + 2 * HZ)); 4974 round_jiffies(jiffies + 2 * HZ));
4958 4975
4959 /* The link is lost so the controller stops DMA. 4976 /* 8000ES2LAN requires a Rx packet buffer work-around
4960 * If there is queued Tx work that cannot be done 4977 * on link down event; reset the controller to flush
4961 * or if on an 8000ES2LAN which requires a Rx packet 4978 * the Rx packet buffer.
4962 * buffer work-around on link down event, reset the
4963 * controller to flush the Tx/Rx packet buffers.
4964 * (Do the reset outside of interrupt context).
4965 */ 4979 */
4966 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || 4980 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4967 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
4968 adapter->flags |= FLAG_RESTART_NOW; 4981 adapter->flags |= FLAG_RESTART_NOW;
4969 else 4982 else
4970 pm_schedule_suspend(netdev->dev.parent, 4983 pm_schedule_suspend(netdev->dev.parent,
@@ -4987,6 +5000,15 @@ link_up:
4987 adapter->gotc_old = adapter->stats.gotc; 5000 adapter->gotc_old = adapter->stats.gotc;
4988 spin_unlock(&adapter->stats64_lock); 5001 spin_unlock(&adapter->stats64_lock);
4989 5002
5003 /* If the link is lost the controller stops DMA, but
5004 * if there is queued Tx work it cannot be done. So
5005 * reset the controller to flush the Tx packet buffers.
5006 */
5007 if (!netif_carrier_ok(netdev) &&
5008 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5009 adapter->flags |= FLAG_RESTART_NOW;
5010
5011 /* If reset is necessary, do it outside of interrupt context. */
4990 if (adapter->flags & FLAG_RESTART_NOW) { 5012 if (adapter->flags & FLAG_RESTART_NOW) {
4991 schedule_work(&adapter->reset_task); 5013 schedule_work(&adapter->reset_task);
4992 /* return immediately since reset is imminent */ 5014 /* return immediately since reset is imminent */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index bd1b4690a608..33cd8b67535d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -208,7 +208,7 @@ struct i40e_pf {
208 bool fc_autoneg_status; 208 bool fc_autoneg_status;
209 209
210 u16 eeprom_version; 210 u16 eeprom_version;
211 u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */ 211 u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
212 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ 212 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
213 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ 213 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
214 u16 num_req_vfs; /* num vfs requested for this vf */ 214 u16 num_req_vfs; /* num vfs requested for this vf */
@@ -558,6 +558,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
558 struct i40e_fdir_filter *input, bool add); 558 struct i40e_fdir_filter *input, bool add);
559void i40e_fdir_check_and_reenable(struct i40e_pf *pf); 559void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
560int i40e_get_current_fd_count(struct i40e_pf *pf); 560int i40e_get_current_fd_count(struct i40e_pf *pf);
561bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
561void i40e_set_ethtool_ops(struct net_device *netdev); 562void i40e_set_ethtool_ops(struct net_device *netdev);
562struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, 563struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
563 u8 *macaddr, s16 vlan, 564 u8 *macaddr, s16 vlan,
@@ -596,6 +597,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
596void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); 597void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
597void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); 598void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
598int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 599int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
600int i40e_vsi_open(struct i40e_vsi *vsi);
599void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 601void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
600int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 602int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
601int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); 603int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3daaf205eabc..113354214517 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,7 +38,7 @@ static const char i40e_driver_string[] =
38 38
39#define DRV_VERSION_MAJOR 0 39#define DRV_VERSION_MAJOR 0
40#define DRV_VERSION_MINOR 3 40#define DRV_VERSION_MINOR 3
41#define DRV_VERSION_BUILD 34 41#define DRV_VERSION_BUILD 36
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 43 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) DRV_KERN 44 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2181,6 +2181,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
2181 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2181 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2182 I40E_FLAG_FD_ATR_ENABLED)); 2182 I40E_FLAG_FD_ATR_ENABLED));
2183 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 2183 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2184 /* FDIR VSI tx ring can still use RS bit and writebacks */
2185 if (vsi->type != I40E_VSI_FDIR)
2186 tx_ctx.head_wb_ena = 1;
2187 tx_ctx.head_wb_addr = ring->dma +
2188 (ring->count * sizeof(struct i40e_tx_desc));
2184 2189
2185 /* As part of VSI creation/update, FW allocates certain 2190 /* As part of VSI creation/update, FW allocates certain
2186 * Tx arbitration queue sets for each TC enabled for 2191 * Tx arbitration queue sets for each TC enabled for
@@ -4235,7 +4240,6 @@ static int i40e_open(struct net_device *netdev)
4235 struct i40e_netdev_priv *np = netdev_priv(netdev); 4240 struct i40e_netdev_priv *np = netdev_priv(netdev);
4236 struct i40e_vsi *vsi = np->vsi; 4241 struct i40e_vsi *vsi = np->vsi;
4237 struct i40e_pf *pf = vsi->back; 4242 struct i40e_pf *pf = vsi->back;
4238 char int_name[IFNAMSIZ];
4239 int err; 4243 int err;
4240 4244
4241 /* disallow open during test */ 4245 /* disallow open during test */
@@ -4244,6 +4248,31 @@ static int i40e_open(struct net_device *netdev)
4244 4248
4245 netif_carrier_off(netdev); 4249 netif_carrier_off(netdev);
4246 4250
4251 err = i40e_vsi_open(vsi);
4252 if (err)
4253 return err;
4254
4255#ifdef CONFIG_I40E_VXLAN
4256 vxlan_get_rx_port(netdev);
4257#endif
4258
4259 return 0;
4260}
4261
4262/**
4263 * i40e_vsi_open -
4264 * @vsi: the VSI to open
4265 *
4266 * Finish initialization of the VSI.
4267 *
4268 * Returns 0 on success, negative value on failure
4269 **/
4270int i40e_vsi_open(struct i40e_vsi *vsi)
4271{
4272 struct i40e_pf *pf = vsi->back;
4273 char int_name[IFNAMSIZ];
4274 int err;
4275
4247 /* allocate descriptors */ 4276 /* allocate descriptors */
4248 err = i40e_vsi_setup_tx_resources(vsi); 4277 err = i40e_vsi_setup_tx_resources(vsi);
4249 if (err) 4278 if (err)
@@ -4256,18 +4285,22 @@ static int i40e_open(struct net_device *netdev)
4256 if (err) 4285 if (err)
4257 goto err_setup_rx; 4286 goto err_setup_rx;
4258 4287
4288 if (!vsi->netdev) {
4289 err = EINVAL;
4290 goto err_setup_rx;
4291 }
4259 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 4292 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4260 dev_driver_string(&pf->pdev->dev), netdev->name); 4293 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4261 err = i40e_vsi_request_irq(vsi, int_name); 4294 err = i40e_vsi_request_irq(vsi, int_name);
4262 if (err) 4295 if (err)
4263 goto err_setup_rx; 4296 goto err_setup_rx;
4264 4297
4265 /* Notify the stack of the actual queue counts. */ 4298 /* Notify the stack of the actual queue counts. */
4266 err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs); 4299 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
4267 if (err) 4300 if (err)
4268 goto err_set_queues; 4301 goto err_set_queues;
4269 4302
4270 err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs); 4303 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
4271 if (err) 4304 if (err)
4272 goto err_set_queues; 4305 goto err_set_queues;
4273 4306
@@ -4275,10 +4308,6 @@ static int i40e_open(struct net_device *netdev)
4275 if (err) 4308 if (err)
4276 goto err_up_complete; 4309 goto err_up_complete;
4277 4310
4278#ifdef CONFIG_I40E_VXLAN
4279 vxlan_get_rx_port(netdev);
4280#endif
4281
4282 return 0; 4311 return 0;
4283 4312
4284err_up_complete: 4313err_up_complete:
@@ -6409,6 +6438,39 @@ sw_init_done:
6409} 6438}
6410 6439
6411/** 6440/**
6441 * i40e_set_ntuple - set the ntuple feature flag and take action
6442 * @pf: board private structure to initialize
6443 * @features: the feature set that the stack is suggesting
6444 *
6445 * returns a bool to indicate if reset needs to happen
6446 **/
6447bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
6448{
6449 bool need_reset = false;
6450
6451 /* Check if Flow Director n-tuple support was enabled or disabled. If
6452 * the state changed, we need to reset.
6453 */
6454 if (features & NETIF_F_NTUPLE) {
6455 /* Enable filters and mark for reset */
6456 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6457 need_reset = true;
6458 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6459 } else {
6460 /* turn off filters, mark for reset and clear SW filter list */
6461 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
6462 need_reset = true;
6463 i40e_fdir_filter_exit(pf);
6464 }
6465 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6466 /* if ATR was disabled it can be re-enabled. */
6467 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
6468 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6469 }
6470 return need_reset;
6471}
6472
6473/**
6412 * i40e_set_features - set the netdev feature flags 6474 * i40e_set_features - set the netdev feature flags
6413 * @netdev: ptr to the netdev being adjusted 6475 * @netdev: ptr to the netdev being adjusted
6414 * @features: the feature set that the stack is suggesting 6476 * @features: the feature set that the stack is suggesting
@@ -6418,12 +6480,19 @@ static int i40e_set_features(struct net_device *netdev,
6418{ 6480{
6419 struct i40e_netdev_priv *np = netdev_priv(netdev); 6481 struct i40e_netdev_priv *np = netdev_priv(netdev);
6420 struct i40e_vsi *vsi = np->vsi; 6482 struct i40e_vsi *vsi = np->vsi;
6483 struct i40e_pf *pf = vsi->back;
6484 bool need_reset;
6421 6485
6422 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6486 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6423 i40e_vlan_stripping_enable(vsi); 6487 i40e_vlan_stripping_enable(vsi);
6424 else 6488 else
6425 i40e_vlan_stripping_disable(vsi); 6489 i40e_vlan_stripping_disable(vsi);
6426 6490
6491 need_reset = i40e_set_ntuple(pf, features);
6492
6493 if (need_reset)
6494 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
6495
6427 return 0; 6496 return 0;
6428} 6497}
6429 6498
@@ -6547,6 +6616,7 @@ static const struct net_device_ops i40e_netdev_ops = {
6547 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 6616 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
6548 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 6617 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
6549 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6618 .ndo_get_vf_config = i40e_ndo_get_vf_config,
6619 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
6550#ifdef CONFIG_I40E_VXLAN 6620#ifdef CONFIG_I40E_VXLAN
6551 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6621 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6552 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6622 .ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -6594,6 +6664,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6594 NETIF_F_TSO | 6664 NETIF_F_TSO |
6595 NETIF_F_TSO6 | 6665 NETIF_F_TSO6 |
6596 NETIF_F_RXCSUM | 6666 NETIF_F_RXCSUM |
6667 NETIF_F_NTUPLE |
6597 NETIF_F_RXHASH | 6668 NETIF_F_RXHASH |
6598 0; 6669 0;
6599 6670
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 88666adb0743..851f6537a96a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -619,6 +619,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
619} 619}
620 620
621/** 621/**
622 * i40e_get_head - Retrieve head from head writeback
623 * @tx_ring: tx ring to fetch head of
624 *
625 * Returns value of Tx ring head based on value stored
626 * in head write-back location
627 **/
628static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
629{
630 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
631
632 return le32_to_cpu(*(volatile __le32 *)head);
633}
634
635/**
622 * i40e_clean_tx_irq - Reclaim resources after transmit completes 636 * i40e_clean_tx_irq - Reclaim resources after transmit completes
623 * @tx_ring: tx ring to clean 637 * @tx_ring: tx ring to clean
624 * @budget: how many cleans we're allowed 638 * @budget: how many cleans we're allowed
@@ -629,6 +643,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
629{ 643{
630 u16 i = tx_ring->next_to_clean; 644 u16 i = tx_ring->next_to_clean;
631 struct i40e_tx_buffer *tx_buf; 645 struct i40e_tx_buffer *tx_buf;
646 struct i40e_tx_desc *tx_head;
632 struct i40e_tx_desc *tx_desc; 647 struct i40e_tx_desc *tx_desc;
633 unsigned int total_packets = 0; 648 unsigned int total_packets = 0;
634 unsigned int total_bytes = 0; 649 unsigned int total_bytes = 0;
@@ -637,6 +652,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
637 tx_desc = I40E_TX_DESC(tx_ring, i); 652 tx_desc = I40E_TX_DESC(tx_ring, i);
638 i -= tx_ring->count; 653 i -= tx_ring->count;
639 654
655 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
656
640 do { 657 do {
641 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 658 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
642 659
@@ -647,9 +664,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
647 /* prevent any other reads prior to eop_desc */ 664 /* prevent any other reads prior to eop_desc */
648 read_barrier_depends(); 665 read_barrier_depends();
649 666
650 /* if the descriptor isn't done, no work yet to do */ 667 /* we have caught up to head, no work left to do */
651 if (!(eop_desc->cmd_type_offset_bsz & 668 if (tx_head == tx_desc)
652 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
653 break; 669 break;
654 670
655 /* clear next_to_watch to prevent false hangs */ 671 /* clear next_to_watch to prevent false hangs */
@@ -905,6 +921,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
905 921
906 /* round up to nearest 4K */ 922 /* round up to nearest 4K */
907 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 923 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
924 /* add u32 for head writeback, align after this takes care of
925 * guaranteeing this is at least one cache line in size
926 */
927 tx_ring->size += sizeof(u32);
908 tx_ring->size = ALIGN(tx_ring->size, 4096); 928 tx_ring->size = ALIGN(tx_ring->size, 4096);
909 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 929 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
910 &tx_ring->dma, GFP_KERNEL); 930 &tx_ring->dma, GFP_KERNEL);
@@ -1931,7 +1951,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1931 struct i40e_tx_context_desc *context_desc; 1951 struct i40e_tx_context_desc *context_desc;
1932 int i = tx_ring->next_to_use; 1952 int i = tx_ring->next_to_use;
1933 1953
1934 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1954 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1955 !cd_tunneling && !cd_l2tag2)
1935 return; 1956 return;
1936 1957
1937 /* grab the next descriptor */ 1958 /* grab the next descriptor */
@@ -2042,9 +2063,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2042 tx_bi = &tx_ring->tx_bi[i]; 2063 tx_bi = &tx_ring->tx_bi[i];
2043 } 2064 }
2044 2065
2045 tx_desc->cmd_type_offset_bsz = 2066 /* Place RS bit on last descriptor of any packet that spans across the
2046 build_ctob(td_cmd, td_offset, size, td_tag) | 2067 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
2047 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 2068 */
2069#define WB_STRIDE 0x3
2070 if (((i & WB_STRIDE) != WB_STRIDE) &&
2071 (first <= &tx_ring->tx_bi[i]) &&
2072 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
2073 tx_desc->cmd_type_offset_bsz =
2074 build_ctob(td_cmd, td_offset, size, td_tag) |
2075 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
2076 I40E_TXD_QW1_CMD_SHIFT);
2077 } else {
2078 tx_desc->cmd_type_offset_bsz =
2079 build_ctob(td_cmd, td_offset, size, td_tag) |
2080 cpu_to_le64((u64)I40E_TXD_CMD <<
2081 I40E_TXD_QW1_CMD_SHIFT);
2082 }
2048 2083
2049 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 2084 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2050 tx_ring->queue_index), 2085 tx_ring->queue_index),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 7839343b967b..02c11a7f7d29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
230 tx_ctx.qlen = info->ring_len; 230 tx_ctx.qlen = info->ring_len;
231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
232 tx_ctx.rdylist_act = 0; 232 tx_ctx.rdylist_act = 0;
233 tx_ctx.head_wb_ena = 1;
234 tx_ctx.head_wb_addr = info->dma_ring_addr +
235 (info->ring_len * sizeof(struct i40e_tx_desc));
233 236
234 /* clear the context in the HMC */ 237 /* clear the context in the HMC */
235 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 238 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -1771,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
1771 u32 v_retval, u8 *msg, u16 msglen) 1774 u32 v_retval, u8 *msg, u16 msglen)
1772{ 1775{
1773 struct i40e_hw *hw = &pf->hw; 1776 struct i40e_hw *hw = &pf->hw;
1774 int local_vf_id = vf_id - hw->func_caps.vf_base_id; 1777 unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
1775 struct i40e_vf *vf; 1778 struct i40e_vf *vf;
1776 int ret; 1779 int ret;
1777 1780
@@ -1920,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1920void i40e_vc_notify_link_state(struct i40e_pf *pf) 1923void i40e_vc_notify_link_state(struct i40e_pf *pf)
1921{ 1924{
1922 struct i40e_virtchnl_pf_event pfe; 1925 struct i40e_virtchnl_pf_event pfe;
1926 struct i40e_hw *hw = &pf->hw;
1927 struct i40e_vf *vf = pf->vf;
1928 struct i40e_link_status *ls = &pf->hw.phy.link_info;
1929 int i;
1923 1930
1924 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1931 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1925 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 1932 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1926 pfe.event_data.link_event.link_status = 1933 for (i = 0; i < pf->num_alloc_vfs; i++) {
1927 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 1934 if (vf->link_forced) {
1928 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; 1935 pfe.event_data.link_event.link_status = vf->link_up;
1929 1936 pfe.event_data.link_event.link_speed =
1930 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, 1937 (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
1931 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 1938 } else {
1939 pfe.event_data.link_event.link_status =
1940 ls->link_info & I40E_AQ_LINK_UP;
1941 pfe.event_data.link_event.link_speed = ls->link_speed;
1942 }
1943 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
1944 0, (u8 *)&pfe, sizeof(pfe),
1945 NULL);
1946 vf++;
1947 }
1932} 1948}
1933 1949
1934/** 1950/**
@@ -2193,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2193error_param: 2209error_param:
2194 return ret; 2210 return ret;
2195} 2211}
2212
2213/**
2214 * i40e_ndo_set_vf_link_state
2215 * @netdev: network interface device structure
2216 * @vf_id: vf identifier
2217 * @link: required link state
2218 *
2219 * Set the link state of a specified VF, regardless of physical link state
2220 **/
2221int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2222{
2223 struct i40e_netdev_priv *np = netdev_priv(netdev);
2224 struct i40e_pf *pf = np->vsi->back;
2225 struct i40e_virtchnl_pf_event pfe;
2226 struct i40e_hw *hw = &pf->hw;
2227 struct i40e_vf *vf;
2228 int ret = 0;
2229
2230 /* validate the request */
2231 if (vf_id >= pf->num_alloc_vfs) {
2232 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2233 ret = -EINVAL;
2234 goto error_out;
2235 }
2236
2237 vf = &pf->vf[vf_id];
2238
2239 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2240 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2241
2242 switch (link) {
2243 case IFLA_VF_LINK_STATE_AUTO:
2244 vf->link_forced = false;
2245 pfe.event_data.link_event.link_status =
2246 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2247 pfe.event_data.link_event.link_speed =
2248 pf->hw.phy.link_info.link_speed;
2249 break;
2250 case IFLA_VF_LINK_STATE_ENABLE:
2251 vf->link_forced = true;
2252 vf->link_up = true;
2253 pfe.event_data.link_event.link_status = true;
2254 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2255 break;
2256 case IFLA_VF_LINK_STATE_DISABLE:
2257 vf->link_forced = true;
2258 vf->link_up = false;
2259 pfe.event_data.link_event.link_status = false;
2260 pfe.event_data.link_event.link_speed = 0;
2261 break;
2262 default:
2263 ret = -EINVAL;
2264 goto error_out;
2265 }
2266 /* Notify the VF of its new link state */
2267 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2268 0, (u8 *)&pfe, sizeof(pfe), NULL);
2269
2270error_out:
2271 return ret;
2272}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index bedf0ba21d74..389c47f396d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,6 +98,8 @@ struct i40e_vf {
98 98
99 unsigned long vf_caps; /* vf's adv. capabilities */ 99 unsigned long vf_caps; /* vf's adv. capabilities */
100 unsigned long vf_states; /* vf's runtime states */ 100 unsigned long vf_states; /* vf's runtime states */
101 bool link_forced;
102 bool link_up; /* only valid if vf link is forced */
101}; 103};
102 104
103void i40e_free_vfs(struct i40e_pf *pf); 105void i40e_free_vfs(struct i40e_pf *pf);
@@ -116,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
116int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); 118int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
117int i40e_ndo_get_vf_config(struct net_device *netdev, 119int i40e_ndo_get_vf_config(struct net_device *netdev,
118 int vf_id, struct ifla_vf_info *ivi); 120 int vf_id, struct ifla_vf_info *ivi);
121int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
122
119void i40e_vc_notify_link_state(struct i40e_pf *pf); 123void i40e_vc_notify_link_state(struct i40e_pf *pf);
120void i40e_vc_notify_reset(struct i40e_pf *pf); 124void i40e_vc_notify_reset(struct i40e_pf *pf);
121 125
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b1d87c6a5c35..53be5f44d015 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -170,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
170} 170}
171 171
172/** 172/**
173 * i40e_get_head - Retrieve head from head writeback
174 * @tx_ring: tx ring to fetch head of
175 *
176 * Returns value of Tx ring head based on value stored
177 * in head write-back location
178 **/
179static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
180{
181 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
182
183 return le32_to_cpu(*(volatile __le32 *)head);
184}
185
186/**
173 * i40e_clean_tx_irq - Reclaim resources after transmit completes 187 * i40e_clean_tx_irq - Reclaim resources after transmit completes
174 * @tx_ring: tx ring to clean 188 * @tx_ring: tx ring to clean
175 * @budget: how many cleans we're allowed 189 * @budget: how many cleans we're allowed
@@ -180,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
180{ 194{
181 u16 i = tx_ring->next_to_clean; 195 u16 i = tx_ring->next_to_clean;
182 struct i40e_tx_buffer *tx_buf; 196 struct i40e_tx_buffer *tx_buf;
197 struct i40e_tx_desc *tx_head;
183 struct i40e_tx_desc *tx_desc; 198 struct i40e_tx_desc *tx_desc;
184 unsigned int total_packets = 0; 199 unsigned int total_packets = 0;
185 unsigned int total_bytes = 0; 200 unsigned int total_bytes = 0;
@@ -188,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
188 tx_desc = I40E_TX_DESC(tx_ring, i); 203 tx_desc = I40E_TX_DESC(tx_ring, i);
189 i -= tx_ring->count; 204 i -= tx_ring->count;
190 205
206 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
207
191 do { 208 do {
192 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 209 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
193 210
@@ -198,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
198 /* prevent any other reads prior to eop_desc */ 215 /* prevent any other reads prior to eop_desc */
199 read_barrier_depends(); 216 read_barrier_depends();
200 217
201 /* if the descriptor isn't done, no work yet to do */ 218 /* we have caught up to head, no work left to do */
202 if (!(eop_desc->cmd_type_offset_bsz & 219 if (tx_head == tx_desc)
203 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
204 break; 220 break;
205 221
206 /* clear next_to_watch to prevent false hangs */ 222 /* clear next_to_watch to prevent false hangs */
@@ -432,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
432 448
433 /* round up to nearest 4K */ 449 /* round up to nearest 4K */
434 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 450 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
451 /* add u32 for head writeback, align after this takes care of
452 * guaranteeing this is at least one cache line in size
453 */
454 tx_ring->size += sizeof(u32);
435 tx_ring->size = ALIGN(tx_ring->size, 4096); 455 tx_ring->size = ALIGN(tx_ring->size, 4096);
436 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 456 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
437 &tx_ring->dma, GFP_KERNEL); 457 &tx_ring->dma, GFP_KERNEL);
@@ -1266,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1266 struct i40e_tx_context_desc *context_desc; 1286 struct i40e_tx_context_desc *context_desc;
1267 int i = tx_ring->next_to_use; 1287 int i = tx_ring->next_to_use;
1268 1288
1269 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1289 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1290 !cd_tunneling && !cd_l2tag2)
1270 return; 1291 return;
1271 1292
1272 /* grab the next descriptor */ 1293 /* grab the next descriptor */
@@ -1377,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1377 tx_bi = &tx_ring->tx_bi[i]; 1398 tx_bi = &tx_ring->tx_bi[i];
1378 } 1399 }
1379 1400
1380 tx_desc->cmd_type_offset_bsz = 1401 /* Place RS bit on last descriptor of any packet that spans across the
1381 build_ctob(td_cmd, td_offset, size, td_tag) | 1402 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
1382 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 1403 */
1404#define WB_STRIDE 0x3
1405 if (((i & WB_STRIDE) != WB_STRIDE) &&
1406 (first <= &tx_ring->tx_bi[i]) &&
1407 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
1408 tx_desc->cmd_type_offset_bsz =
1409 build_ctob(td_cmd, td_offset, size, td_tag) |
1410 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
1411 I40E_TXD_QW1_CMD_SHIFT);
1412 } else {
1413 tx_desc->cmd_type_offset_bsz =
1414 build_ctob(td_cmd, td_offset, size, td_tag) |
1415 cpu_to_le64((u64)I40E_TXD_CMD <<
1416 I40E_TXD_QW1_CMD_SHIFT);
1417 }
1383 1418
1384 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 1419 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1385 tx_ring->queue_index), 1420 tx_ring->queue_index),
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index d62e27f6e83a..d381bcc4ea9f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -31,7 +31,7 @@ char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 31static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 32 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 33
34#define DRV_VERSION "0.9.14" 34#define DRV_VERSION "0.9.16"
35const char i40evf_driver_version[] = DRV_VERSION; 35const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 36static const char i40evf_copyright[] =
37 "Copyright (c) 2013 - 2014 Intel Corporation."; 37 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -1140,8 +1140,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1140 * than CPU's. So let's be conservative and only ask for 1140 * than CPU's. So let's be conservative and only ask for
1141 * (roughly) twice the number of vectors as there are CPU's. 1141 * (roughly) twice the number of vectors as there are CPU's.
1142 */ 1142 */
1143 v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1143 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1144 v_budget = min(v_budget, (int)adapter->vf_res->max_vectors); 1144 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1145 1145
1146 /* A failure in MSI-X entry allocation isn't fatal, but it does 1146 /* A failure in MSI-X entry allocation isn't fatal, but it does
1147 * mean we disable MSI-X capabilities of the adapter. 1147 * mean we disable MSI-X capabilities of the adapter.
@@ -1414,6 +1414,13 @@ restart_watchdog:
1414 schedule_work(&adapter->adminq_task); 1414 schedule_work(&adapter->adminq_task);
1415} 1415}
1416 1416
1417static int next_queue(struct i40evf_adapter *adapter, int j)
1418{
1419 j += 1;
1420
1421 return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
1422}
1423
1417/** 1424/**
1418 * i40evf_configure_rss - Prepare for RSS if used 1425 * i40evf_configure_rss - Prepare for RSS if used
1419 * @adapter: board private structure 1426 * @adapter: board private structure
@@ -1444,15 +1451,13 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
1444 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); 1451 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
1445 1452
1446 /* Populate the LUT with max no. of queues in round robin fashion */ 1453 /* Populate the LUT with max no. of queues in round robin fashion */
1447 for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) { 1454 j = adapter->vsi_res->num_queue_pairs;
1448 if (j == adapter->vsi_res->num_queue_pairs) 1455 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
1449 j = 0; 1456 lut = next_queue(adapter, j);
1450 /* lut = 4-byte sliding window of 4 lut entries */ 1457 lut |= next_queue(adapter, j) << 8;
1451 lut = (lut << 8) | (j & 1458 lut |= next_queue(adapter, j) << 16;
1452 ((0x1 << 8) - 1)); 1459 lut |= next_queue(adapter, j) << 24;
1453 /* On i = 3, we have 4 entries in lut; write to the register */ 1460 wr32(hw, I40E_VFQF_HLUT(i), lut);
1454 if ((i & 3) == 3)
1455 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
1456 } 1461 }
1457 i40e_flush(hw); 1462 i40e_flush(hw);
1458} 1463}
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 393c896ac7e7..b05bf925ac72 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -43,7 +43,11 @@
43#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 43#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
44 44
45/* Extended Device Control */ 45/* Extended Device Control */
46#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
46#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ 47#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
48#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
49#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
50
47/* Physical Func Reset Done Indication */ 51/* Physical Func Reset Done Indication */
48#define E1000_CTRL_EXT_PFRSTD 0x00004000 52#define E1000_CTRL_EXT_PFRSTD 0x00004000
49#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 53#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -190,7 +194,8 @@
190/* enable link status from external LINK_0 and LINK_1 pins */ 194/* enable link status from external LINK_0 and LINK_1 pins */
191#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
192#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
193#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 197#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
198#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
194#define E1000_CTRL_RST 0x04000000 /* Global reset */ 199#define E1000_CTRL_RST 0x04000000 /* Global reset */
195#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 200#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
196#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ 201#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -528,8 +533,67 @@
528 533
529#define E1000_TIMINCA_16NS_SHIFT 24 534#define E1000_TIMINCA_16NS_SHIFT 24
530 535
531#define E1000_TSICR_TXTS 0x00000002 536/* Time Sync Interrupt Cause/Mask Register Bits */
532#define E1000_TSIM_TXTS 0x00000002 537
538#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
539#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
540#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
541#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
542#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
543#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
544#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
545#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
546
547#define TSYNC_INTERRUPTS TSINTR_TXTS
548#define E1000_TSICR_TXTS TSINTR_TXTS
549
550/* TSAUXC Configuration Bits */
551#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
552#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
553#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
554#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
555#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
556#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
557#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
558#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
559#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
560#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
 561#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */
562#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
563#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
564#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
565
566/* SDP Configuration Bits */
567#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
568#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
569#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
570#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
571#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
572#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
573#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
574#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
575#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
576#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
577#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
578#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
579#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
580#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
581#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
582#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
583#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
584#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
585#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
586#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
587#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
588#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
589#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
590#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
591#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
592#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
593#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
594#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
595#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
596#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
533 597
534#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ 598#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
535#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ 599#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index abdd935a9dad..e9c5fdd60f54 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -40,6 +40,7 @@
40#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 40#define E1000_FCT 0x00030 /* Flow Control Type - RW */
41#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ 41#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
42#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 42#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
43#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
43#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ 44#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
44#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ 45#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
45#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 46#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
@@ -101,6 +102,14 @@
101#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ 102#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
102#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ 103#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
103#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ 104#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
105#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
106#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
107#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
108#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
109#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
110#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
111#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
112#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
104#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ 113#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
105#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ 114#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
106#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ 115#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9c9c141f089a..a894551ae3c0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -799,7 +799,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
799 799
800 /* Initialize the time sync interrupts for devices that support it. */ 800 /* Initialize the time sync interrupts for devices that support it. */
801 if (hw->mac.type >= e1000_82580) { 801 if (hw->mac.type >= e1000_82580) {
802 wr32(E1000_TSIM, E1000_TSIM_TXTS); 802 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
803 wr32(E1000_IMS, E1000_IMS_TS); 803 wr32(E1000_IMS, E1000_IMS_TS);
804 } 804 }
805 805
@@ -877,7 +877,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
877 case e1000_i211: 877 case e1000_i211:
878 /* Enable the timer functions and interrupts. */ 878 /* Enable the timer functions and interrupts. */
879 wr32(E1000_TSAUXC, 0x0); 879 wr32(E1000_TSAUXC, 0x0);
880 wr32(E1000_TSIM, E1000_TSIM_TXTS); 880 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
881 wr32(E1000_IMS, E1000_IMS_TS); 881 wr32(E1000_IMS, E1000_IMS_TS);
882 break; 882 break;
883 default: 883 default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index f8ebe583a2ab..7fe22542e404 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -58,7 +58,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
58 **/ 58 **/
59static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) 59static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
60{ 60{
61 struct ixgbe_adapter *adapter = hw->back;
62 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); 61 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
63 u16 pcie_devctl2; 62 u16 pcie_devctl2;
64 63
@@ -84,11 +83,8 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
84 * 16ms to 55ms 83 * 16ms to 55ms
85 */ 84 */
86 pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); 85 pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
87 if (ixgbe_removed(hw->hw_addr))
88 return;
89 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; 86 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
90 pci_write_config_word(adapter->pdev, 87 ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
91 IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
92out: 88out:
93 /* disable completion timeout resend */ 89 /* disable completion timeout resend */
94 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; 90 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d1d67ba54775..afa1cda90c2e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -133,6 +133,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
133#define IXGBE_FAILED_READ_CFG_WORD 0xffffU 133#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
134 134
135u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); 135u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
136void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
136 137
137static inline bool ixgbe_removed(void __iomem *addr) 138static inline bool ixgbe_removed(void __iomem *addr)
138{ 139{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18cd8ca319ea..c773d6cb6063 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -361,6 +361,15 @@ static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
361} 361}
362#endif /* CONFIG_PCI_IOV */ 362#endif /* CONFIG_PCI_IOV */
363 363
364void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
365{
366 struct ixgbe_adapter *adapter = hw->back;
367
368 if (ixgbe_removed(hw->hw_addr))
369 return;
370 pci_write_config_word(adapter->pdev, reg, value);
371}
372
364static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) 373static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
365{ 374{
366 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); 375 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 54829326bb09..08fb88aba67b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -412,7 +412,8 @@ struct ixgbevf_adapter {
412enum ixbgevf_state_t { 412enum ixbgevf_state_t {
413 __IXGBEVF_TESTING, 413 __IXGBEVF_TESTING,
414 __IXGBEVF_RESETTING, 414 __IXGBEVF_RESETTING,
415 __IXGBEVF_DOWN 415 __IXGBEVF_DOWN,
416 __IXGBEVF_REMOVING,
416}; 417};
417 418
418struct ixgbevf_cb { 419struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 8581079791fe..a2cba53c31be 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -608,7 +608,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
608 napi_complete(napi); 608 napi_complete(napi);
609 if (adapter->rx_itr_setting & 1) 609 if (adapter->rx_itr_setting & 1)
610 ixgbevf_set_itr(q_vector); 610 ixgbevf_set_itr(q_vector);
611 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 611 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
612 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
612 ixgbevf_irq_enable_queues(adapter, 613 ixgbevf_irq_enable_queues(adapter,
613 1 << q_vector->v_idx); 614 1 << q_vector->v_idx);
614 615
@@ -833,7 +834,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
833 834
834 hw->mac.get_link_status = 1; 835 hw->mac.get_link_status = 1;
835 836
836 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 837 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
838 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
837 mod_timer(&adapter->watchdog_timer, jiffies); 839 mod_timer(&adapter->watchdog_timer, jiffies);
838 840
839 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 841 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1618,6 +1620,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1618 1620
1619 spin_unlock_bh(&adapter->mbx_lock); 1621 spin_unlock_bh(&adapter->mbx_lock);
1620 1622
1623 smp_mb__before_clear_bit();
1621 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1624 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1622 ixgbevf_napi_enable_all(adapter); 1625 ixgbevf_napi_enable_all(adapter);
1623 1626
@@ -1742,7 +1745,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
1742 int i; 1745 int i;
1743 1746
1744 /* signal that we are down to the interrupt handler */ 1747 /* signal that we are down to the interrupt handler */
1745 set_bit(__IXGBEVF_DOWN, &adapter->state); 1748 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
1749 return; /* do nothing if already down */
1746 1750
1747 /* disable all enabled rx queues */ 1751 /* disable all enabled rx queues */
1748 for (i = 0; i < adapter->num_rx_queues; i++) 1752 for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2329,6 +2333,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
2329 2333
2330 /* If we're already down or resetting, just bail */ 2334 /* If we're already down or resetting, just bail */
2331 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2335 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2336 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2332 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2337 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2333 return; 2338 return;
2334 2339
@@ -2413,7 +2418,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2413 2418
2414pf_has_reset: 2419pf_has_reset:
2415 /* Reset the timer */ 2420 /* Reset the timer */
2416 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2421 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2422 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2417 mod_timer(&adapter->watchdog_timer, 2423 mod_timer(&adapter->watchdog_timer,
2418 round_jiffies(jiffies + (2 * HZ))); 2424 round_jiffies(jiffies + (2 * HZ)));
2419 2425
@@ -3563,7 +3569,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3563 struct net_device *netdev = pci_get_drvdata(pdev); 3569 struct net_device *netdev = pci_get_drvdata(pdev);
3564 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3570 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3565 3571
3566 set_bit(__IXGBEVF_DOWN, &adapter->state); 3572 set_bit(__IXGBEVF_REMOVING, &adapter->state);
3567 3573
3568 del_timer_sync(&adapter->watchdog_timer); 3574 del_timer_sync(&adapter->watchdog_timer);
3569 3575