-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_main.c          | 43
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c   |  1
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h  |  2
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf.h           |  3
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c   | 13
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_main.c      | 87
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  | 12
7 files changed, 85 insertions(+), 76 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3116861198f0..53f3ed2df796 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,7 +38,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 31
+#define DRV_VERSION_BUILD 32
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -3108,13 +3108,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 
 	pf_q = vsi->base_queue;
 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		j = 1000;
-		do {
-			usleep_range(1000, 2000);
-			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-		} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
-			       ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
-
+		for (j = 0; j < 50; j++) {
+			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+				break;
+			usleep_range(1000, 2000);
+		}
 		/* Skip if the queue is already in the requested state */
 		if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
 			continue;
@@ -3124,8 +3124,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 		/* turn on/off the queue */
 		if (enable) {
 			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
-			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
-				  I40E_QTX_ENA_QENA_STAT_MASK;
+			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
 		} else {
 			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
 		}
@@ -3172,12 +3171,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 
 	pf_q = vsi->base_queue;
 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		j = 1000;
-		do {
-			usleep_range(1000, 2000);
-			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-		} while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
-			       ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+		for (j = 0; j < 50; j++) {
+			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+				break;
+			usleep_range(1000, 2000);
+		}
 
 		if (enable) {
 			/* is STAT set ? */
@@ -3191,11 +3191,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 
 		/* turn on/off the queue */
 		if (enable)
-			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
-				  I40E_QRX_ENA_QENA_STAT_MASK;
+			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
 		else
-			rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
-				    I40E_QRX_ENA_QENA_STAT_MASK);
+			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
 		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 
 		/* wait for the change to finish */
@@ -5927,7 +5925,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
 
 	} else if (vec == I40E_MIN_MSIX) {
 		/* Adjust for minimal MSIX use */
-		dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+		dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_vsis = 0;
 		pf->num_vmdq_qps = 0;
@@ -6056,7 +6054,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 
 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
-		dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
+		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
 		err = pci_enable_msi(pf->pdev);
 		if (err) {
 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6065,7 +6063,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	}
 
 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
-		dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
 
 	/* track first vector for misc interrupts */
 	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6092,7 +6090,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 				  i40e_intr, 0, pf->misc_int_name, pf);
 		if (err) {
 			dev_info(&pf->pdev->dev,
-				 "request_irq for msix_misc failed: %d\n", err);
+				 "request_irq for %s failed: %d\n",
+				 pf->misc_int_name, err);
 			return -EFAULT;
 		}
 	}
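
Note: the two i40e_vsi_control_tx()/i40e_vsi_control_rx() hunks above replace a do/while poll, which slept before every register read and counted down from 1000, with a bounded 50-iteration loop that reads the QENA register first, compares the software-owned REQ bit against the hardware-owned STAT bit, and sleeps only when another retry is needed; the write paths also stop setting the STAT bit, which belongs to hardware. A minimal user-space sketch of that polling pattern follows; read_reg(), sleep_a_bit(), REQ_BIT, STAT_BIT, and MAX_RETRIES are illustrative stand-ins, not driver symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_BIT     (1u << 0)   /* software writes this to request a state change */
#define STAT_BIT    (1u << 2)   /* hardware reports the actual queue state here */
#define MAX_RETRIES 50

/* Stand-ins for rd32()/usleep_range(); a real driver reads MMIO and sleeps. */
static uint32_t read_reg(unsigned int q) { (void)q; return REQ_BIT | STAT_BIT; }
static void sleep_a_bit(void) { }

static bool wait_queue_state_settled(unsigned int q)
{
	uint32_t reg = 0;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		reg = read_reg(q);
		/* done once the requested state and the reported state agree */
		if (!!(reg & REQ_BIT) == !!(reg & STAT_BIT))
			return true;
		sleep_a_bit();	/* only sleep when another retry is needed */
	}
	return false;	/* hardware never caught up; caller reports a timeout */
}

int main(void)
{
	printf("queue 0 settled: %d\n", wait_queue_state_settled(0));
	return 0;
}

Reading before sleeping avoids a guaranteed 1-2 ms delay on queues that are already in the requested state.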
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 7d133faad4cf..189e250198dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -671,7 +671,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 complete_reset:
 	/* reallocate vf resources to reset the VSI state */
 	i40e_free_vf_res(vf);
-	mdelay(10);
 	i40e_alloc_vf_res(vf);
 	i40e_enable_vf_mappings(vf);
 	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
 
-	__le32 tenant_id ;
+	__le32 tenant_id;
 	u8 reserved[4];
 	__le16 queue_number;
 #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ef7ce65bc00a..ccb43d343543 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -196,8 +196,6 @@ struct i40evf_adapter {
 
 	/* RX */
 	struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
-	int txd_count;
-	int rxd_count;
 	u64 hw_csum_rx_error;
 	int num_msix_vectors;
 	struct msix_entry *msix_entries;
@@ -287,6 +285,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
 void i40evf_del_vlans(struct i40evf_adapter *adapter);
 void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
 void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
 void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 				enum i40e_virtchnl_ops v_opcode,
 				i40e_status v_retval, u8 *msg, u16 msglen);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 	u32 new_rx_count, new_tx_count;
+	int i;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
 	new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
 
 	/* if nothing to do return success */
-	if ((new_tx_count == adapter->txd_count) &&
-	    (new_rx_count == adapter->rxd_count))
+	if ((new_tx_count == adapter->tx_rings[0]->count) &&
+	    (new_rx_count == adapter->rx_rings[0]->count))
 		return 0;
 
-	adapter->txd_count = new_tx_count;
-	adapter->rxd_count = new_rx_count;
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		adapter->tx_rings[0]->count = new_tx_count;
+		adapter->rx_rings[0]->count = new_rx_count;
+	}
 
 	if (netif_running(netdev))
 		i40evf_reinit_locked(adapter);
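
Note: i40evf_set_ringparam() now checks and stores the requested descriptor counts on the rings themselves instead of the removed adapter->txd_count/rxd_count fields, then lets i40evf_reinit_locked() rebuild the rings. As committed, the loop body writes tx_rings[0]/rx_rings[0] on every pass; a per-ring update would presumably index with i, roughly as in the sketch below, whose trimmed struct layout and set_ring_counts() helper are illustrative rather than the driver's definitions.

#include <stdio.h>

/* Trimmed stand-ins for the driver's ring and adapter structures. */
struct ring { unsigned int count; };
struct adapter {
	unsigned int num_queue_pairs;
	struct ring *tx_rings[4];
	struct ring *rx_rings[4];
};

/* Apply new descriptor counts to every queue pair (hypothetical helper). */
static void set_ring_counts(struct adapter *ad, unsigned int new_tx, unsigned int new_rx)
{
	unsigned int i;

	for (i = 0; i < ad->num_queue_pairs; i++) {
		ad->tx_rings[i]->count = new_tx;	/* index with i: one ring per pair */
		ad->rx_rings[i]->count = new_rx;
	}
}

int main(void)
{
	struct ring tx[4] = {{0}}, rx[4] = {{0}};
	struct adapter ad = { .num_queue_pairs = 4 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		ad.tx_rings[i] = &tx[i];
		ad.rx_rings[i] = &rx[i];
	}
	set_ring_counts(&ad, 512, 1024);
	printf("tx[3]=%u rx[3]=%u\n", ad.tx_rings[3]->count, ad.rx_rings[3]->count);
	return 0;
}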
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index fe2271e19423..b2c03bca7929 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710 X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.11"
+#define DRV_VERSION "0.9.13"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
-	"Copyright (c) 2013 Intel Corporation.";
+	"Copyright (c) 2013 - 2014 Intel Corporation.";
 
 /* i40evf_pci_tbl - PCI Device ID Table
  *
@@ -167,9 +167,13 @@ static void i40evf_tx_timeout(struct net_device *netdev)
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
 	adapter->tx_timeout_count++;
-
-	/* Do the reset outside of interrupt context */
-	schedule_work(&adapter->reset_task);
+	dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
+	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+		dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+		i40evf_request_reset(adapter);
+		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+		schedule_work(&adapter->reset_task);
+	}
 }
 
 /**
@@ -211,6 +215,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
 	int i;
 	struct i40e_hw *hw = &adapter->hw;
 
+	if (!adapter->msix_entries)
+		return;
+
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
 		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
 		synchronize_irq(adapter->msix_entries[i].vector);
@@ -517,7 +524,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
 			  adapter->misc_vector_name, netdev);
 	if (err) {
 		dev_err(&adapter->pdev->dev,
-			"request_irq for msix_aq failed: %d\n", err);
+			"request_irq for %s failed: %d\n",
+			adapter->misc_vector_name, err);
 		free_irq(adapter->msix_entries[0].vector, netdev);
 	}
 	return err;
@@ -968,9 +976,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		f->remove = true;
 	}
+	/* remove all VLAN filters */
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		f->remove = true;
+	}
 	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
 	    adapter->state != __I40EVF_RESETTING) {
 		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 		/* disable receives */
 		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
 		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -1927,14 +1940,14 @@ static void i40evf_init_task(struct work_struct *work)
 	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
 	err = i40e_set_mac_type(hw);
 	if (err) {
-		dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
-			 __func__, err);
+		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+			err);
 		goto err;
 	}
 	err = i40evf_check_reset_complete(hw);
 	if (err) {
-		dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
-			 __func__, err);
+		dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+			err);
 		goto err;
 	}
 	hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1944,14 +1957,14 @@ static void i40evf_init_task(struct work_struct *work)
 
 	err = i40evf_init_adminq(hw);
 	if (err) {
-		dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
-			 __func__, err);
+		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+			err);
 		goto err;
 	}
 	err = i40evf_send_api_ver(adapter);
 	if (err) {
-		dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
-			 __func__, err);
+		dev_err(&pdev->dev, "Unable to send to PF (%d)\n",
+			err);
 		i40evf_shutdown_adminq(hw);
 		goto err;
 	}
@@ -1965,13 +1978,13 @@ static void i40evf_init_task(struct work_struct *work)
 	/* aq msg sent, awaiting reply */
 	err = i40evf_verify_api_ver(adapter);
 	if (err) {
-		dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+		dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
 			err);
 		goto err;
 	}
 	err = i40evf_send_vf_config_msg(adapter);
 	if (err) {
-		dev_err(&pdev->dev, "Unable send config request, error %d\n",
+		dev_err(&pdev->dev, "Unable send config request (%d)\n",
 			err);
 		goto err;
 	}
@@ -1985,18 +1998,15 @@ static void i40evf_init_task(struct work_struct *work)
 			 (I40E_MAX_VF_VSI *
 			  sizeof(struct i40e_virtchnl_vsi_resource));
 		adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
-		if (!adapter->vf_res) {
-			dev_err(&pdev->dev, "%s: unable to allocate memory\n",
-				__func__);
+		if (!adapter->vf_res)
 			goto err;
-		}
 	}
 	err = i40evf_get_vf_config(adapter);
 	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 		goto restart;
 	if (err) {
-		dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
-			 __func__, err);
+		dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+			err);
 		goto err_alloc;
 	}
 	adapter->state = __I40EVF_INIT_SW;
@@ -2010,20 +2020,17 @@ static void i40evf_init_task(struct work_struct *work)
 		adapter->vsi_res = &adapter->vf_res->vsi_res[i];
 	}
 	if (!adapter->vsi_res) {
-		dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+		dev_err(&pdev->dev, "No LAN VSI found\n");
 		goto err_alloc;
 	}
 
 	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
-	adapter->txd_count = I40EVF_DEFAULT_TXD;
-	adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
 	netdev->netdev_ops = &i40evf_netdev_ops;
 	i40evf_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-
-	netdev->features |= NETIF_F_SG |
+	netdev->features |= NETIF_F_HIGHDMA |
+			    NETIF_F_SG |
 			    NETIF_F_IP_CSUM |
 			    NETIF_F_SCTP_CSUM |
 			    NETIF_F_IPV6_CSUM |
@@ -2039,11 +2046,9 @@ static void i40evf_init_task(struct work_struct *work)
 			    NETIF_F_HW_VLAN_CTAG_FILTER;
 	}
 
-	/* The HW MAC address was set and/or determined in sw_init */
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-		dev_info(&pdev->dev,
-			 "Invalid MAC address %pMAC, using random\n",
-			 adapter->hw.mac.addr);
+		dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+			 adapter->hw.mac.addr);
 		random_ether_addr(adapter->hw.mac.addr);
 	}
 	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -2077,8 +2082,6 @@ static void i40evf_init_task(struct work_struct *work)
 
 	netif_carrier_off(netdev);
 
-	strcpy(netdev->name, "eth%d");
-
 	adapter->vsi.id = adapter->vsi_res->vsi_id;
 	adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
 	adapter->vsi.back = adapter;
@@ -2168,20 +2171,18 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct net_device *netdev;
 	struct i40evf_adapter *adapter = NULL;
 	struct i40e_hw *hw = NULL;
-	int err, pci_using_dac;
+	int err;
 
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
 
 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-		pci_using_dac = true;
 		/* coherent mask for the same size will always succeed if
 		 * dma_set_mask does
 		 */
 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-		pci_using_dac = false;
 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	} else {
 		dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
@@ -2212,8 +2213,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, netdev);
 	adapter = netdev_priv(netdev);
-	if (pci_using_dac)
-		netdev->features |= NETIF_F_HIGHDMA;
 
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
@@ -2363,17 +2362,15 @@ static void i40evf_remove(struct pci_dev *pdev)
 	}
 	adapter->state = __I40EVF_REMOVE;
 
-	if (adapter->num_msix_vectors) {
+	if (adapter->msix_entries) {
 		i40evf_misc_irq_disable(adapter);
-		del_timer_sync(&adapter->watchdog_timer);
-
-		flush_scheduled_work();
-
 		i40evf_free_misc_irq(adapter);
-
 		i40evf_reset_interrupt_capability(adapter);
 	}
 
+	del_timer_sync(&adapter->watchdog_timer);
+	flush_scheduled_work();
+
 	if (hw->aq.asq.count)
 		i40evf_shutdown_adminq(hw);
 
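
Note: the i40evf_tx_timeout() hunk above changes the watchdog path so the VF asks the PF for a full reset via the new i40evf_request_reset() and uses I40EVF_FLAG_RESET_PENDING to ensure a burst of timeouts queues only one reset; i40evf_remove() likewise now keys its IRQ teardown off msix_entries and always stops the watchdog timer and flushes pending work. A rough sketch of the timeout guard follows; the structure fields and the two helpers are placeholders, not driver API.

#include <stdio.h>

#define FLAG_RESET_PENDING (1u << 0)

struct vf {
	unsigned int flags;
	unsigned int tx_timeout_count;
};

/* Placeholders for i40evf_request_reset() and schedule_work(). */
static void request_reset_from_pf(struct vf *vf) { (void)vf; puts("reset requested"); }
static void schedule_reset_task(struct vf *vf)   { (void)vf; puts("reset task queued"); }

/* Handle a transmit timeout: request at most one reset until it completes. */
static void handle_tx_timeout(struct vf *vf)
{
	vf->tx_timeout_count++;
	if (vf->flags & FLAG_RESET_PENDING)
		return;		/* a reset is already in flight */
	request_reset_from_pf(vf);
	vf->flags |= FLAG_RESET_PENDING;	/* cleared again by the reset task */
	schedule_reset_task(vf);
}

int main(void)
{
	struct vf vf = {0};

	handle_tx_timeout(&vf);	/* requests and queues a reset */
	handle_tx_timeout(&vf);	/* suppressed while the first is pending */
	printf("timeouts seen: %u\n", vf.tx_timeout_count);
	return 0;
}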
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 93891a114d3f..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -654,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
 	/* if the request failed, don't lock out others */
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 }
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+	/* Don't check CURRENT_OP - this is always higher priority */
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
 
 /**
  * i40evf_virtchnl_completion
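
Note: i40evf_request_reset() is deliberately fire-and-forget: it sends I40E_VIRTCHNL_OP_RESET_VF without checking current_op, and immediately returns current_op to UNKNOWN because the PF never replies to this opcode. A toy sketch contrasting that with an ordinary request that must hold the channel until its reply arrives; the enum values, struct, and send_msg() here are invented for illustration.

#include <stdio.h>

enum op { OP_UNKNOWN = 0, OP_GET_STATS = 1, OP_RESET_VF = 2 };

struct channel { enum op current_op; };

/* Invented transport stub; the driver sends over the PF admin queue. */
static void send_msg(struct channel *ch, enum op op) { (void)ch; printf("sent op %d\n", op); }

/* Ordinary request: record the opcode so the completion handler can match the reply. */
static void request_stats(struct channel *ch)
{
	if (ch->current_op != OP_UNKNOWN)
		return;			/* another request is still awaiting its reply */
	ch->current_op = OP_GET_STATS;
	send_msg(ch, OP_GET_STATS);
}

/* Reset request: no reply will ever come, so do not hold the channel. */
static void request_reset(struct channel *ch)
{
	send_msg(ch, OP_RESET_VF);
	ch->current_op = OP_UNKNOWN;	/* leave the channel free for other ops */
}

int main(void)
{
	struct channel ch = { OP_UNKNOWN };

	request_stats(&ch);
	request_reset(&ch);	/* allowed even while another request is outstanding */
	return 0;
}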