aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-01-23 20:22:57 -0500
committerDavid S. Miller <davem@davemloft.net>2018-01-23 20:22:57 -0500
commit521504640f9ebec384a9c3ecd5e6de82fec6d928 (patch)
tree67e954fa92bd60ba3c5ad6d7698ea84a58addaf8
parent6b44d0f9c931b77ca1379731305f9637d8ff69f3 (diff)
parentbbf0bdd41fbf71a008325bdcf0df63ab088bf532 (diff)
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says: ==================== 40GbE Intel Wired LAN Driver Updates 2018-01-23 This series contains updates to i40e and i40evf only. Pawel enables FlatNVM support on x722 devices by allowing nvmupdate tool to configure the preservation flags in the AdminQ command. Mitch fixes a potential divide by zero error when DCB is enabled and the firmware fails to configure the VSI, so check for this state. Fixed a bug where the driver could fail to adhere to ETS bandwidth allocations if 8 traffic classes were configured on the switch. Sudheer fixes a potential deadlock by avoiding to call flush_scheduled_work() in i40evf_remove(), since cancel_work_sync() and cancel_delayed_work_sync() already clean up necessary work items. Fixed an issue with the problematic detection and recovery from hung queues in the PF which was causing lost interrupts. This is done by triggering a software interrupt so that interrupts are forced on and if we are already in napi_poll and an interrupt fires, napi_poll will not be rescheduled and the interrupt is lost. Avinash fixes an issue in the VF where it was possible to issue a reset_task while the device is currently being removed. Michal fixes an issue occurring while calling i40e_led_set() with the blink parameter set to true, which was causing the activity LED instead of the link LED to blink for port identification. Shiraz changes the client interface to not call client close/open on netdev down/up events, since this causes a lot of thrash that is not needed. Instead, disable the PE TCP-ENA flag during a netdev down event and re-enable on a netdev up event, since this blocks all TCP traffic to the RDMA protocol engine. Alan fixes an issue which was causing a potential transmit hang by ignoring the PF link up message if the VF state is not yet in the RUNNING state. Amritha fixes the channel VSI recreation during the reset flow to reconfigure the transmit rings and the queue context associated with the channel VSI. 
==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c179
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c141
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c54
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c54
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c35
18 files changed, 403 insertions, 210 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9af74253c3f7..d9670cd8743f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1027,7 +1027,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
1027 hw->aq.arq.next_to_clean = ntc; 1027 hw->aq.arq.next_to_clean = ntc;
1028 hw->aq.arq.next_to_use = ntu; 1028 hw->aq.arq.next_to_use = ntu;
1029 1029
1030 i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode)); 1030 i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
1031clean_arq_element_out: 1031clean_arq_element_out:
1032 /* Set pending if needed, unlock and return */ 1032 /* Set pending if needed, unlock and return */
1033 if (pending) 1033 if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c5776340517c..0d471b0db0f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2231,8 +2231,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
2231 */ 2231 */
2232struct i40e_aqc_nvm_update { 2232struct i40e_aqc_nvm_update {
2233 u8 command_flags; 2233 u8 command_flags;
2234#define I40E_AQ_NVM_LAST_CMD 0x01 2234#define I40E_AQ_NVM_LAST_CMD 0x01
2235#define I40E_AQ_NVM_FLASH_ONLY 0x80 2235#define I40E_AQ_NVM_FLASH_ONLY 0x80
2236#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
2237#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
2238#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
2239#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
2236 u8 module_pointer; 2240 u8 module_pointer;
2237 __le16 length; 2241 __le16 length;
2238 __le32 offset; 2242 __le32 offset;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd07f..0de9610c1d8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
378 if (!client || !cdev) 378 if (!client || !cdev)
379 return; 379 return;
380 380
381 /* Here we handle client opens. If the client is down, but 381 /* Here we handle client opens. If the client is down, and
382 * the netdev is up, then open the client. 382 * the netdev is registered, then open the client.
383 */ 383 */
384 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { 384 if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
385 if (!test_bit(__I40E_VSI_DOWN, vsi->state) && 385 if (vsi->netdev_registered &&
386 client->ops && client->ops->open) { 386 client->ops && client->ops->open) {
387 set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); 387 set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
388 ret = client->ops->open(&cdev->lan_info, client); 388 ret = client->ops->open(&cdev->lan_info, client);
@@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
393 i40e_client_del_instance(pf); 393 i40e_client_del_instance(pf);
394 } 394 }
395 } 395 }
396 } else {
397 /* Likewise for client close. If the client is up, but the netdev
398 * is down, then close the client.
399 */
400 if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
401 client->ops && client->ops->close) {
402 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
403 client->ops->close(&cdev->lan_info, client, false);
404 i40e_client_release_qvlist(&cdev->lan_info);
405 }
406 } 396 }
397
398 /* enable/disable PE TCP_ENA flag based on netdev down/up
399 */
400 if (test_bit(__I40E_VSI_DOWN, vsi->state))
401 i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
402 0, 0, 0,
403 I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
404 else
405 i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
406 0, 0,
407 I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
408 I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
407} 409}
408 410
409/** 411/**
@@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
717 return -ENOENT; 719 return -ENOENT;
718 } 720 }
719 721
720 if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) && 722 if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
721 (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) { 723 (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
722 ctxt.info.valid_sections = 724 ctxt.info.valid_sections =
723 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 725 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
724 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; 726 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
725 } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) && 727 } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
726 !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) { 728 !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
727 ctxt.info.valid_sections = 729 ctxt.info.valid_sections =
728 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 730 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
729 ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA; 731 ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 15b21a5315b5..ba55c889e4c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -132,7 +132,7 @@ struct i40e_info {
132 132
133#define I40E_CLIENT_RESET_LEVEL_PF 1 133#define I40E_CLIENT_RESET_LEVEL_PF 1
134#define I40E_CLIENT_RESET_LEVEL_CORE 2 134#define I40E_CLIENT_RESET_LEVEL_CORE 2
135#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1) 135#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
136 136
137struct i40e_ops { 137struct i40e_ops {
138 /* setup_q_vector_list enables queues with a particular vector */ 138 /* setup_q_vector_list enables queues with a particular vector */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 40c5f7628aa1..ee6052ecd215 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1486,6 +1486,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
1486 case I40E_COMBINED_ACTIVITY: 1486 case I40E_COMBINED_ACTIVITY:
1487 case I40E_FILTER_ACTIVITY: 1487 case I40E_FILTER_ACTIVITY:
1488 case I40E_MAC_ACTIVITY: 1488 case I40E_MAC_ACTIVITY:
1489 case I40E_LINK_ACTIVITY:
1489 continue; 1490 continue;
1490 default: 1491 default:
1491 break; 1492 break;
@@ -1534,6 +1535,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1534 case I40E_COMBINED_ACTIVITY: 1535 case I40E_COMBINED_ACTIVITY:
1535 case I40E_FILTER_ACTIVITY: 1536 case I40E_FILTER_ACTIVITY:
1536 case I40E_MAC_ACTIVITY: 1537 case I40E_MAC_ACTIVITY:
1538 case I40E_LINK_ACTIVITY:
1537 continue; 1539 continue;
1538 default: 1540 default:
1539 break; 1541 break;
@@ -1544,9 +1546,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1544 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & 1546 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1545 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); 1547 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1546 1548
1547 if (mode == I40E_LINK_ACTIVITY)
1548 blink = false;
1549
1550 if (blink) 1549 if (blink)
1551 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); 1550 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1552 else 1551 else
@@ -3465,13 +3464,14 @@ exit:
3465 * @length: length of the section to be written (in bytes from the offset) 3464 * @length: length of the section to be written (in bytes from the offset)
3466 * @data: command buffer (size [bytes] = length) 3465 * @data: command buffer (size [bytes] = length)
3467 * @last_command: tells if this is the last command in a series 3466 * @last_command: tells if this is the last command in a series
3467 * @preservation_flags: Preservation mode flags
3468 * @cmd_details: pointer to command details structure or NULL 3468 * @cmd_details: pointer to command details structure or NULL
3469 * 3469 *
3470 * Update the NVM using the admin queue commands 3470 * Update the NVM using the admin queue commands
3471 **/ 3471 **/
3472i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3472i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3473 u32 offset, u16 length, void *data, 3473 u32 offset, u16 length, void *data,
3474 bool last_command, 3474 bool last_command, u8 preservation_flags,
3475 struct i40e_asq_cmd_details *cmd_details) 3475 struct i40e_asq_cmd_details *cmd_details)
3476{ 3476{
3477 struct i40e_aq_desc desc; 3477 struct i40e_aq_desc desc;
@@ -3490,6 +3490,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3490 /* If this is the last command in a series, set the proper flag. */ 3490 /* If this is the last command in a series, set the proper flag. */
3491 if (last_command) 3491 if (last_command)
3492 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3492 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3493 if (hw->mac.type == I40E_MAC_X722) {
3494 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3495 cmd->command_flags |=
3496 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3497 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3498 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3499 cmd->command_flags |=
3500 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3501 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3502 }
3493 cmd->module_pointer = module_pointer; 3503 cmd->module_pointer = module_pointer;
3494 cmd->offset = cpu_to_le32(offset); 3504 cmd->offset = cpu_to_le32(offset);
3495 cmd->length = cpu_to_le16(length); 3505 cmd->length = cpu_to_le16(length);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2ab22eba0c7c..0988c90f53dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4877,104 +4877,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4877#endif 4877#endif
4878 4878
4879/** 4879/**
4880 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4881 * @q_idx: TX queue number
4882 * @vsi: Pointer to VSI struct
4883 *
4884 * This function checks specified queue for given VSI. Detects hung condition.
4885 * We proactively detect hung TX queues by checking if interrupts are disabled
4886 * but there are pending descriptors. If it appears hung, attempt to recover
4887 * by triggering a SW interrupt.
4888 **/
4889static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4890{
4891 struct i40e_ring *tx_ring = NULL;
4892 struct i40e_pf *pf;
4893 u32 val, tx_pending;
4894 int i;
4895
4896 pf = vsi->back;
4897
4898 /* now that we have an index, find the tx_ring struct */
4899 for (i = 0; i < vsi->num_queue_pairs; i++) {
4900 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4901 if (q_idx == vsi->tx_rings[i]->queue_index) {
4902 tx_ring = vsi->tx_rings[i];
4903 break;
4904 }
4905 }
4906 }
4907
4908 if (!tx_ring)
4909 return;
4910
4911 /* Read interrupt register */
4912 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4913 val = rd32(&pf->hw,
4914 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4915 tx_ring->vsi->base_vector - 1));
4916 else
4917 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4918
4919 tx_pending = i40e_get_tx_pending(tx_ring);
4920
4921 /* Interrupts are disabled and TX pending is non-zero,
4922 * trigger the SW interrupt (don't wait). Worst case
4923 * there will be one extra interrupt which may result
4924 * into not cleaning any queues because queues are cleaned.
4925 */
4926 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4927 i40e_force_wb(vsi, tx_ring->q_vector);
4928}
4929
4930/**
4931 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4932 * @pf: pointer to PF struct
4933 *
4934 * LAN VSI has netdev and netdev has TX queues. This function is to check
4935 * each of those TX queues if they are hung, trigger recovery by issuing
4936 * SW interrupt.
4937 **/
4938static void i40e_detect_recover_hung(struct i40e_pf *pf)
4939{
4940 struct net_device *netdev;
4941 struct i40e_vsi *vsi;
4942 unsigned int i;
4943
4944 /* Only for LAN VSI */
4945 vsi = pf->vsi[pf->lan_vsi];
4946
4947 if (!vsi)
4948 return;
4949
4950 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4951 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
4952 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
4953 return;
4954
4955 /* Make sure type is MAIN VSI */
4956 if (vsi->type != I40E_VSI_MAIN)
4957 return;
4958
4959 netdev = vsi->netdev;
4960 if (!netdev)
4961 return;
4962
4963 /* Bail out if netif_carrier is not OK */
4964 if (!netif_carrier_ok(netdev))
4965 return;
4966
4967 /* Go thru' TX queues for netdev */
4968 for (i = 0; i < netdev->num_tx_queues; i++) {
4969 struct netdev_queue *q;
4970
4971 q = netdev_get_tx_queue(netdev, i);
4972 if (q)
4973 i40e_detect_recover_hung_queue(i, vsi);
4974 }
4975}
4976
4977/**
4978 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP 4880 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4979 * @pf: pointer to PF 4881 * @pf: pointer to PF
4980 * 4882 *
@@ -5342,6 +5244,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5342static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) 5244static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5343{ 5245{
5344 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; 5246 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5247 struct i40e_pf *pf = vsi->back;
5248 struct i40e_hw *hw = &pf->hw;
5345 struct i40e_vsi_context ctxt; 5249 struct i40e_vsi_context ctxt;
5346 int ret = 0; 5250 int ret = 0;
5347 int i; 5251 int i;
@@ -5359,10 +5263,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5359 5263
5360 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); 5264 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5361 if (ret) { 5265 if (ret) {
5362 dev_info(&vsi->back->pdev->dev, 5266 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5267
5268 dev_info(&pf->pdev->dev,
5363 "Failed configuring TC map %d for VSI %d\n", 5269 "Failed configuring TC map %d for VSI %d\n",
5364 enabled_tc, vsi->seid); 5270 enabled_tc, vsi->seid);
5365 goto out; 5271 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5272 &bw_config, NULL);
5273 if (ret) {
5274 dev_info(&pf->pdev->dev,
5275 "Failed querying vsi bw info, err %s aq_err %s\n",
5276 i40e_stat_str(hw, ret),
5277 i40e_aq_str(hw, hw->aq.asq_last_status));
5278 goto out;
5279 }
5280 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5281 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5282
5283 if (!valid_tc)
5284 valid_tc = bw_config.tc_valid_bits;
5285 /* Always enable TC0, no matter what */
5286 valid_tc |= 1;
5287 dev_info(&pf->pdev->dev,
5288 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5289 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5290 enabled_tc = valid_tc;
5291 }
5292
5293 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5294 if (ret) {
5295 dev_err(&pf->pdev->dev,
5296 "Unable to configure TC map %d for VSI %d\n",
5297 enabled_tc, vsi->seid);
5298 goto out;
5299 }
5366 } 5300 }
5367 5301
5368 /* Update Queue Pairs Mapping for currently enabled UPs */ 5302 /* Update Queue Pairs Mapping for currently enabled UPs */
@@ -5402,13 +5336,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5402 /* Update the VSI after updating the VSI queue-mapping 5336 /* Update the VSI after updating the VSI queue-mapping
5403 * information 5337 * information
5404 */ 5338 */
5405 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 5339 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5406 if (ret) { 5340 if (ret) {
5407 dev_info(&vsi->back->pdev->dev, 5341 dev_info(&pf->pdev->dev,
5408 "Update vsi tc config failed, err %s aq_err %s\n", 5342 "Update vsi tc config failed, err %s aq_err %s\n",
5409 i40e_stat_str(&vsi->back->hw, ret), 5343 i40e_stat_str(hw, ret),
5410 i40e_aq_str(&vsi->back->hw, 5344 i40e_aq_str(hw, hw->aq.asq_last_status));
5411 vsi->back->hw.aq.asq_last_status));
5412 goto out; 5345 goto out;
5413 } 5346 }
5414 /* update the local VSI info with updated queue map */ 5347 /* update the local VSI info with updated queue map */
@@ -5418,11 +5351,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5418 /* Update current VSI BW information */ 5351 /* Update current VSI BW information */
5419 ret = i40e_vsi_get_bw_info(vsi); 5352 ret = i40e_vsi_get_bw_info(vsi);
5420 if (ret) { 5353 if (ret) {
5421 dev_info(&vsi->back->pdev->dev, 5354 dev_info(&pf->pdev->dev,
5422 "Failed updating vsi bw info, err %s aq_err %s\n", 5355 "Failed updating vsi bw info, err %s aq_err %s\n",
5423 i40e_stat_str(&vsi->back->hw, ret), 5356 i40e_stat_str(hw, ret),
5424 i40e_aq_str(&vsi->back->hw, 5357 i40e_aq_str(hw, hw->aq.asq_last_status));
5425 vsi->back->hw.aq.asq_last_status));
5426 goto out; 5358 goto out;
5427 } 5359 }
5428 5360
@@ -9075,6 +9007,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9075 vsi->uplink_seid); 9007 vsi->uplink_seid);
9076 return ret; 9008 return ret;
9077 } 9009 }
9010 /* Reconfigure TX queues using QTX_CTL register */
9011 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9012 if (ret) {
9013 dev_info(&vsi->back->pdev->dev,
9014 "failed to configure TX rings for channel %u\n",
9015 ch->seid);
9016 return ret;
9017 }
9018 /* update 'next_base_queue' */
9019 vsi->next_base_queue = vsi->next_base_queue +
9020 ch->num_queue_pairs;
9078 if (ch->max_tx_rate) { 9021 if (ch->max_tx_rate) {
9079 u64 credits = ch->max_tx_rate; 9022 u64 credits = ch->max_tx_rate;
9080 9023
@@ -9695,7 +9638,7 @@ static void i40e_service_task(struct work_struct *work)
9695 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) 9638 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9696 return; 9639 return;
9697 9640
9698 i40e_detect_recover_hung(pf); 9641 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
9699 i40e_sync_filters_subtask(pf); 9642 i40e_sync_filters_subtask(pf);
9700 i40e_reset_subtask(pf); 9643 i40e_reset_subtask(pf);
9701 i40e_handle_mdd_event(pf); 9644 i40e_handle_mdd_event(pf);
@@ -10462,10 +10405,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10462 /* set up vector assignment tracking */ 10405 /* set up vector assignment tracking */
10463 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 10406 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10464 pf->irq_pile = kzalloc(size, GFP_KERNEL); 10407 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10465 if (!pf->irq_pile) { 10408 if (!pf->irq_pile)
10466 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
10467 return -ENOMEM; 10409 return -ENOMEM;
10468 } 10410
10469 pf->irq_pile->num_entries = vectors; 10411 pf->irq_pile->num_entries = vectors;
10470 pf->irq_pile->search_hint = 0; 10412 pf->irq_pile->search_hint = 0;
10471 10413
@@ -10783,8 +10725,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
10783 /* Determine the RSS size of the VSI */ 10725 /* Determine the RSS size of the VSI */
10784 if (!vsi->rss_size) { 10726 if (!vsi->rss_size) {
10785 u16 qcount; 10727 u16 qcount;
10786 10728 /* If the firmware does something weird during VSI init, we
10787 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; 10729 * could end up with zero TCs. Check for that to avoid
10730 * divide-by-zero. It probably won't pass traffic, but it also
10731 * won't panic.
10732 */
10733 qcount = vsi->num_queue_pairs /
10734 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
10788 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); 10735 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
10789 } 10736 }
10790 if (!vsi->rss_size) 10737 if (!vsi->rss_size)
@@ -10972,7 +10919,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
10972 ret = i40e_aq_update_nvm(&pf->hw, 10919 ret = i40e_aq_update_nvm(&pf->hw,
10973 I40E_SR_NVM_CONTROL_WORD, 10920 I40E_SR_NVM_CONTROL_WORD,
10974 0x10, sizeof(nvm_word), 10921 0x10, sizeof(nvm_word),
10975 &nvm_word, true, NULL); 10922 &nvm_word, true, 0, NULL);
10976 /* Save off last admin queue command status before releasing 10923 /* Save off last admin queue command status before releasing
10977 * the NVM 10924 * the NVM
10978 */ 10925 */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 425713fb72e5..76a5cb04e4fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -239,8 +239,9 @@ read_nvm_exit:
239 * 239 *
240 * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 240 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
241 **/ 241 **/
242static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, 242static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
243 u32 offset, u16 words, void *data, 243 u8 module_pointer, u32 offset,
244 u16 words, void *data,
244 bool last_command) 245 bool last_command)
245{ 246{
246 i40e_status ret_code = I40E_ERR_NVM; 247 i40e_status ret_code = I40E_ERR_NVM;
@@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
496 ret_code = i40e_aq_update_nvm(hw, module_pointer, 497 ret_code = i40e_aq_update_nvm(hw, module_pointer,
497 2 * offset, /*bytes*/ 498 2 * offset, /*bytes*/
498 2 * words, /*bytes*/ 499 2 * words, /*bytes*/
499 data, last_command, &cmd_details); 500 data, last_command, 0,
501 &cmd_details);
500 502
501 return ret_code; 503 return ret_code;
502} 504}
@@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
677static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, 679static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
678 struct i40e_nvm_access *cmd, 680 struct i40e_nvm_access *cmd,
679 u8 *bytes, int *perrno); 681 u8 *bytes, int *perrno);
682static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
683 struct i40e_nvm_access *cmd,
684 u8 *bytes, int *perrno);
680static inline u8 i40e_nvmupd_get_module(u32 val) 685static inline u8 i40e_nvmupd_get_module(u32 val)
681{ 686{
682 return (u8)(val & I40E_NVM_MOD_PNT_MASK); 687 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
686 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); 691 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
687} 692}
688 693
694static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
695{
696 return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
697 I40E_NVM_PRESERVATION_FLAGS_SHIFT);
698}
699
689static const char * const i40e_nvm_update_state_str[] = { 700static const char * const i40e_nvm_update_state_str[] = {
690 "I40E_NVMUPD_INVALID", 701 "I40E_NVMUPD_INVALID",
691 "I40E_NVMUPD_READ_CON", 702 "I40E_NVMUPD_READ_CON",
@@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = {
703 "I40E_NVMUPD_STATUS", 714 "I40E_NVMUPD_STATUS",
704 "I40E_NVMUPD_EXEC_AQ", 715 "I40E_NVMUPD_EXEC_AQ",
705 "I40E_NVMUPD_GET_AQ_RESULT", 716 "I40E_NVMUPD_GET_AQ_RESULT",
717 "I40E_NVMUPD_GET_AQ_EVENT",
706}; 718};
707 719
708/** 720/**
@@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
798 * the wait info and return before doing anything else 810 * the wait info and return before doing anything else
799 */ 811 */
800 if (cmd->offset == 0xffff) { 812 if (cmd->offset == 0xffff) {
801 i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); 813 i40e_nvmupd_clear_wait_state(hw);
802 status = 0; 814 status = 0;
803 goto exit; 815 break;
804 } 816 }
805 817
806 status = I40E_ERR_NOT_READY; 818 status = I40E_ERR_NOT_READY;
@@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
815 *perrno = -ESRCH; 827 *perrno = -ESRCH;
816 break; 828 break;
817 } 829 }
818exit: 830
819 mutex_unlock(&hw->aq.arq_mutex); 831 mutex_unlock(&hw->aq.arq_mutex);
820 return status; 832 return status;
821} 833}
@@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
944 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); 956 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
945 break; 957 break;
946 958
959 case I40E_NVMUPD_GET_AQ_EVENT:
960 status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
961 break;
962
947 default: 963 default:
948 i40e_debug(hw, I40E_DEBUG_NVM, 964 i40e_debug(hw, I40E_DEBUG_NVM,
949 "NVMUPD: bad cmd %s in init state\n", 965 "NVMUPD: bad cmd %s in init state\n",
@@ -1118,38 +1134,53 @@ retry:
1118} 1134}
1119 1135
1120/** 1136/**
1121 * i40e_nvmupd_check_wait_event - handle NVM update operation events 1137 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1122 * @hw: pointer to the hardware structure 1138 * @hw: pointer to the hardware structure
1123 * @opcode: the event that just happened
1124 **/ 1139 **/
1125void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) 1140void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1126{ 1141{
1127 if (opcode == hw->nvm_wait_opcode) { 1142 i40e_debug(hw, I40E_DEBUG_NVM,
1128 i40e_debug(hw, I40E_DEBUG_NVM, 1143 "NVMUPD: clearing wait on opcode 0x%04x\n",
1129 "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); 1144 hw->nvm_wait_opcode);
1130 if (hw->nvm_release_on_done) {
1131 i40e_release_nvm(hw);
1132 hw->nvm_release_on_done = false;
1133 }
1134 hw->nvm_wait_opcode = 0;
1135 1145
1136 if (hw->aq.arq_last_status) { 1146 if (hw->nvm_release_on_done) {
1137 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; 1147 i40e_release_nvm(hw);
1138 return; 1148 hw->nvm_release_on_done = false;
1139 } 1149 }
1150 hw->nvm_wait_opcode = 0;
1140 1151
1141 switch (hw->nvmupd_state) { 1152 if (hw->aq.arq_last_status) {
1142 case I40E_NVMUPD_STATE_INIT_WAIT: 1153 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1143 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 1154 return;
1144 break; 1155 }
1145 1156
1146 case I40E_NVMUPD_STATE_WRITE_WAIT: 1157 switch (hw->nvmupd_state) {
1147 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; 1158 case I40E_NVMUPD_STATE_INIT_WAIT:
1148 break; 1159 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1160 break;
1149 1161
1150 default: 1162 case I40E_NVMUPD_STATE_WRITE_WAIT:
1151 break; 1163 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1152 } 1164 break;
1165
1166 default:
1167 break;
1168 }
1169}
1170
1171/**
1172 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1173 * @hw: pointer to the hardware structure
1174 * @opcode: the event that just happened
1175 **/
1176void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1177 struct i40e_aq_desc *desc)
1178{
1179 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1180
1181 if (opcode == hw->nvm_wait_opcode) {
1182 memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
1183 i40e_nvmupd_clear_wait_state(hw);
1153 } 1184 }
1154} 1185}
1155 1186
@@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1205 else if (module == 0) 1236 else if (module == 0)
1206 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; 1237 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1207 break; 1238 break;
1239 case I40E_NVM_AQE:
1240 upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1241 break;
1208 } 1242 }
1209 break; 1243 break;
1210 1244
@@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1267 u32 aq_data_len; 1301 u32 aq_data_len;
1268 1302
1269 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); 1303 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1304 if (cmd->offset == 0xffff)
1305 return 0;
1306
1270 memset(&cmd_details, 0, sizeof(cmd_details)); 1307 memset(&cmd_details, 0, sizeof(cmd_details));
1271 cmd_details.wb_desc = &hw->nvm_wb_desc; 1308 cmd_details.wb_desc = &hw->nvm_wb_desc;
1272 1309
@@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1302 } 1339 }
1303 } 1340 }
1304 1341
1342 if (cmd->offset)
1343 memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1344
1305 /* and away we go! */ 1345 /* and away we go! */
1306 status = i40e_asq_send_command(hw, aq_desc, buff, 1346 status = i40e_asq_send_command(hw, aq_desc, buff,
1307 buff_size, &cmd_details); 1347 buff_size, &cmd_details);
@@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1311 i40e_stat_str(hw, status), 1351 i40e_stat_str(hw, status),
1312 i40e_aq_str(hw, hw->aq.asq_last_status)); 1352 i40e_aq_str(hw, hw->aq.asq_last_status));
1313 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); 1353 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1354 return status;
1314 } 1355 }
1315 1356
1316 /* should we wait for a followup event? */ 1357 /* should we wait for a followup event? */
@@ -1392,6 +1433,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1392} 1433}
1393 1434
1394/** 1435/**
1436 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1437 * @hw: pointer to hardware structure
1438 * @cmd: pointer to nvm update command buffer
1439 * @bytes: pointer to the data buffer
1440 * @perrno: pointer to return error code
1441 *
1442 * cmd structure contains identifiers and data buffer
1443 **/
1444static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1445 struct i40e_nvm_access *cmd,
1446 u8 *bytes, int *perrno)
1447{
1448 u32 aq_total_len;
1449 u32 aq_desc_len;
1450
1451 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1452
1453 aq_desc_len = sizeof(struct i40e_aq_desc);
1454 aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
1455
1456 /* check copylength range */
1457 if (cmd->data_size > aq_total_len) {
1458 i40e_debug(hw, I40E_DEBUG_NVM,
1459 "%s: copy length %d too big, trimming to %d\n",
1460 __func__, cmd->data_size, aq_total_len);
1461 cmd->data_size = aq_total_len;
1462 }
1463
1464 memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
1465
1466 return 0;
1467}
1468
1469/**
1395 * i40e_nvmupd_nvm_read - Read NVM 1470 * i40e_nvmupd_nvm_read - Read NVM
1396 * @hw: pointer to hardware structure 1471 * @hw: pointer to hardware structure
1397 * @cmd: pointer to nvm update command buffer 1472 * @cmd: pointer to nvm update command buffer
@@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1486 i40e_status status = 0; 1561 i40e_status status = 0;
1487 struct i40e_asq_cmd_details cmd_details; 1562 struct i40e_asq_cmd_details cmd_details;
1488 u8 module, transaction; 1563 u8 module, transaction;
1564 u8 preservation_flags;
1489 bool last; 1565 bool last;
1490 1566
1491 transaction = i40e_nvmupd_get_transaction(cmd->config); 1567 transaction = i40e_nvmupd_get_transaction(cmd->config);
1492 module = i40e_nvmupd_get_module(cmd->config); 1568 module = i40e_nvmupd_get_module(cmd->config);
1493 last = (transaction & I40E_NVM_LCB); 1569 last = (transaction & I40E_NVM_LCB);
1570 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1494 1571
1495 memset(&cmd_details, 0, sizeof(cmd_details)); 1572 memset(&cmd_details, 0, sizeof(cmd_details));
1496 cmd_details.wb_desc = &hw->nvm_wb_desc; 1573 cmd_details.wb_desc = &hw->nvm_wb_desc;
1497 1574
1498 status = i40e_aq_update_nvm(hw, module, cmd->offset, 1575 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1499 (u16)cmd->data_size, bytes, last, 1576 (u16)cmd->data_size, bytes, last,
1500 &cmd_details); 1577 preservation_flags, &cmd_details);
1501 if (status) { 1578 if (status) {
1502 i40e_debug(hw, I40E_DEBUG_NVM, 1579 i40e_debug(hw, I40E_DEBUG_NVM,
1503 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", 1580 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index b3cc89cc3a86..187dd53e0056 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
214 struct i40e_asq_cmd_details *cmd_details); 214 struct i40e_asq_cmd_details *cmd_details);
215i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 215i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
216 u32 offset, u16 length, void *data, 216 u32 offset, u16 length, void *data,
217 bool last_command, 217 bool last_command, u8 preservation_flags,
218 struct i40e_asq_cmd_details *cmd_details); 218 struct i40e_asq_cmd_details *cmd_details);
219i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 219i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
220 u8 mib_type, void *buff, u16 buff_size, 220 u8 mib_type, void *buff, u16 buff_size,
@@ -333,7 +333,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
333i40e_status i40e_nvmupd_command(struct i40e_hw *hw, 333i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
334 struct i40e_nvm_access *cmd, 334 struct i40e_nvm_access *cmd,
335 u8 *bytes, int *); 335 u8 *bytes, int *);
336void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode); 336void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
337 struct i40e_aq_desc *desc);
338void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
337void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); 339void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
338 340
339extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; 341extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 40edb6e5e6f6..8d2275830a40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -726,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
726 return 0; 726 return 0;
727} 727}
728 728
729/**
730 * i40e_detect_recover_hung - Function to detect and recover hung_queues
731 * @vsi: pointer to vsi struct with tx queues
732 *
733 * VSI has netdev and netdev has TX queues. This function is to check each of
734 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
735 **/
736void i40e_detect_recover_hung(struct i40e_vsi *vsi)
737{
738 struct i40e_ring *tx_ring = NULL;
739 struct net_device *netdev;
740 unsigned int i;
741 int packets;
742
743 if (!vsi)
744 return;
745
746 if (test_bit(__I40E_VSI_DOWN, vsi->state))
747 return;
748
749 netdev = vsi->netdev;
750 if (!netdev)
751 return;
752
753 if (!netif_carrier_ok(netdev))
754 return;
755
756 for (i = 0; i < vsi->num_queue_pairs; i++) {
757 tx_ring = vsi->tx_rings[i];
758 if (tx_ring && tx_ring->desc) {
759 /* If packet counter has not changed the queue is
760 * likely stalled, so force an interrupt for this
761 * queue.
762 *
763 * prev_pkt_ctr would be negative if there was no
764 * pending work.
765 */
766 packets = tx_ring->stats.packets & INT_MAX;
767 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
768 i40e_force_wb(vsi, tx_ring->q_vector);
769 continue;
770 }
771
772 /* Memory barrier between read of packet count and call
773 * to i40e_get_tx_pending()
774 */
775 smp_rmb();
776 tx_ring->tx_stats.prev_pkt_ctr =
777 i40e_get_tx_pending(tx_ring) ? packets : -1;
778 }
779 }
780}
781
729#define WB_STRIDE 4 782#define WB_STRIDE 4
730 783
731/** 784/**
@@ -1163,6 +1216,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1163 1216
1164 tx_ring->next_to_use = 0; 1217 tx_ring->next_to_use = 0;
1165 tx_ring->next_to_clean = 0; 1218 tx_ring->next_to_clean = 0;
1219 tx_ring->tx_stats.prev_pkt_ctr = -1;
1166 return 0; 1220 return 0;
1167 1221
1168err: 1222err:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 2d08760fc4ce..d4799b41e98a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -333,6 +333,7 @@ struct i40e_tx_queue_stats {
333 u64 tx_done_old; 333 u64 tx_done_old;
334 u64 tx_linearize; 334 u64 tx_linearize;
335 u64 tx_force_wb; 335 u64 tx_force_wb;
336 int prev_pkt_ctr;
336}; 337};
337 338
338struct i40e_rx_queue_stats { 339struct i40e_rx_queue_stats {
@@ -501,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
501int i40e_napi_poll(struct napi_struct *napi, int budget); 502int i40e_napi_poll(struct napi_struct *napi, int budget);
502void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); 503void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
503u32 i40e_get_tx_pending(struct i40e_ring *ring); 504u32 i40e_get_tx_pending(struct i40e_ring *ring);
505void i40e_detect_recover_hung(struct i40e_vsi *vsi);
504int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); 506int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
505bool __i40e_chk_linearize(struct sk_buff *skb); 507bool __i40e_chk_linearize(struct sk_buff *skb);
506 508
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 5a708c363d99..cd294e6a8587 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd {
402 I40E_NVMUPD_STATUS, 402 I40E_NVMUPD_STATUS,
403 I40E_NVMUPD_EXEC_AQ, 403 I40E_NVMUPD_EXEC_AQ,
404 I40E_NVMUPD_GET_AQ_RESULT, 404 I40E_NVMUPD_GET_AQ_RESULT,
405 I40E_NVMUPD_GET_AQ_EVENT,
405}; 406};
406 407
407enum i40e_nvmupd_state { 408enum i40e_nvmupd_state {
@@ -421,15 +422,21 @@ enum i40e_nvmupd_state {
421 422
422#define I40E_NVM_MOD_PNT_MASK 0xFF 423#define I40E_NVM_MOD_PNT_MASK 0xFF
423 424
424#define I40E_NVM_TRANS_SHIFT 8 425#define I40E_NVM_TRANS_SHIFT 8
425#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) 426#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
426#define I40E_NVM_CON 0x0 427#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
427#define I40E_NVM_SNT 0x1 428#define I40E_NVM_PRESERVATION_FLAGS_MASK \
428#define I40E_NVM_LCB 0x2 429 (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
429#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) 430#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
430#define I40E_NVM_ERA 0x4 431#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
431#define I40E_NVM_CSUM 0x8 432#define I40E_NVM_CON 0x0
432#define I40E_NVM_EXEC 0xf 433#define I40E_NVM_SNT 0x1
434#define I40E_NVM_LCB 0x2
435#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
436#define I40E_NVM_ERA 0x4
437#define I40E_NVM_CSUM 0x8
438#define I40E_NVM_AQE 0xe
439#define I40E_NVM_EXEC 0xf
433 440
434#define I40E_NVM_ADAPT_SHIFT 16 441#define I40E_NVM_ADAPT_SHIFT 16
435#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) 442#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -611,6 +618,7 @@ struct i40e_hw {
611 /* state of nvm update process */ 618 /* state of nvm update process */
612 enum i40e_nvmupd_state nvmupd_state; 619 enum i40e_nvmupd_state nvmupd_state;
613 struct i40e_aq_desc nvm_wb_desc; 620 struct i40e_aq_desc nvm_wb_desc;
621 struct i40e_aq_desc nvm_aq_event_desc;
614 struct i40e_virt_mem nvm_buff; 622 struct i40e_virt_mem nvm_buff;
615 bool nvm_release_on_done; 623 bool nvm_release_on_done;
616 u16 nvm_wait_opcode; 624 u16 nvm_wait_opcode;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 435a112d09f5..b0e6454995b6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -2196,8 +2196,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
2196 */ 2196 */
2197struct i40e_aqc_nvm_update { 2197struct i40e_aqc_nvm_update {
2198 u8 command_flags; 2198 u8 command_flags;
2199#define I40E_AQ_NVM_LAST_CMD 0x01 2199#define I40E_AQ_NVM_LAST_CMD 0x01
2200#define I40E_AQ_NVM_FLASH_ONLY 0x80 2200#define I40E_AQ_NVM_FLASH_ONLY 0x80
2201#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
2202#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
2203#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
2204#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
2201 u8 module_pointer; 2205 u8 module_pointer;
2202 __le16 length; 2206 __le16 length;
2203 __le32 offset; 2207 __le32 offset;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 1ba29bb85b67..c7831f7f7761 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
148 return 0; 148 return 0;
149} 149}
150 150
151/**
152 * i40evf_detect_recover_hung - Function to detect and recover hung_queues
153 * @vsi: pointer to vsi struct with tx queues
154 *
155 * VSI has netdev and netdev has TX queues. This function is to check each of
156 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
157 **/
158void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
159{
160 struct i40e_ring *tx_ring = NULL;
161 struct net_device *netdev;
162 unsigned int i;
163 int packets;
164
165 if (!vsi)
166 return;
167
168 if (test_bit(__I40E_VSI_DOWN, vsi->state))
169 return;
170
171 netdev = vsi->netdev;
172 if (!netdev)
173 return;
174
175 if (!netif_carrier_ok(netdev))
176 return;
177
178 for (i = 0; i < vsi->back->num_active_queues; i++) {
179 tx_ring = &vsi->back->tx_rings[i];
180 if (tx_ring && tx_ring->desc) {
181 /* If packet counter has not changed the queue is
182 * likely stalled, so force an interrupt for this
183 * queue.
184 *
185 * prev_pkt_ctr would be negative if there was no
186 * pending work.
187 */
188 packets = tx_ring->stats.packets & INT_MAX;
189 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
190 i40evf_force_wb(vsi, tx_ring->q_vector);
191 continue;
192 }
193
194 /* Memory barrier between read of packet count and call
195 * to i40evf_get_tx_pending()
196 */
197 smp_rmb();
198 tx_ring->tx_stats.prev_pkt_ctr =
199 i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
200 }
201 }
202}
203
151#define WB_STRIDE 4 204#define WB_STRIDE 4
152 205
153/** 206/**
@@ -469,6 +522,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
469 522
470 tx_ring->next_to_use = 0; 523 tx_ring->next_to_use = 0;
471 tx_ring->next_to_clean = 0; 524 tx_ring->next_to_clean = 0;
525 tx_ring->tx_stats.prev_pkt_ctr = -1;
472 return 0; 526 return 0;
473 527
474err: 528err:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8d26c85d12e1..e72f16b4555b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -313,6 +313,7 @@ struct i40e_tx_queue_stats {
313 u64 tx_done_old; 313 u64 tx_done_old;
314 u64 tx_linearize; 314 u64 tx_linearize;
315 u64 tx_force_wb; 315 u64 tx_force_wb;
316 int prev_pkt_ctr;
316 u64 tx_lost_interrupt; 317 u64 tx_lost_interrupt;
317}; 318};
318 319
@@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
467int i40evf_napi_poll(struct napi_struct *napi, int budget); 468int i40evf_napi_poll(struct napi_struct *napi, int budget);
468void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); 469void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
469u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); 470u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
471void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
470int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); 472int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
471bool __i40evf_chk_linearize(struct sk_buff *skb); 473bool __i40evf_chk_linearize(struct sk_buff *skb);
472 474
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 6afc31616e04..54951c84a481 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd {
361 I40E_NVMUPD_STATUS, 361 I40E_NVMUPD_STATUS,
362 I40E_NVMUPD_EXEC_AQ, 362 I40E_NVMUPD_EXEC_AQ,
363 I40E_NVMUPD_GET_AQ_RESULT, 363 I40E_NVMUPD_GET_AQ_RESULT,
364 I40E_NVMUPD_GET_AQ_EVENT,
364}; 365};
365 366
366enum i40e_nvmupd_state { 367enum i40e_nvmupd_state {
@@ -380,15 +381,21 @@ enum i40e_nvmupd_state {
380 381
381#define I40E_NVM_MOD_PNT_MASK 0xFF 382#define I40E_NVM_MOD_PNT_MASK 0xFF
382 383
383#define I40E_NVM_TRANS_SHIFT 8 384#define I40E_NVM_TRANS_SHIFT 8
384#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) 385#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
385#define I40E_NVM_CON 0x0 386#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
386#define I40E_NVM_SNT 0x1 387#define I40E_NVM_PRESERVATION_FLAGS_MASK \
387#define I40E_NVM_LCB 0x2 388 (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
388#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) 389#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
389#define I40E_NVM_ERA 0x4 390#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
390#define I40E_NVM_CSUM 0x8 391#define I40E_NVM_CON 0x0
391#define I40E_NVM_EXEC 0xf 392#define I40E_NVM_SNT 0x1
393#define I40E_NVM_LCB 0x2
394#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
395#define I40E_NVM_ERA 0x4
396#define I40E_NVM_CSUM 0x8
397#define I40E_NVM_AQE 0xe
398#define I40E_NVM_EXEC 0xf
392 399
393#define I40E_NVM_ADAPT_SHIFT 16 400#define I40E_NVM_ADAPT_SHIFT 16
394#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) 401#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -561,6 +568,7 @@ struct i40e_hw {
561 /* state of nvm update process */ 568 /* state of nvm update process */
562 enum i40e_nvmupd_state nvmupd_state; 569 enum i40e_nvmupd_state nvmupd_state;
563 struct i40e_aq_desc nvm_wb_desc; 570 struct i40e_aq_desc nvm_wb_desc;
571 struct i40e_aq_desc nvm_aq_event_desc;
564 struct i40e_virt_mem nvm_buff; 572 struct i40e_virt_mem nvm_buff;
565 bool nvm_release_on_done; 573 bool nvm_release_on_done;
566 u16 nvm_wait_opcode; 574 u16 nvm_wait_opcode;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 47040ab2e298..33c0ffcc8b13 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -187,6 +187,7 @@ enum i40evf_state_t {
187enum i40evf_critical_section_t { 187enum i40evf_critical_section_t {
188 __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ 188 __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
189 __I40EVF_IN_CLIENT_TASK, 189 __I40EVF_IN_CLIENT_TASK,
190 __I40EVF_IN_REMOVE_TASK, /* device being removed */
190}; 191};
191 192
192/* board specific private data structure */ 193/* board specific private data structure */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f92587aba3c7..8934f784e96f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1716,6 +1716,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
1716 if (adapter->state == __I40EVF_RUNNING) 1716 if (adapter->state == __I40EVF_RUNNING)
1717 i40evf_request_stats(adapter); 1717 i40evf_request_stats(adapter);
1718watchdog_done: 1718watchdog_done:
1719 if (adapter->state == __I40EVF_RUNNING)
1720 i40evf_detect_recover_hung(&adapter->vsi);
1719 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1721 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1720restart_watchdog: 1722restart_watchdog:
1721 if (adapter->state == __I40EVF_REMOVE) 1723 if (adapter->state == __I40EVF_REMOVE)
@@ -1803,6 +1805,12 @@ static void i40evf_reset_task(struct work_struct *work)
1803 int i = 0, err; 1805 int i = 0, err;
1804 bool running; 1806 bool running;
1805 1807
1808 /* When device is being removed it doesn't make sense to run the reset
1809 * task, just return in such a case.
1810 */
1811 if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
1812 return;
1813
1806 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, 1814 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
1807 &adapter->crit_section)) 1815 &adapter->crit_section))
1808 usleep_range(500, 1000); 1816 usleep_range(500, 1000);
@@ -3053,7 +3061,8 @@ static void i40evf_remove(struct pci_dev *pdev)
3053 struct i40evf_mac_filter *f, *ftmp; 3061 struct i40evf_mac_filter *f, *ftmp;
3054 struct i40e_hw *hw = &adapter->hw; 3062 struct i40e_hw *hw = &adapter->hw;
3055 int err; 3063 int err;
3056 3064 /* Indicate we are in remove and not to run reset_task */
3065 set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
3057 cancel_delayed_work_sync(&adapter->init_task); 3066 cancel_delayed_work_sync(&adapter->init_task);
3058 cancel_work_sync(&adapter->reset_task); 3067 cancel_work_sync(&adapter->reset_task);
3059 cancel_delayed_work_sync(&adapter->client_task); 3068 cancel_delayed_work_sync(&adapter->client_task);
@@ -3088,8 +3097,6 @@ static void i40evf_remove(struct pci_dev *pdev)
3088 if (adapter->watchdog_timer.function) 3097 if (adapter->watchdog_timer.function)
3089 del_timer_sync(&adapter->watchdog_timer); 3098 del_timer_sync(&adapter->watchdog_timer);
3090 3099
3091 flush_scheduled_work();
3092
3093 i40evf_free_rss(adapter); 3100 i40evf_free_rss(adapter);
3094 3101
3095 if (hw->aq.asq.count) 3102 if (hw->aq.asq.count)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index feb95b62a077..50ce0d6c09ef 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1001,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
1001 if (v_opcode == VIRTCHNL_OP_EVENT) { 1001 if (v_opcode == VIRTCHNL_OP_EVENT) {
1002 struct virtchnl_pf_event *vpe = 1002 struct virtchnl_pf_event *vpe =
1003 (struct virtchnl_pf_event *)msg; 1003 (struct virtchnl_pf_event *)msg;
1004 bool link_up = vpe->event_data.link_event.link_status;
1004 switch (vpe->event) { 1005 switch (vpe->event) {
1005 case VIRTCHNL_EVENT_LINK_CHANGE: 1006 case VIRTCHNL_EVENT_LINK_CHANGE:
1006 adapter->link_speed = 1007 adapter->link_speed =
1007 vpe->event_data.link_event.link_speed; 1008 vpe->event_data.link_event.link_speed;
1008 if (adapter->link_up != 1009
1009 vpe->event_data.link_event.link_status) { 1010 /* we've already got the right link status, bail */
1010 adapter->link_up = 1011 if (adapter->link_up == link_up)
1011 vpe->event_data.link_event.link_status; 1012 break;
1012 if (adapter->link_up) { 1013
1013 netif_tx_start_all_queues(netdev); 1014 /* If we get link up message and start queues before
1014 netif_carrier_on(netdev); 1015 * our queues are configured it will trigger a TX hang.
1015 } else { 1016 * In that case, just ignore the link status message,
1016 netif_tx_stop_all_queues(netdev); 1017 * we'll get another one after we enable queues and
1017 netif_carrier_off(netdev); 1018 * actually prepared to send traffic.
1018 } 1019 */
1019 i40evf_print_link_message(adapter); 1020 if (link_up && adapter->state != __I40EVF_RUNNING)
1021 break;
1022
1023 adapter->link_up = link_up;
1024 if (link_up) {
1025 netif_tx_start_all_queues(netdev);
1026 netif_carrier_on(netdev);
1027 } else {
1028 netif_tx_stop_all_queues(netdev);
1029 netif_carrier_off(netdev);
1020 } 1030 }
1031 i40evf_print_link_message(adapter);
1021 break; 1032 break;
1022 case VIRTCHNL_EVENT_RESET_IMPENDING: 1033 case VIRTCHNL_EVENT_RESET_IMPENDING:
1023 dev_info(&adapter->pdev->dev, "PF reset warning received\n"); 1034 dev_info(&adapter->pdev->dev, "PF reset warning received\n");