 drivers/net/ethernet/intel/i40e/i40e.h             |  3
 drivers/net/ethernet/intel/i40e/i40e_common.c      | 12
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c     |  4
 drivers/net/ethernet/intel/i40e/i40e_fcoe.c        |  2
 drivers/net/ethernet/intel/i40e/i40e_main.c        | 35
 drivers/net/ethernet/intel/i40e/i40e_txrx.c        | 49
 drivers/net/ethernet/intel/i40e/i40e_txrx.h        | 35
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 13
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c      | 49
 drivers/net/ethernet/intel/i40evf/i40e_txrx.h      | 35
 drivers/net/ethernet/intel/i40evf/i40evf_main.c    | 17
 11 files changed, 166 insertions(+), 88 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ce6e9c0427d..d25b3be5ba89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -244,7 +244,6 @@ struct i40e_fdir_filter {
 #define I40E_DCB_PRIO_TYPE_STRICT	0
 #define I40E_DCB_PRIO_TYPE_ETS		1
 #define I40E_DCB_STRICT_PRIO_CREDITS	127
-#define I40E_MAX_USER_PRIORITY	8
 /* DCB per TC information data structure */
 struct i40e_tc_info {
 	u16	qoffset;	/* Queue offset from base queue */
@@ -811,6 +810,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev,
 			  __always_unused __be16 proto, u16 vid);
 #endif
 int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -823,7 +823,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
 				      bool is_vf, bool is_netdev);
 #ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
 		    struct tc_to_netdev *tc);
 void i40e_netpoll(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294c2ab1..8276a1393e6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1901,13 +1901,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
  *
  * Reset the external PHY.
  **/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_phy_debug *cmd =
 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
-	enum i40e_status_code status;
+	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_phy_debug);
@@ -2157,6 +2157,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_add_get_update_vsi *cmd =
 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2171,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
 				       sizeof(vsi_ctx->info), cmd_details);
 
+	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
 	return status;
 }
 
2173 2179
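
The resp pointer added above works because an admin queue descriptor's
16-byte parameter area is shared between a command and its completion: the
same desc.params.raw bytes are reinterpreted through a second struct once
firmware has written the reply. A minimal host-side sketch of that
aliasing pattern, with made-up layouts (the real i40e_aqc_* structs differ,
and the kernel code also byte-swaps with le16_to_cpu):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_cmd  { uint16_t vsi_id; uint8_t rsvd[14]; };
struct demo_resp { uint16_t vsi_used; uint16_t vsi_free; uint8_t rsvd[12]; };

union demo_params {		/* stand-in for desc.params.raw */
	uint8_t raw[16];
	struct demo_cmd cmd;
	struct demo_resp resp;
};

int main(void)
{
	union demo_params params;
	uint16_t completion[2] = { 12, 372 };	/* pretend firmware reply */

	params.cmd.vsi_id = 7;	/* written before the command is sent */
	/* ...firmware overwrites the same bytes with the completion... */
	memcpy(params.raw, completion, sizeof(completion));

	printf("used=%u free=%u\n", params.resp.vsi_used, params.resp.vsi_free);
	return 0;
}
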
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b1659457a..410d237f9137 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1714,7 +1714,7 @@ static void i40e_diag_test(struct net_device *netdev,
 		/* If the device is online then take it offline */
 		if (if_running)
 			/* indicate we're in test mode */
-			dev_close(netdev);
+			i40e_close(netdev);
 		else
 			/* This reset does not affect link - if it is
 			 * changed to a type of reset that does affect
@@ -1743,7 +1743,7 @@ static void i40e_diag_test(struct net_device *netdev,
 			i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 		if (if_running)
-			dev_open(netdev);
+			i40e_open(netdev);
 	} else {
 		/* Online tests */
 		netif_info(pf, drv, netdev, "online testing starting\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c16f61..92d2208d13c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1371,7 +1371,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
 	if (i40e_chk_linearize(skb, count)) {
 		if (__skb_linearize(skb))
 			goto out_drop;
-		count = TXD_USE_COUNT(skb->len);
+		count = i40e_txd_use_count(skb->len);
 		tx_ring->tx_stats.tx_linearize++;
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 67006431726a..297fd39ba255 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -45,8 +45,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 1
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -4164,7 +4164,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 	int i;
 
 	i40e_stop_misc_vector(pf);
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
 		synchronize_irq(pf->msix_entries[0].vector);
 		free_irq(pf->msix_entries[0].vector, pf);
 	}
@@ -5509,11 +5509,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
  *
  * Returns 0, this is not allowed to fail
  **/
-#ifdef I40E_FCOE
 int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -5538,8 +5534,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 
 	WARN_ON(in_interrupt());
 
-	if (i40e_check_asq_alive(&pf->hw))
-		i40e_vc_notify_reset(pf);
 
 	/* do the biggest reset indicated */
 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -6377,7 +6371,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 			break;
 		default:
 			dev_info(&pf->pdev->dev,
-				 "ARQ Error: Unknown event 0x%04x received\n",
+				 "ARQ: Unknown event 0x%04x ignored\n",
 				 opcode);
 			break;
 		}
@@ -6742,6 +6736,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
 		return;
+	if (i40e_check_asq_alive(&pf->hw))
+		i40e_vc_notify_reset(pf);
 
 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
@@ -10826,6 +10822,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->bus.func = PCI_FUNC(pdev->devfn);
 	pf->instance = pfs_found;
 
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
 	if (debug != -1) {
 		pf->msg_enable = pf->hw.debug_mask;
 		pf->msg_enable = debug;
@@ -10871,12 +10873,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* set up a default setting for link flow control */
 	pf->hw.fc.requested_mode = I40E_FC_NONE;
 
-	/* set up the locks for the AQ, do this only once in probe
-	 * and destroy them only once in remove
-	 */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
 	err = i40e_init_adminq(hw);
 	if (err) {
 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -11269,7 +11265,6 @@ err_init_lan_hmc:
 	kfree(pf->qp_pile);
 err_sw_init:
 err_adminq_setup:
-	(void)i40e_shutdown_adminq(hw);
 err_pf_reset:
 	iounmap(hw->hw_addr);
 err_ioremap:
@@ -11311,8 +11306,10 @@ static void i40e_remove(struct pci_dev *pdev)
 	/* no more scheduling of any task */
 	set_bit(__I40E_SUSPENDED, &pf->state);
 	set_bit(__I40E_DOWN, &pf->state);
-	del_timer_sync(&pf->service_timer);
-	cancel_work_sync(&pf->service_task);
+	if (pf->service_timer.data)
+		del_timer_sync(&pf->service_timer);
+	if (pf->service_task.func)
+		cancel_work_sync(&pf->service_task);
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
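
The two guards make this teardown safe to reach from probe-failure paths
where the service timer and work item were never set up: since pf is
zero-allocated, an unarmed timer still has .data == 0 and an uninitialized
work item has .func == NULL, so both calls are skipped. A condensed
illustration of the idea using plain C stand-ins (not the kernel types):

#include <stdio.h>

struct fake_timer { unsigned long data; };
struct fake_work { void (*func)(void); };

static void teardown(struct fake_timer *t, struct fake_work *w)
{
	if (t->data)
		printf("del_timer_sync()\n");	/* only if setup ever ran */
	if (w->func)
		printf("cancel_work_sync()\n");	/* only if INIT_WORK ran */
}

int main(void)
{
	struct fake_timer t = { 0 };	/* probe failed before timer setup */
	struct fake_work w = { 0 };

	teardown(&t, &w);		/* prints nothing: both are skipped */
	return 0;
}
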
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..5bef5b0f00d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+			      struct i40e_ring *tx_ring, int napi_budget)
 {
 	u16 i = tx_ring->next_to_clean;
 	struct i40e_tx_buffer *tx_buf;
 	struct i40e_tx_desc *tx_head;
 	struct i40e_tx_desc *tx_desc;
-	unsigned int total_packets = 0;
-	unsigned int total_bytes = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = vsi->work_limit;
 
 	tx_buf = &tx_ring->tx_bi[i];
 	tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 		total_packets += tx_buf->gso_segs;
 
 		/* free the skb */
-		dev_consume_skb_any(tx_buf->skb);
+		napi_consume_skb(tx_buf->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 
 		if (budget &&
 		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
-		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+		    !test_bit(__I40E_DOWN, &vsi->state) &&
 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
 			tx_ring->arm_wb = true;
 	}
@@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 		smp_mb();
 		if (__netif_subqueue_stopped(tx_ring->netdev,
 					     tx_ring->queue_index) &&
-		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+		    !test_bit(__I40E_DOWN, &vsi->state)) {
 			netif_wake_subqueue(tx_ring->netdev,
 					    tx_ring->queue_index);
 			++tx_ring->tx_stats.restart_queue;
@@ -1975,9 +1977,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
-		clean_complete = clean_complete &&
-				 i40e_clean_tx_irq(ring, vsi->work_limit);
-		arm_wb = arm_wb || ring->arm_wb;
+		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+			clean_complete = false;
+			continue;
+		}
+		arm_wb |= ring->arm_wb;
 		ring->arm_wb = false;
 	}
 
@@ -1999,8 +2003,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
 
 		work_done += cleaned;
-		/* if we didn't clean as many as budgeted, we must be done */
-		clean_complete = clean_complete && (budget_per_ring > cleaned);
+		/* if we clean as many as budgeted, we must not be done */
+		if (cleaned >= budget_per_ring)
+			clean_complete = false;
 	}
 
 	/* If work not completed, return budget and polling will return */
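
The reworked Rx accounting flips the old test: a ring that consumed its
whole share of the NAPI budget may still have packets pending, so it must
force another poll rather than let the old "budget_per_ring > cleaned"
shortcut declare completion. A standalone sketch of the rule, with
illustrative numbers (the ring count and budget are assumptions, not values
from the driver):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int budget = 64, nr_rx_rings = 2;
	/* mirror of the driver's "at least 1 per ring" budget split */
	int budget_per_ring = budget / nr_rx_rings ? budget / nr_rx_rings : 1;
	int cleaned[2] = { 32, 17 };	/* example per-ring clean counts */
	bool clean_complete = true;

	for (int i = 0; i < nr_rx_rings; i++)
		if (cleaned[i] >= budget_per_ring)
			clean_complete = false;	/* ring may still have work */

	printf("clean_complete=%s\n", clean_complete ? "true" : "false");
	return 0;
}
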
@@ -2300,7 +2305,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 		/* remove payload length from outer checksum */
 		paylen = (__force u16)l4.udp->check;
-		paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+		paylen += ntohs((__force __be16)1) *
+			  (u16)~(skb->len - l4_offset);
 		l4.udp->check = ~csum_fold((__force __wsum)paylen);
 	}
 
@@ -2322,7 +2328,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	/* remove payload length from inner checksum */
 	paylen = (__force u16)l4.tcp->check;
-	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+	paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset);
 	l4.tcp->check = ~csum_fold((__force __wsum)paylen);
 
 	/* compute length of segmentation header */
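
The ntohs((__force __be16)1) change is purely a sparse endianness-annotation
fix; the arithmetic is unchanged. What it computes is a one's-complement
subtraction: adding the complement of a value to a 16-bit checksum is the
same as subtracting the value, which is how the payload length is removed
from the pseudo-header checksum before TSO. A userspace illustration of
that identity (names local to this sketch, not kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* subtract "value" from a 16-bit one's-complement checksum */
static uint16_t csum_sub16(uint16_t check, uint16_t value)
{
	uint32_t sum = check + (uint16_t)~value;	/* add the complement */
	sum = (sum & 0xffff) + (sum >> 16);		/* fold the carry back in */
	return (uint16_t)sum;
}

int main(void)
{
	/* 0x8123 - 0x0040 in one's-complement arithmetic -> 0x80e3 */
	printf("0x%04x\n", csum_sub16(0x8123, 0x0040));
	return 0;
}
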
@@ -2717,6 +2723,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_bi = first;
 
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
 		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
@@ -2724,12 +2732,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		dma_unmap_len_set(tx_bi, len, size);
 		dma_unmap_addr_set(tx_bi, dma, dma);
 
+		/* align size to end of page */
+		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 
 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
 			tx_desc->cmd_type_offset_bsz =
 				build_ctob(td_cmd, td_offset,
-					   I40E_MAX_DATA_PER_TXD, td_tag);
+					   max_data, td_tag);
 
 			tx_desc++;
 			i++;
@@ -2740,9 +2750,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			i = 0;
 		}
 
-		dma += I40E_MAX_DATA_PER_TXD;
-		size -= I40E_MAX_DATA_PER_TXD;
+		dma += max_data;
+		size -= max_data;
 
+		max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 	}
 
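
The "-dma & (I40E_MAX_READ_REQ_SIZE - 1)" expression is the standard
power-of-two trick for "bytes remaining to the next N-byte boundary":
stretching the first chunk by that amount leaves every subsequent 12K chunk
starting on a 4K boundary, matching the device's maximum read request size.
A quick standalone check, with an assumed example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x12345f40;	/* example DMA address */
	unsigned int align = 4096;	/* I40E_MAX_READ_REQ_SIZE */
	unsigned int pad = (unsigned int)(-dma & (align - 1));

	/* equivalent, more obvious form: (align - dma % align) % align */
	printf("%u bytes to the next 4K boundary\n", pad);	/* -> 192 */
	return 0;
}
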
@@ -2892,7 +2903,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (i40e_chk_linearize(skb, count)) {
 		if (__skb_linearize(skb))
 			goto out_drop;
-		count = TXD_USE_COUNT(skb->len);
+		count = i40e_txd_use_count(skb->len);
 		tx_ring->tx_stats.tx_linearize++;
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..9e654e611642 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -146,10 +146,39 @@ enum i40e_dyn_idx_t {
 
 #define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
-#define I40E_MAX_DATA_PER_TXD	8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE		4096
+#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+	unsigned int adjust = ~(u32)0;
+
+	/* if we rounded up on the reciprocal pull down the adjustment */
+	if ((max * reciprocal) > adjust)
+		adjust = ~(u32)(reciprocal - 1);
+
+	return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
@@ -377,7 +406,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 	int count = 0, size = skb_headlen(skb);
 
 	for (;;) {
-		count += TXD_USE_COUNT(size);
+		count += i40e_txd_use_count(size);
 
 		if (!nr_frags--)
 			break;
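
With TXD_USE_COUNT() gone, every caller now funnels through the
reciprocal-multiply helper above. The trick can be sanity-checked in
userspace against plain DIV_ROUND_UP; this sketch assumes the 12K aligned
limit and sweeps every size an skb could plausibly have:

#include <stdint.h>
#include <stdio.h>

#define MAX_ALIGNED (12 * 1024)	/* I40E_MAX_DATA_PER_TXD_ALIGNED */

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal =
		(unsigned int)(((1ull << 32) - 1 + (max / 2)) / max);
	uint32_t adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((uint64_t)max * reciprocal > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)(((uint64_t)size * reciprocal + adjust) >> 32);
}

int main(void)
{
	for (unsigned int size = 1; size <= 65536; size++) {
		unsigned int want = (size + MAX_ALIGNED - 1) / MAX_ALIGNED;

		if (txd_use_count(size) != want) {
			printf("mismatch at %u\n", size);
			return 1;
		}
	}
	puts("matches DIV_ROUND_UP for all sizes up to 64K");
	return 0;
}
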
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 816c6bbf7093..47b9e62473c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 }
 
 /**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
  * @vf: pointer to the VF structure
  *
  * send a link status message to a single VF
@@ -917,9 +917,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 {
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
+	u32 reg, reg_idx, bit_idx;
 	bool rsd = false;
 	int i;
-	u32 reg;
 
 	if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 		return;
@@ -988,6 +988,11 @@ complete_reset:
 	}
 	/* tell the VF the reset is done */
 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
+	/* clear the VFLR bit in GLGEN_VFLRSTAT */
+	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 	i40e_flush(hw);
 	clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
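
Moving the clear into i40e_reset_vf() means every reset path, not just the
VFLR interrupt handler, acknowledges the event. Each GLGEN_VFLRSTAT
register holds status bits for 32 VFs, hence the divide/modulo split on the
VF's absolute id. A standalone sketch with assumed example ids:

#include <stdio.h>

int main(void)
{
	unsigned int vf_base_id = 64, vf_id = 5;	/* assumed example ids */
	unsigned int abs_id = vf_base_id + vf_id;
	unsigned int reg_idx = abs_id / 32;	/* which 32-bit register */
	unsigned int bit_idx = abs_id % 32;	/* which bit inside it */

	printf("GLGEN_VFLRSTAT(%u), bit %u, mask 0x%08x\n",
	       reg_idx, bit_idx, 1u << bit_idx);	/* -> reg 2, bit 5 */
	return 0;
}
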
@@ -2293,9 +2298,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 		vf = &pf->vf[vf_id];
 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
 		if (reg & BIT(bit_idx)) {
-			/* clear the bit in GLGEN_VFLRSTAT */
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
+			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
 			if (!test_bit(__I40E_DOWN, &pf->state))
 				i40e_reset_vf(vf, true);
 		}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..570348d93e5d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+			      struct i40e_ring *tx_ring, int napi_budget)
 {
 	u16 i = tx_ring->next_to_clean;
 	struct i40e_tx_buffer *tx_buf;
 	struct i40e_tx_desc *tx_head;
 	struct i40e_tx_desc *tx_desc;
-	unsigned int total_packets = 0;
-	unsigned int total_bytes = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = vsi->work_limit;
 
 	tx_buf = &tx_ring->tx_bi[i];
 	tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 		total_packets += tx_buf->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buf->skb);
+		napi_consume_skb(tx_buf->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 
 		if (budget &&
 		    ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
-		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+		    !test_bit(__I40E_DOWN, &vsi->state) &&
 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
 			tx_ring->arm_wb = true;
 	}
@@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 		smp_mb();
 		if (__netif_subqueue_stopped(tx_ring->netdev,
 					     tx_ring->queue_index) &&
-		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+		    !test_bit(__I40E_DOWN, &vsi->state)) {
 			netif_wake_subqueue(tx_ring->netdev,
 					    tx_ring->queue_index);
 			++tx_ring->tx_stats.restart_queue;
@@ -1411,9 +1413,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
-		clean_complete = clean_complete &&
-				 i40e_clean_tx_irq(ring, vsi->work_limit);
-		arm_wb = arm_wb || ring->arm_wb;
+		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+			clean_complete = false;
+			continue;
+		}
+		arm_wb |= ring->arm_wb;
 		ring->arm_wb = false;
 	}
 
@@ -1435,8 +1439,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
 
 		work_done += cleaned;
-		/* if we didn't clean as many as budgeted, we must be done */
-		clean_complete = clean_complete && (budget_per_ring > cleaned);
+		/* if we clean as many as budgeted, we must not be done */
+		if (cleaned >= budget_per_ring)
+			clean_complete = false;
 	}
 
 	/* If work not completed, return budget and polling will return */
@@ -1567,7 +1572,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 		/* remove payload length from outer checksum */
 		paylen = (__force u16)l4.udp->check;
-		paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+		paylen += ntohs((__force __be16)1) *
+			  (u16)~(skb->len - l4_offset);
 		l4.udp->check = ~csum_fold((__force __wsum)paylen);
 	}
 
@@ -1589,7 +1595,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	/* remove payload length from inner checksum */
 	paylen = (__force u16)l4.tcp->check;
-	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+	paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset);
 	l4.tcp->check = ~csum_fold((__force __wsum)paylen);
 
 	/* compute length of segmentation header */
@@ -1936,6 +1942,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_bi = first;
 
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
 		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
@@ -1943,12 +1951,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		dma_unmap_len_set(tx_bi, len, size);
 		dma_unmap_addr_set(tx_bi, dma, dma);
 
+		/* align size to end of page */
+		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 
 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
 			tx_desc->cmd_type_offset_bsz =
 				build_ctob(td_cmd, td_offset,
-					   I40E_MAX_DATA_PER_TXD, td_tag);
+					   max_data, td_tag);
 
 			tx_desc++;
 			i++;
@@ -1959,9 +1969,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			i = 0;
 		}
 
-		dma += I40E_MAX_DATA_PER_TXD;
-		size -= I40E_MAX_DATA_PER_TXD;
+		dma += max_data;
+		size -= max_data;
 
+		max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 	}
 
@@ -2110,7 +2121,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (i40e_chk_linearize(skb, count)) {
 		if (__skb_linearize(skb))
 			goto out_drop;
-		count = TXD_USE_COUNT(skb->len);
+		count = i40e_txd_use_count(skb->len);
 		tx_ring->tx_stats.tx_linearize++;
 	}
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..3ec0ea5ea3db 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -146,10 +146,39 @@ enum i40e_dyn_idx_t {
 
 #define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
-#define I40E_MAX_DATA_PER_TXD	8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE		4096
+#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+	unsigned int adjust = ~(u32)0;
+
+	/* if we rounded up on the reciprocal pull down the adjustment */
+	if ((max * reciprocal) > adjust)
+		adjust = ~(u32)(reciprocal - 1);
+
+	return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
@@ -359,7 +388,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 	int count = 0, size = skb_headlen(skb);
 
 	for (;;) {
-		count += TXD_USE_COUNT(size);
+		count += i40e_txd_use_count(size);
 
 		if (!nr_frags--)
 			break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae2fa84..e3973684746b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 15
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 1
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
@@ -1507,7 +1507,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
 	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
 				     GFP_KERNEL);
 	if (!adapter->q_vectors)
-		goto err_out;
+		return -ENOMEM;
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 		q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1519,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
 	}
 
 	return 0;
-
-err_out:
-	while (q_idx) {
-		q_idx--;
-		q_vector = &adapter->q_vectors[q_idx];
-		netif_napi_del(&q_vector->napi);
-	}
-	kfree(adapter->q_vectors);
-	return -ENOMEM;
 }
 
 /**
@@ -2003,6 +1994,8 @@ static void i40evf_adminq_task(struct work_struct *work)
 
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
+	if (val == 0xdeadbeef) /* indicates device in reset */
+		goto freedom;
 	oldval = val;
 	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
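
The 0xdeadbeef early-out guards against acting on admin queue registers
while the VF is mid-reset, when MMIO reads return a junk sentinel instead
of live state. The guard pattern in isolation (only the sentinel value
comes from the patch; the function and names around it are illustrative):

#include <stdint.h>
#include <stdio.h>

static int arq_check(uint32_t val)
{
	if (val == 0xdeadbeef)	/* device in reset: register contents invalid */
		return -1;	/* skip ARQ error processing this pass */
	return 0;
}

int main(void)
{
	printf("%d\n", arq_check(0xdeadbeef));	/* -> -1, bail out */
	printf("%d\n", arq_check(0x00000200));	/* -> 0, plausible length */
	return 0;
}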