Diffstat (limited to 'drivers/net/ethernet/intel')
 drivers/net/ethernet/intel/Kconfig               |  2
 drivers/net/ethernet/intel/e1000/e1000_ethtool.c |  8
 drivers/net/ethernet/intel/e1000e/netdev.c       |  4
 drivers/net/ethernet/intel/i40e/i40e_main.c      | 31
 drivers/net/ethernet/intel/i40e/i40e_txrx.c      |  4
 drivers/net/ethernet/intel/i40e/i40e_xsk.c       |  5
 drivers/net/ethernet/intel/igb/igb.h             |  2
 drivers/net/ethernet/intel/igb/igb_ethtool.c     |  4
 drivers/net/ethernet/intel/igb/igb_main.c        | 14
 drivers/net/ethernet/intel/ixgb/ixgb_main.c      |  8
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 19
 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c     | 15
 12 files changed, 83 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 31fb76ee9d82..a1246e89aad4 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -159,7 +159,7 @@ config IXGBE
         tristate "Intel(R) 10GbE PCI Express adapters support"
         depends on PCI
         select MDIO
-        select MDIO_DEVICE
+        select PHYLIB
         imply PTP_1588_CLOCK
         ---help---
           This driver supports Intel(R) 10GbE PCI Express family of
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2569a168334c..a41008523c98 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
         txdr->size = ALIGN(txdr->size, 4096);
-        txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-                                         GFP_KERNEL);
+        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+                                        GFP_KERNEL);
         if (!txdr->desc) {
                 ret_val = 2;
                 goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
         }
 
         rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-        rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-                                         GFP_KERNEL);
+        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+                                        GFP_KERNEL);
         if (!rxdr->desc) {
                 ret_val = 6;
                 goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 308c006cb41d..189f231075c2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 {
         struct pci_dev *pdev = adapter->pdev;
 
-        ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
-                                         GFP_KERNEL);
+        ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+                                        GFP_KERNEL);
         if (!ring->desc)
                 return -ENOMEM;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4d40878e395a..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
 
         mem->size = ALIGN(size, alignment);
-        mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
-                                      &mem->pa, GFP_KERNEL);
+        mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+                                     GFP_KERNEL);
         if (!mem->va)
                 return -ENOMEM;
 
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
              i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
              !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
         if (!ok) {
+                /* Log this in case the user has forgotten to give the kernel
+                 * any buffers, even later in the application.
+                 */
                 dev_info(&vsi->back->pdev->dev,
-                         "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+                         "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
                          ring->xsk_umem ? "UMEM enabled " : "",
                          ring->queue_index, pf_q);
         }
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
 
         for (i = 0; i < vsi->num_queue_pairs; i++) {
                 i40e_clean_tx_ring(vsi->tx_rings[i]);
-                if (i40e_enabled_xdp_vsi(vsi))
+                if (i40e_enabled_xdp_vsi(vsi)) {
+                        /* Make sure that in-progress ndo_xdp_xmit
+                         * calls are completed.
+                         */
+                        synchronize_rcu();
                         i40e_clean_tx_ring(vsi->xdp_rings[i]);
+                }
                 i40e_clean_rx_ring(vsi->rx_rings[i]);
         }
 
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
         if (old_prog)
                 bpf_prog_put(old_prog);
 
+        /* Kick start the NAPI context if there is an AF_XDP socket open
+         * on that queue id. This so that receiving will start.
+         */
+        if (need_reset && prog)
+                for (i = 0; i < vsi->num_queue_pairs; i++)
+                        if (vsi->xdp_rings[i]->xsk_umem)
+                                (void)i40e_xsk_async_xmit(vsi->netdev, i);
+
         return 0;
 }
 
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
 {
         i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
-        if (i40e_enabled_xdp_vsi(vsi))
+        if (i40e_enabled_xdp_vsi(vsi)) {
+                /* Make sure that in-progress ndo_xdp_xmit calls are
+                 * completed.
+                 */
+                synchronize_rcu();
                 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+        }
         i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
         struct i40e_netdev_priv *np = netdev_priv(dev);
         unsigned int queue_index = smp_processor_id();
         struct i40e_vsi *vsi = np->vsi;
+        struct i40e_pf *pf = vsi->back;
         struct i40e_ring *xdp_ring;
         int drops = 0;
         int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
         if (test_bit(__I40E_VSI_DOWN, vsi->state))
                 return -ENETDOWN;
 
-        if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+        if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+            test_bit(__I40E_CONFIG_BUSY, pf->state))
                 return -ENXIO;
 
         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
                 err = i40e_queue_pair_enable(vsi, qid);
                 if (err)
                         return err;
+
+                /* Kick start the NAPI context so that receiving will start */
+                err = i40e_xsk_async_xmit(vsi->netdev, qid);
+                if (err)
+                        return err;
         }
 
         return 0;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index fe1592ae8769..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
         /* OS defined structs */
         struct pci_dev *pdev;
 
-        struct mutex stats64_lock;
+        spinlock_t stats64_lock;
         struct rtnl_link_stats64 stats64;
 
         /* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426060b678f..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
         int i, j;
         char *p;
 
-        mutex_lock(&adapter->stats64_lock);
+        spin_lock(&adapter->stats64_lock);
         igb_update_stats(adapter);
 
         for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
                 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
                 i += IGB_RX_QUEUE_STATS_LEN;
         }
-        mutex_unlock(&adapter->stats64_lock);
+        spin_unlock(&adapter->stats64_lock);
 }
 
 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87bdf1604ae2..7137e7f9c7f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
         del_timer_sync(&adapter->phy_info_timer);
 
         /* record the stats before reset*/
-        mutex_lock(&adapter->stats64_lock);
+        spin_lock(&adapter->stats64_lock);
         igb_update_stats(adapter);
-        mutex_unlock(&adapter->stats64_lock);
+        spin_unlock(&adapter->stats64_lock);
 
         adapter->link_speed = 0;
         adapter->link_duplex = 0;
@@ -3840,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
         adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
         spin_lock_init(&adapter->nfc_lock);
-        mutex_init(&adapter->stats64_lock);
+        spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
         switch (hw->mac.type) {
         case e1000_82576:
@@ -5406,9 +5406,9 @@ no_wait:
                 }
         }
 
-        mutex_lock(&adapter->stats64_lock);
+        spin_lock(&adapter->stats64_lock);
         igb_update_stats(adapter);
-        mutex_unlock(&adapter->stats64_lock);
+        spin_unlock(&adapter->stats64_lock);
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
                 struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6235,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev,
 {
         struct igb_adapter *adapter = netdev_priv(netdev);
 
-        mutex_lock(&adapter->stats64_lock);
+        spin_lock(&adapter->stats64_lock);
         igb_update_stats(adapter);
         memcpy(stats, &adapter->stats64, sizeof(*stats));
-        mutex_unlock(&adapter->stats64_lock);
+        spin_unlock(&adapter->stats64_lock);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1d4d1686909a..e5ac2d3fd816 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
         txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
         txdr->size = ALIGN(txdr->size, 4096);
 
-        txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-                                         GFP_KERNEL);
+        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+                                        GFP_KERNEL);
         if (!txdr->desc) {
                 vfree(txdr->buffer_info);
                 return -ENOMEM;
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
         rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
         rxdr->size = ALIGN(rxdr->size, 4096);
 
-        rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-                                         GFP_KERNEL);
+        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+                                        GFP_KERNEL);
 
         if (!rxdr->desc) {
                 vfree(rxdr->buffer_info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
                 else
                         mrqc = IXGBE_MRQC_VMDQRSS64EN;
 
-                /* Enable L3/L4 for Tx Switched packets */
-                mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+                /* Enable L3/L4 for Tx Switched packets only for X550,
+                 * older devices do not support this feature
+                 */
+                if (hw->mac.type >= ixgbe_mac_X550)
+                        mrqc |= IXGBE_MRQC_L3L4TXSWEN;
         } else {
                 if (tcs > 4)
                         mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
         int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
         struct ixgbe_adapter *adapter = netdev_priv(dev);
         struct bpf_prog *old_prog;
+        bool need_reset;
 
         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                 return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
                 return -ENOMEM;
 
         old_prog = xchg(&adapter->xdp_prog, prog);
+        need_reset = (!!prog != !!old_prog);
 
         /* If transitioning XDP modes reconfigure rings */
-        if (!!prog != !!old_prog) {
+        if (need_reset) {
                 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
                 if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
         if (old_prog)
                 bpf_prog_put(old_prog);
 
+        /* Kick start the NAPI context if there is an AF_XDP socket open
+         * on that queue id. This so that receiving will start.
+         */
+        if (need_reset && prog)
+                for (i = 0; i < adapter->num_rx_queues; i++)
+                        if (adapter->xdp_ring[i]->xsk_umem)
+                                (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
         return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
                 ixgbe_txrx_ring_disable(adapter, qid);
 
         err = ixgbe_add_xsk_umem(adapter, umem, qid);
+        if (err)
+                return err;
 
-        if (if_running)
+        if (if_running) {
                 ixgbe_txrx_ring_enable(adapter, qid);
 
-        return err;
+                /* Kick start the NAPI context so that receiving will start */
+                err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+                if (err)
+                        return err;
+        }
+
+        return 0;
 }
 
 static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
153 161
154static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) 162static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
         dma_addr_t dma;
 
         while (budget-- > 0) {
-                if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+                if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+                    !netif_carrier_ok(xdp_ring->netdev)) {
                         work_done = false;
                         break;
                 }