Diffstat (limited to 'drivers/net/wireless/ath/wil6210/txrx.c')
 drivers/net/wireless/ath/wil6210/txrx.c | 223 +++++++++++++++++---------
 1 file changed, 145 insertions(+), 78 deletions(-)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 092081e209da..c8c547457eb4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,23 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	return 0;
 }
 
+static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+			     struct wil_ctx *ctx)
+{
+	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+	u16 dmalen = le16_to_cpu(d->dma.length);
+	switch (ctx->mapped_as) {
+	case wil_mapped_as_single:
+		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+		break;
+	case wil_mapped_as_page:
+		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+		break;
+	default:
+		break;
+	}
+}
+
 static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 			   int tx)
 {
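The new wil_txdesc_unmap() helper folds together the unmap logic that this patch removes from three call sites (ring teardown, the Tx error-unwind path, and Tx completion). It keys off a per-descriptor software context recording how each descriptor was mapped, so the matching dma_unmap_*() call can always be chosen. A sketch of the bookkeeping it assumes; the names follow the usage in this patch, but the real definitions live in wil6210.h, which is not part of this diff:

enum wil_desc_mapping {			/* assumed, per usage in this patch */
	wil_mapped_as_none = 0,		/* descriptor carries no mapping */
	wil_mapped_as_single,		/* skb head, via dma_map_single() */
	wil_mapped_as_page,		/* paged fragment, via skb_frag_dma_map() */
};

struct wil_ctx {
	struct sk_buff *skb;		/* set on the last descriptor of a frame */
	u8 nr_frags;			/* set on the first descriptor of a frame */
	u8 mapped_as;			/* how to undo the DMA mapping */
};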
@@ -122,15 +139,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 
 		ctx = &vring->ctx[vring->swtail];
 		*d = *_d;
-		pa = wil_desc_addr(&d->dma.addr);
-		dmalen = le16_to_cpu(d->dma.length);
-		if (vring->ctx[vring->swtail].mapped_as_page) {
-			dma_unmap_page(dev, pa, dmalen,
-				       DMA_TO_DEVICE);
-		} else {
-			dma_unmap_single(dev, pa, dmalen,
-					 DMA_TO_DEVICE);
-		}
+		wil_txdesc_unmap(dev, d, ctx);
 		if (ctx->skb)
 			dev_kfree_skb_any(ctx->skb);
 		vring->swtail = wil_vring_next_tail(vring);
@@ -479,7 +488,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
  */
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 {
-	int rc;
+	gro_result_t rc;
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	unsigned int len = skb->len;
 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
@@ -488,17 +497,17 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 
 	skb_orphan(skb);
 
-	rc = netif_receive_skb(skb);
+	rc = napi_gro_receive(&wil->napi_rx, skb);
 
-	if (likely(rc == NET_RX_SUCCESS)) {
+	if (unlikely(rc == GRO_DROP)) {
+		ndev->stats.rx_dropped++;
+		stats->rx_dropped++;
+		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+	} else {
 		ndev->stats.rx_packets++;
 		stats->rx_packets++;
 		ndev->stats.rx_bytes += len;
 		stats->rx_bytes += len;
-
-	} else {
-		ndev->stats.rx_dropped++;
-		stats->rx_dropped++;
 	}
 }
 
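Switching from netif_receive_skb() to napi_gro_receive() lets the stack coalesce received TCP segments before handing them up, which is worthwhile at 60 GHz data rates. The return type changes with it: GRO reports a gro_result_t, where only GRO_DROP means the packet was lost, so the drop accounting now keys off that single value rather than NET_RX_SUCCESS. A minimal sketch of the same pattern in a generic NAPI poll loop; the mydrv_* names are hypothetical, the napi_* calls are the real kernel API of this era:

#include <linux/netdevice.h>

struct mydrv_priv {				/* hypothetical driver state */
	struct napi_struct napi;
	unsigned long rx_dropped;
};

struct sk_buff *mydrv_next_rx_skb(struct mydrv_priv *priv); /* hypothetical */

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydrv_next_rx_skb(priv);

		if (!skb)
			break;
		/* GRO may coalesce, hold, or drop the skb */
		if (napi_gro_receive(napi, skb) == GRO_DROP)
			priv->rx_dropped++;
		done++;
	}
	if (done < budget)
		napi_complete(napi);	/* all caught up: re-enable interrupts */
	return done;
}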
@@ -548,6 +557,11 @@ int wil_rx_init(struct wil6210_priv *wil)
 	struct vring *vring = &wil->vring_rx;
 	int rc;
 
+	if (vring->va) {
+		wil_err(wil, "Rx ring already allocated\n");
+		return -EINVAL;
+	}
+
 	vring->size = WIL6210_RX_RING_SIZE;
 	rc = wil_vring_alloc(wil, vring);
 	if (rc)
@@ -588,7 +602,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
 				.ring_size = cpu_to_le16(size),
 			},
 			.ringid = id,
-			.cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+			.cidxtid = mk_cidxtid(cid, tid),
 			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
 			.mac_ctrl = 0,
 			.to_resolution = 0,
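mk_cidxtid() replaces the open-coded nibble packing. The 8-bit cidxtid field used across the WMI interface carries the connection ID in the low nibble and the TID in the high nibble; a helper keeps the encoding in one place. Its shape, inferred directly from the expression it replaces (the actual helper sits in wil6210.h):

/* Pack cid/tid into the single-byte field used by WMI commands;
 * equivalent to the removed expression.
 */
static inline u8 mk_cidxtid(u8 cid, u8 tid)
{
	return (cid & 0xf) | ((tid & 0xf) << 4);
}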
@@ -604,6 +618,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
 		struct wmi_vring_cfg_done_event cmd;
 	} __packed reply;
 	struct vring *vring = &wil->vring_tx[id];
+	struct vring_tx_data *txdata = &wil->vring_tx_data[id];
 
 	if (vring->va) {
 		wil_err(wil, "Tx ring [%d] already allocated\n", id);
@@ -611,6 +626,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
 		goto out;
 	}
 
+	memset(txdata, 0, sizeof(*txdata));
 	vring->size = size;
 	rc = wil_vring_alloc(wil, vring);
 	if (rc)
@@ -634,6 +650,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
 	}
 	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
 
+	txdata->enabled = 1;
+
 	return 0;
  out_free:
 	wil_vring_free(wil, vring, 1);
@@ -646,9 +664,16 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
 {
 	struct vring *vring = &wil->vring_tx[id];
 
+	WARN_ON(!mutex_is_locked(&wil->mutex));
+
 	if (!vring->va)
 		return;
 
+	/* make sure NAPI won't touch this vring */
+	wil->vring_tx_data[id].enabled = 0;
+	if (test_bit(wil_status_napi_en, &wil->status))
+		napi_synchronize(&wil->napi_tx);
+
 	wil_vring_free(wil, vring, 1);
 }
 
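The teardown ordering here is the important part: mark the ring disabled first so a freshly scheduled Tx-completion poll returns early, then napi_synchronize() blocks until any poll already running on another CPU has finished, and only then is it safe to free the ring. The same quiesce-then-free pattern in isolation (a sketch; mydrv_* names are hypothetical, napi_synchronize() is the real API):

struct mydrv_ring { int enabled; /* ... descriptors ... */ };

void mydrv_ring_free(struct mydrv_priv *priv, struct mydrv_ring *ring); /* hypothetical */

static void mydrv_ring_fini(struct mydrv_priv *priv, struct mydrv_ring *ring)
{
	ring->enabled = 0;		/* new NAPI polls now bail out early */
	napi_synchronize(&priv->napi);	/* wait out a poll already in flight */
	mydrv_ring_free(priv, ring);	/* nothing can reference the ring now */
}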
@@ -662,6 +687,10 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
 	if (cid < 0)
 		return NULL;
 
+	if (!wil->sta[cid].data_port_open &&
+	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+		return NULL;
+
 	/* TODO: fix for multiple TID */
 	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
 		if (wil->vring2cid_tid[i][0] == cid) {
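data_port_open implements the IEEE 802.1X controlled port: until the security handshake with a peer completes, the only traffic allowed toward it is EAPOL (ethertype ETH_P_PAE, 0x888E), so the vring lookup now refuses to return a ring for anything else. The predicate in isolation (a sketch):

#include <linux/etherdevice.h>	/* ETH_P_PAE, cpu_to_be16 */

/* 802.1X controlled port: before it opens, pass only EAPOL key frames */
static bool tx_allowed(bool data_port_open, __be16 protocol)
{
	return data_port_open || protocol == cpu_to_be16(ETH_P_PAE);
}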
@@ -700,12 +729,19 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
 	struct vring *v, *v2;
 	struct sk_buff *skb2;
 	int i;
+	u8 cid;
 
-	/* find 1-st vring */
+	/* find 1-st vring eligible for data */
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
 		v = &wil->vring_tx[i];
-		if (v->va)
-			goto found;
+		if (!v->va)
+			continue;
+
+		cid = wil->vring2cid_tid[i][0];
+		if (!wil->sta[cid].data_port_open)
+			continue;
+
+		goto found;
 	}
 
 	wil_err(wil, "Tx while no vrings active?\n");
@@ -721,6 +757,10 @@ found:
 		v2 = &wil->vring_tx[i];
 		if (!v2->va)
 			continue;
+		cid = wil->vring2cid_tid[i][0];
+		if (!wil->sta[cid].data_port_open)
+			continue;
+
 		skb2 = skb_copy(skb, GFP_ATOMIC);
 		if (skb2) {
 			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
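Together with the previous hunk, broadcast handling becomes authorization-aware: the original skb goes out on the first vring whose peer has an open data port, and skb_copy() clones it for every other eligible peer, since wil_tx_vring() consumes one skb per ring. The control flow, condensed (a sketch, not the driver's exact loop structure):

/* Condensed shape of wil_tx_bcast() after this patch (sketch) */
static struct vring *tx_bcast_pick(struct wil6210_priv *wil,
				   struct sk_buff *skb)
{
	struct vring *first = NULL;
	int i;

	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *v = &wil->vring_tx[i];
		struct sk_buff *skb2;
		u8 cid = wil->vring2cid_tid[i][0];

		if (!v->va || !wil->sta[cid].data_port_open)
			continue;	/* ring unused or peer not authorized */
		if (!first) {
			first = v;	/* caller sends the original skb here */
			continue;
		}
		skb2 = skb_copy(skb, GFP_ATOMIC);	/* per-peer duplicate */
		if (skb2)
			wil_tx_vring(wil, v, skb2);
	}
	return first;			/* NULL: no authorized peers at all */
}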
@@ -759,6 +799,13 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
 	return 0;
 }
 
+static inline
+void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
+{
+	d->mac.d[2] |= ((nr_frags + 1) <<
+		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+}
+
 static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
 					 struct vring_tx_desc *d,
 					 struct sk_buff *skb)
@@ -823,8 +870,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 
 	wil_dbg_txrx(wil, "%s()\n", __func__);
 
-	if (avail < vring->size/8)
-		netif_tx_stop_all_queues(wil_to_ndev(wil));
 	if (avail < 1 + nr_frags) {
 		wil_err(wil, "Tx ring full. No space for %d fragments\n",
 			1 + nr_frags);
@@ -842,6 +887,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 
 	if (unlikely(dma_mapping_error(dev, pa)))
 		return -EINVAL;
+	vring->ctx[i].mapped_as = wil_mapped_as_single;
 	/* 1-st segment */
 	wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
 	/* Process TCP/UDP checksum offloading */
@@ -851,8 +897,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 		goto dma_error;
 	}
 
-	d->mac.d[2] |= ((nr_frags + 1) <<
-		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+	vring->ctx[i].nr_frags = nr_frags;
+	wil_tx_desc_set_nr_frags(d, nr_frags);
 	if (nr_frags)
 		*_d = *d;
 
@@ -867,8 +913,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 					DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev, pa)))
 			goto dma_error;
+		vring->ctx[i].mapped_as = wil_mapped_as_page;
 		wil_tx_desc_map(d, pa, len, vring_index);
-		vring->ctx[i].mapped_as_page = 1;
+		/* no need to check return code -
+		 * if it succeeded for 1-st descriptor,
+		 * it will succeed here too
+		 */
+		wil_tx_desc_offload_cksum_set(wil, d, skb);
 		*_d = *d;
 	}
 	/* for the last seg only */
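Two things change in the fragment-mapping loop: each descriptor's context now records mapped_as (the head is mapped with dma_map_single(), paged fragments with skb_frag_dma_map(), and each must be undone by the matching unmap call), and the checksum-offload fields are now programmed on every fragment descriptor rather than only the first. The resulting sequence, condensed into a generic form (a sketch under those assumptions; mydrv_* and the pas[] array are hypothetical, the DMA and skb APIs are real):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int mydrv_map_skb(struct device *dev, struct sk_buff *skb,
			 struct wil_ctx *ctx, dma_addr_t *pas)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int f;

	pas[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pas[0])))
		return -ENOMEM;
	ctx[0].mapped_as = wil_mapped_as_single;
	ctx[0].nr_frags = nr_frags;	/* completion walks this many more */

	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pas[f + 1] = skb_frag_dma_map(dev, frag, 0,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pas[f + 1])))
			goto unwind;
		ctx[f + 1].mapped_as = wil_mapped_as_page;
		/* checksum-offload fields set per fragment here */
	}
	return 0;

unwind:
	while (--f >= 0)	/* frags 0..f-1 were mapped as pages */
		dma_unmap_page(dev, pas[f + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[f]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, pas[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}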
@@ -897,7 +948,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 	/* unmap what we have mapped */
 	nr_frags = f + 1; /* frags mapped + one for skb head */
 	for (f = 0; f < nr_frags; f++) {
-		u16 dmalen;
 		struct wil_ctx *ctx;
 
 		i = (swhead + f) % vring->size;
@@ -905,12 +955,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 		_d = &(vring->va[i].tx);
 		*d = *_d;
 		_d->dma.status = TX_DMA_STATUS_DU;
-		pa = wil_desc_addr(&d->dma.addr);
-		dmalen = le16_to_cpu(d->dma.length);
-		if (ctx->mapped_as_page)
-			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
-		else
-			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+		wil_txdesc_unmap(dev, d, ctx);
 
 		if (ctx->skb)
 			dev_kfree_skb_any(ctx->skb);
@@ -927,11 +972,15 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	struct ethhdr *eth = (void *)skb->data;
 	struct vring *vring;
+	static bool pr_once_fw;
 	int rc;
 
 	wil_dbg_txrx(wil, "%s()\n", __func__);
 	if (!test_bit(wil_status_fwready, &wil->status)) {
-		wil_err(wil, "FW not ready\n");
+		if (!pr_once_fw) {
+			wil_err(wil, "FW not ready\n");
+			pr_once_fw = true;
+		}
 		goto drop;
 	}
 	if (!test_bit(wil_status_fwconnected, &wil->status)) {
@@ -942,6 +991,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		wil_err(wil, "Xmit in monitor mode not supported\n");
 		goto drop;
 	}
+	pr_once_fw = false;
 
 	/* find vring */
 	if (is_unicast_ether_addr(eth->h_dest)) {
@@ -956,6 +1006,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	/* set up vring entry */
 	rc = wil_tx_vring(wil, vring, skb);
 
+	/* do we still have enough room in the vring? */
+	if (wil_vring_avail_tx(vring) < vring->size/8)
+		netif_tx_stop_all_queues(wil_to_ndev(wil));
+
 	switch (rc) {
 	case 0:
 		/* statistics will be updated on the tx_complete */
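The ring-full throttle moves out of wil_tx_vring() to after the frame has been queued: if free descriptors drop below 1/8 of the ring, the netdev queues are stopped; wil_tx_complete() (below) wakes them only once more than 1/4 of the ring is free again. The gap between the two watermarks provides hysteresis so the queues do not flap on every packet. The stop/wake pairing in isolation (a sketch, hypothetical mydrv_* names):

struct mydrv_ring { int size; /* ... */ };

int mydrv_ring_avail(struct mydrv_ring *r); /* hypothetical */

/* Watermarks mirror the patch: stop below size/8, wake above size/4 */
static void mydrv_after_queue(struct net_device *ndev, struct mydrv_ring *r)
{
	if (mydrv_ring_avail(r) < r->size / 8)	/* nearly full */
		netif_tx_stop_all_queues(ndev);
}

static void mydrv_after_reap(struct net_device *ndev, struct mydrv_ring *r)
{
	if (mydrv_ring_avail(r) > r->size / 4)	/* comfortably empty again */
		netif_tx_wake_all_queues(ndev);
}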
@@ -985,69 +1039,82 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 	struct net_device *ndev = wil_to_ndev(wil);
 	struct device *dev = wil_to_dev(wil);
 	struct vring *vring = &wil->vring_tx[ringid];
+	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
 	int done = 0;
 	int cid = wil->vring2cid_tid[ringid][0];
 	struct wil_net_stats *stats = &wil->sta[cid].stats;
+	volatile struct vring_tx_desc *_d;
 
 	if (!vring->va) {
 		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
 		return 0;
 	}
 
+	if (!txdata->enabled) {
+		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
+		return 0;
+	}
+
 	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
 
 	while (!wil_vring_is_empty(vring)) {
-		volatile struct vring_tx_desc *_d =
-					&vring->va[vring->swtail].tx;
-		struct vring_tx_desc dd, *d = &dd;
-		dma_addr_t pa;
-		u16 dmalen;
+		int new_swtail;
 		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
-		struct sk_buff *skb = ctx->skb;
-
-		*d = *_d;
+		/**
+		 * For the fragmented skb, HW will set DU bit only for the
+		 * last fragment. look for it
+		 */
+		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
+		/* TODO: check we are not past head */
 
-		if (!(d->dma.status & TX_DMA_STATUS_DU))
+		_d = &vring->va[lf].tx;
+		if (!(_d->dma.status & TX_DMA_STATUS_DU))
 			break;
 
-		dmalen = le16_to_cpu(d->dma.length);
-		trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
-				      d->dma.error);
-		wil_dbg_txrx(wil,
-			     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
-			     vring->swtail, dmalen, d->dma.status,
-			     d->dma.error);
-		wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
-				  (const void *)d, sizeof(*d), false);
-
-		pa = wil_desc_addr(&d->dma.addr);
-		if (ctx->mapped_as_page)
-			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
-		else
-			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
-
-		if (skb) {
-			if (d->dma.error == 0) {
-				ndev->stats.tx_packets++;
-				stats->tx_packets++;
-				ndev->stats.tx_bytes += skb->len;
-				stats->tx_bytes += skb->len;
-			} else {
-				ndev->stats.tx_errors++;
-				stats->tx_errors++;
-			}
+		new_swtail = (lf + 1) % vring->size;
+		while (vring->swtail != new_swtail) {
+			struct vring_tx_desc dd, *d = &dd;
+			u16 dmalen;
+			struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+			struct sk_buff *skb = ctx->skb;
+			_d = &vring->va[vring->swtail].tx;
+
+			*d = *_d;
 
-			dev_kfree_skb_any(skb);
+			dmalen = le16_to_cpu(d->dma.length);
+			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+					      d->dma.error);
+			wil_dbg_txrx(wil,
+				     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+				     vring->swtail, dmalen, d->dma.status,
+				     d->dma.error);
+			wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+					  (const void *)d, sizeof(*d), false);
+
+			wil_txdesc_unmap(dev, d, ctx);
+
+			if (skb) {
+				if (d->dma.error == 0) {
+					ndev->stats.tx_packets++;
+					stats->tx_packets++;
+					ndev->stats.tx_bytes += skb->len;
+					stats->tx_bytes += skb->len;
+				} else {
+					ndev->stats.tx_errors++;
+					stats->tx_errors++;
+				}
+
+				dev_kfree_skb_any(skb);
+			}
+			memset(ctx, 0, sizeof(*ctx));
+			/* There is no need to touch HW descriptor:
+			 * - ststus bit TX_DMA_STATUS_DU is set by design,
+			 *   so hardware will not try to process this desc.,
+			 * - rest of descriptor will be initialized on Tx.
+			 */
+			vring->swtail = wil_vring_next_tail(vring);
+			done++;
 		}
-		memset(ctx, 0, sizeof(*ctx));
-		/*
-		 * There is no need to touch HW descriptor:
-		 * - ststus bit TX_DMA_STATUS_DU is set by design,
-		 *   so hardware will not try to process this desc.,
-		 * - rest of descriptor will be initialized on Tx.
-		 */
-		vring->swtail = wil_vring_next_tail(vring);
-		done++;
 	}
 	if (wil_vring_avail_tx(vring) > vring->size/4)
 		netif_tx_wake_all_queues(wil_to_ndev(wil));
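This completion rewrite is the reason ctx->nr_frags exists: hardware sets the TX_DMA_STATUS_DU (descriptor used) bit only on the last descriptor of a frame, so testing the descriptor at swtail, as the old code did, stalls on any fragmented skb. The new outer loop jumps ahead nr_frags descriptors to the frame's last fragment, checks DU there, and only then lets the inner loop reap every descriptor of that frame. Reduced to its skeleton (a sketch; reap_one() is a hypothetical stand-in for the unmap/stats/kfree body above):

/* Two-level completion walk: decide per frame, reap per descriptor */
while (!wil_vring_is_empty(vring)) {
	struct wil_ctx *ctx = &vring->ctx[vring->swtail];
	/* frame spans descriptors swtail .. swtail + nr_frags */
	int lf = (vring->swtail + ctx->nr_frags) % vring->size;
	int new_swtail = (lf + 1) % vring->size;

	if (!(vring->va[lf].tx.dma.status & TX_DMA_STATUS_DU))
		break;		/* frame still owned by hardware */

	while (vring->swtail != new_swtail) {	/* reap the whole frame */
		reap_one(vring, vring->swtail);
		vring->swtail = wil_vring_next_tail(vring);
	}
}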