author		Sathya Perla <sathya.perla@emulex.com>	2015-01-05 05:48:34 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-05 16:32:53 -0500
commit		5f07b3c51abe330c3dd702622c419efffb5757f0 (patch)
tree		c666cc118eaac0087e79fa33119cb69507a1b748 /drivers/net/ethernet/emulex
parent		889ee2c7d70e1ae9fc9341d0e82676c519ffede2 (diff)
be2net: support TX batching using skb->xmit_more flag
This patch uses the skb->xmit_more flag to batch TX requests. TX is
flushed either when xmit_more is false or when there is no more space
in the TXQ.

Skyhawk-R and BEx chips require an even number of WRBs to be posted, so
when a batch of TX requests is accumulated, the last header WRB may need
to be fixed up with an extra dummy WRB.

This patch refactors the be_xmit() routine as a sequence of
be_xmit_enqueue() and be_xmit_flush() calls. The TX completion code is
also updated to be able to unmap/free a batch of skbs rather than a
single skb.

Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
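The batching contract is easiest to see in isolation. Below is a minimal
sketch of the xmit_more pattern this patch adopts, not be2net code:
struct my_priv, my_enqueue() and my_ring_doorbell() are hypothetical
stand-ins for be_tx_obj, be_xmit_enqueue() and be_xmit_flush(). Note
that kernels after v5.2 read the flag via netdev_xmit_more() rather
than skb->xmit_more.

/* Minimal sketch of the xmit_more batching pattern; NOT be2net code.
 * my_priv, my_enqueue() and my_ring_doorbell() are hypothetical
 * stand-ins for be_tx_obj, be_xmit_enqueue() and be_xmit_flush().
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	u16 pend_descs;	/* descriptors enqueued but not yet notified to HW */
	u16 ring_free;	/* free slots left in the TX ring */
};

static u16 my_enqueue(struct my_priv *priv, struct sk_buff *skb);
static void my_ring_doorbell(struct my_priv *priv, u16 ndescs);

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);
	u16 used;

	/* Step 1: write this skb's descriptors into the ring but do not
	 * notify the HW yet (the role of be_xmit_enqueue()).
	 */
	used = my_enqueue(priv, skb);
	if (unlikely(!used)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	priv->pend_descs += used;

	/* Step 2: ring the doorbell only when the stack signals the end
	 * of the batch (xmit_more is false) or the ring is nearly full,
	 * amortizing one expensive MMIO write over many skbs (the role
	 * of be_xmit_flush()).
	 */
	if (!skb->xmit_more || priv->ring_free < MAX_SKB_FRAGS + 2) {
		my_ring_doorbell(priv, priv->pend_descs);
		priv->pend_descs = 0;
	}

	return NETDEV_TX_OK;
}

On Skyhawk-R and BEx, the flush step additionally pads an odd batch with
a dummy WRB and rewrites the num_wrb field of the last header WRB, which
is what the TX_HDR_WRB_* defines added to be_hw.h below support.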
Diffstat (limited to 'drivers/net/ethernet/emulex')
-rw-r--r--	drivers/net/ethernet/emulex/benet/be.h	4
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_ethtool.c	2
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_hw.h	5
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c	238
4 files changed, 133 insertions, 116 deletions
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 712e7f8e1df7..9fa2569f56cb 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -243,7 +243,6 @@ struct be_tx_stats {
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_reqs;
-	u64 tx_wrbs;
 	u64 tx_compl;
 	ulong tx_jiffies;
 	u32 tx_stops;
@@ -266,6 +265,9 @@ struct be_tx_obj {
 	/* Remember the skbs that were transmitted */
 	struct sk_buff *sent_skb_list[TX_Q_LEN];
 	struct be_tx_stats stats;
+	u16 pend_wrb_cnt;	/* Number of WRBs yet to be given to HW */
+	u16 last_req_wrb_cnt;	/* wrb cnt of the last req in the Q */
+	u16 last_req_hdr;	/* index of the last req's hdr-wrb */
 } ____cacheline_aligned_in_smp;
 
 /* Struct to remember the pages posted for rx frags */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 73a500ccbf69..32c53bc0e07a 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
 	{DRVSTAT_TX_INFO(tx_pkts)},
 	/* Number of skbs queued for trasmission by the driver */
 	{DRVSTAT_TX_INFO(tx_reqs)},
-	/* Number of TX work request blocks DMAed to HW */
-	{DRVSTAT_TX_INFO(tx_wrbs)},
 	/* Number of times the TX queue was stopped due to lack
 	 * of spaces in the TXQ.
 	 */
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 295ee0835ba0..6d7b3a4d3cff 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -311,6 +311,11 @@ struct amap_eth_hdr_wrb {
 	u8 vlan_tag[16];
 } __packed;
 
+#define TX_HDR_WRB_COMPL	1		/* word 2 */
+#define TX_HDR_WRB_EVT		(1 << 1)	/* word 2 */
+#define TX_HDR_WRB_NUM_SHIFT	13		/* word 2: bits 13:17 */
+#define TX_HDR_WRB_NUM_MASK	0x1F		/* word 2: bits 13:17 */
+
 struct be_eth_hdr_wrb {
 	u32 dw[4];
 };
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 41a0a5498da7..37a26b0b7e33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -662,41 +662,22 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 		netif_carrier_off(netdev);
 }
 
-static void be_tx_stats_update(struct be_tx_obj *txo,
-			       u32 wrb_cnt, u32 copied, u32 gso_segs,
-			       bool stopped)
+static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
 
 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
-	stats->tx_wrbs += wrb_cnt;
-	stats->tx_bytes += copied;
-	stats->tx_pkts += (gso_segs ? gso_segs : 1);
-	if (stopped)
-		stats->tx_stops++;
+	stats->tx_bytes += skb->len;
+	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
 	u64_stats_update_end(&stats->sync);
 }
 
-/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
-			   bool *dummy)
+/* Returns number of WRBs needed for the skb */
+static u32 skb_wrb_cnt(struct sk_buff *skb)
 {
-	int cnt = (skb->len > skb->data_len);
-
-	cnt += skb_shinfo(skb)->nr_frags;
-
-	/* to account for hdr wrb */
-	cnt++;
-	if (lancer_chip(adapter) || !(cnt & 1)) {
-		*dummy = false;
-	} else {
-		/* add a dummy to make it an even num */
-		cnt++;
-		*dummy = true;
-	}
-	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
-	return cnt;
+	/* +1 for the header wrb */
+	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
 }
 
 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
@@ -770,11 +751,14 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
 	}
 
-	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
-	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
-	SET_TX_WRB_HDR_BITS(event, hdr, 1);
 	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
 	SET_TX_WRB_HDR_BITS(len, hdr, len);
+
+	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
+	 * When this hack is not needed, the evt bit is set while ringing DB
+	 */
+	if (skip_hw_vlan)
+		SET_TX_WRB_HDR_BITS(event, hdr, 1);
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -794,22 +778,24 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 	}
 }
 
-static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
-			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
-			bool skip_hw_vlan)
+/* Returns the number of WRBs used up by the skb */
+static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
+			   struct sk_buff *skb, bool skip_hw_vlan)
 {
-	dma_addr_t busaddr;
-	int i, copied = 0;
+	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
 	struct device *dev = &adapter->pdev->dev;
-	struct sk_buff *first_skb = skb;
-	struct be_eth_wrb *wrb;
+	struct be_queue_info *txq = &txo->q;
 	struct be_eth_hdr_wrb *hdr;
 	bool map_single = false;
-	u16 map_head;
+	struct be_eth_wrb *wrb;
+	dma_addr_t busaddr;
+	u16 head = txq->head;
 
 	hdr = queue_head_node(txq);
+	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
+	be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
 	queue_head_inc(txq);
-	map_head = txq->head;
 
 	if (skb->len > skb->data_len) {
 		int len = skb_headlen(skb);
@@ -839,19 +825,23 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 		copied += skb_frag_size(frag);
 	}
 
-	if (dummy_wrb) {
-		wrb = queue_head_node(txq);
-		wrb_fill(wrb, 0, 0);
-		be_dws_cpu_to_le(wrb, sizeof(*wrb));
-		queue_head_inc(txq);
-	}
+	BUG_ON(txo->sent_skb_list[head]);
+	txo->sent_skb_list[head] = skb;
+	txo->last_req_hdr = head;
+	atomic_add(wrb_cnt, &txq->used);
+	txo->last_req_wrb_cnt = wrb_cnt;
+	txo->pend_wrb_cnt += wrb_cnt;
 
-	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
-	be_dws_cpu_to_le(hdr, sizeof(*hdr));
+	be_tx_stats_update(txo, skb);
+	return wrb_cnt;
 
-	return copied;
 dma_err:
-	txq->head = map_head;
+	/* Bring the queue back to the state it was in before this
+	 * routine was invoked.
+	 */
+	txq->head = head;
+	/* skip the first wrb (hdr); it's not mapped */
+	queue_head_inc(txq);
 	while (copied) {
 		wrb = queue_head_node(txq);
 		unmap_tx_frag(dev, wrb, map_single);
@@ -860,6 +850,7 @@ dma_err:
 		adapter->drv_stats.dma_map_errors++;
 		queue_head_inc(txq);
 	}
+	txq->head = head;
 	return 0;
 }
 
@@ -1030,52 +1021,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
 	return skb;
 }
 
+static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+	struct be_queue_info *txq = &txo->q;
+	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
+
+	/* Mark the last request eventable if it hasn't been marked already */
+	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
+		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
+
+	/* compose a dummy wrb if there are odd set of wrbs to notify */
+	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
+		wrb_fill(queue_head_node(txq), 0, 0);
+		queue_head_inc(txq);
+		atomic_inc(&txq->used);
+		txo->pend_wrb_cnt++;
+		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
+					   TX_HDR_WRB_NUM_SHIFT);
+		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
+					  TX_HDR_WRB_NUM_SHIFT);
+	}
+	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
+	txo->pend_wrb_cnt = 0;
+}
+
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+	bool skip_hw_vlan = false, flush = !skb->xmit_more;
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+	u16 q_idx = skb_get_queue_mapping(skb);
+	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
 	struct be_queue_info *txq = &txo->q;
-	bool dummy_wrb, stopped = false;
-	u32 wrb_cnt = 0, copied = 0;
-	bool skip_hw_vlan = false;
-	u32 start = txq->head;
+	u16 wrb_cnt;
 
 	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-	if (!skb) {
-		tx_stats(txo)->tx_drv_drops++;
-		return NETDEV_TX_OK;
-	}
-
-	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+	if (unlikely(!skb))
+		goto drop;
 
-	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
-			      skip_hw_vlan);
-	if (copied) {
-		int gso_segs = skb_shinfo(skb)->gso_segs;
+	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+	if (unlikely(!wrb_cnt)) {
+		dev_kfree_skb_any(skb);
+		goto drop;
+	}
 
-		/* record the sent skb in the sent_skb table */
-		BUG_ON(txo->sent_skb_list[start]);
-		txo->sent_skb_list[start] = skb;
+	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+		netif_stop_subqueue(netdev, q_idx);
+		tx_stats(txo)->tx_stops++;
+	}
 
-		/* Ensure txq has space for the next skb; Else stop the queue
-		 * *BEFORE* ringing the tx doorbell, so that we serialze the
-		 * tx compls of the current transmit which'll wake up the queue
-		 */
-		atomic_add(wrb_cnt, &txq->used);
-		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
-								txq->len) {
-			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
-			stopped = true;
-		}
+	if (flush || __netif_subqueue_stopped(netdev, q_idx))
+		be_xmit_flush(adapter, txo);
 
-		be_txq_notify(adapter, txo, wrb_cnt);
+	return NETDEV_TX_OK;
+drop:
+	tx_stats(txo)->tx_drv_drops++;
+	/* Flush the already enqueued tx requests */
+	if (flush && txo->pend_wrb_cnt)
+		be_xmit_flush(adapter, txo);
 
-		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
-	} else {
-		txq->head = start;
-		tx_stats(txo)->tx_drv_drops++;
-		dev_kfree_skb_any(skb);
-	}
 	return NETDEV_TX_OK;
 }
 
@@ -1959,32 +1962,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 static u16 be_tx_compl_process(struct be_adapter *adapter,
 			       struct be_tx_obj *txo, u16 last_index)
 {
+	struct sk_buff **sent_skbs = txo->sent_skb_list;
 	struct be_queue_info *txq = &txo->q;
+	u16 frag_index, num_wrbs = 0;
+	struct sk_buff *skb = NULL;
+	bool unmap_skb_hdr = false;
 	struct be_eth_wrb *wrb;
-	struct sk_buff **sent_skbs = txo->sent_skb_list;
-	struct sk_buff *sent_skb;
-	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
-	bool unmap_skb_hdr = true;
-
-	sent_skb = sent_skbs[txq->tail];
-	BUG_ON(!sent_skb);
-	sent_skbs[txq->tail] = NULL;
-
-	/* skip header wrb */
-	queue_tail_inc(txq);
 
 	do {
-		cur_index = txq->tail;
+		if (sent_skbs[txq->tail]) {
+			/* Free skb from prev req */
+			if (skb)
+				dev_consume_skb_any(skb);
+			skb = sent_skbs[txq->tail];
+			sent_skbs[txq->tail] = NULL;
+			queue_tail_inc(txq);	/* skip hdr wrb */
+			num_wrbs++;
+			unmap_skb_hdr = true;
+		}
 		wrb = queue_tail_node(txq);
+		frag_index = txq->tail;
 		unmap_tx_frag(&adapter->pdev->dev, wrb,
-			      (unmap_skb_hdr && skb_headlen(sent_skb)));
+			      (unmap_skb_hdr && skb_headlen(skb)));
 		unmap_skb_hdr = false;
-
-		num_wrbs++;
 		queue_tail_inc(txq);
-	} while (cur_index != last_index);
+		num_wrbs++;
+	} while (frag_index != last_index);
+	dev_consume_skb_any(skb);
 
-	dev_consume_skb_any(sent_skb);
 	return num_wrbs;
 }
 
@@ -2068,12 +2073,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
 {
+	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+	struct device *dev = &adapter->pdev->dev;
 	struct be_tx_obj *txo;
 	struct be_queue_info *txq;
 	struct be_eth_tx_compl *txcp;
-	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
-	struct sk_buff *sent_skb;
-	bool dummy_wrb;
 	int i, pending_txqs;
 
 	/* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2099,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 			atomic_sub(num_wrbs, &txq->used);
 			timeo = 0;
 		}
-		if (atomic_read(&txq->used) == 0)
+		if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
 			pending_txqs--;
 	}
 
@@ -2105,21 +2109,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 		mdelay(1);
 	} while (true);
 
+	/* Free enqueued TX that was never notified to HW */
 	for_all_tx_queues(adapter, txo, i) {
 		txq = &txo->q;
-		if (atomic_read(&txq->used))
-			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
-				atomic_read(&txq->used));
 
-		/* free posted tx for which compls will never arrive */
-		while (atomic_read(&txq->used)) {
-			sent_skb = txo->sent_skb_list[txq->tail];
+		if (atomic_read(&txq->used)) {
+			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
+				 i, atomic_read(&txq->used));
+			notified_idx = txq->tail;
 			end_idx = txq->tail;
-			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
-						   &dummy_wrb);
-			index_adv(&end_idx, num_wrbs - 1, txq->len);
+			index_adv(&end_idx, atomic_read(&txq->used) - 1,
+				  txq->len);
+			/* Use the tx-compl process logic to handle requests
+			 * that were not sent to the HW.
+			 */
 			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
 			atomic_sub(num_wrbs, &txq->used);
+			BUG_ON(atomic_read(&txq->used));
+			txo->pend_wrb_cnt = 0;
+			/* Since hw was never notified of these requests,
+			 * reset TXQ indices
+			 */
+			txq->head = notified_idx;
+			txq->tail = notified_idx;
 		}
 	}
 }