author	Ben Hutchings <bhutchings@solarflare.com>	2012-05-21 20:27:58 -0400
committer	Ben Hutchings <bhutchings@solarflare.com>	2012-08-24 14:00:27 -0400
commit	14bf718fb97efe9ff649c317e7d87a3617b13e7c (patch)
tree	421f6e0546d8d04ccad78292178c0fc560773a12 /drivers/net/ethernet/sfc
parent	7668ff9c2ad7d354655e23afa836a92d54d2ea63 (diff)
sfc: Stop TX queues before they fill up
We now have a definite upper bound on the number of descriptors per
skb; use that to stop the queue when the next packet might not fit.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
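For a rough feel of the threshold arithmetic, here is a standalone C sketch; the ring size and per-skb descriptor bound below are hypothetical stand-ins for efx->txq_entries and the value returned by efx_tx_max_skb_descs():

/* Standalone sketch of the stop/wake threshold arithmetic.
 * The numbers are hypothetical; in the driver they come from
 * efx->txq_entries and efx_tx_max_skb_descs().
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int txq_entries = 1024;   /* TX ring size (assumed) */
	unsigned int max_skb_descs = 18;   /* per-skb descriptor bound (assumed) */

	/* Stop when one more maximally-fragmented skb might fill the
	 * ring; wake again once it has drained half way back to empty.
	 */
	unsigned int stop_thresh = txq_entries - max_skb_descs;
	unsigned int wake_thresh = stop_thresh / 2;

	unsigned int fill_level = 1010;    /* descriptors currently in flight */
	bool stop = fill_level >= stop_thresh;

	printf("stop at %u, wake at %u, fill %u -> %s\n",
	       stop_thresh, wake_thresh, fill_level,
	       stop ? "stop queue" : "keep running");
	return 0;
}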
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--	drivers/net/ethernet/sfc/efx.c		 10
-rw-r--r--	drivers/net/ethernet/sfc/net_driver.h	  5
-rw-r--r--	drivers/net/ethernet/sfc/tx.c		212
3 files changed, 112 insertions, 115 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65a8d49106a4..3b3f08489a5e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -630,6 +630,16 @@ static void efx_start_datapath(struct efx_nic *efx)
 	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
 					 sizeof(struct efx_rx_page_state));
 
+	/* We must keep at least one descriptor in a TX ring empty.
+	 * We could avoid this when the queue size does not exactly
+	 * match the hardware ring size, but it's not that important.
+	 * Therefore we stop the queue when one more skb might fill
+	 * the ring completely.  We wake it when half way back to
+	 * empty.
+	 */
+	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
 		efx_for_each_channel_tx_queue(tx_queue, channel)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0ac01fa6e63c..28a6d6258692 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -665,6 +665,8 @@ struct vfdi_status;
  *	should be allocated for this NIC
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
  * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
  * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
  * @sram_lim_qw: Qword address limit of SRAM
@@ -775,6 +777,9 @@ struct efx_nic {
 
 	unsigned rxq_entries;
 	unsigned txq_entries;
+	unsigned int txq_stop_thresh;
+	unsigned int txq_wake_thresh;
+
 	unsigned tx_dc_base;
 	unsigned rx_dc_base;
 	unsigned sram_lim_qw;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 24c82f3ce0f3..330d9111a339 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,14 +22,6 @@
 #include "nic.h"
 #include "workarounds.h"
 
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer,
 			       unsigned int *pkts_compl,
@@ -138,6 +130,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 	return max_descs;
 }
 
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+	else
+		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+	/* We need to consider both queues that the net core sees as one */
+	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+	struct efx_nic *efx = txq1->efx;
+	unsigned int fill_level;
+
+	fill_level = max(txq1->insert_count - txq1->old_read_count,
+			 txq2->insert_count - txq2->old_read_count);
+	if (likely(fill_level < efx->txq_stop_thresh))
+		return;
+
+	/* We used the stale old_read_count above, which gives us a
+	 * pessimistic estimate of the fill level (which may even
+	 * validly be >= efx->txq_entries).  Now try again using
+	 * read_count (more likely to be a cache miss).
+	 *
+	 * If we read read_count and then conditionally stop the
+	 * queue, it is possible for the completion path to race with
+	 * us and complete all outstanding descriptors in the middle,
+	 * after which there will be no more completions to wake it.
+	 * Therefore we stop the queue first, then read read_count
+	 * (with a memory barrier to ensure the ordering), then
+	 * restart the queue if the fill level turns out to be low
+	 * enough.
+	 */
+	netif_tx_stop_queue(txq1->core_txq);
+	smp_mb();
+	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+	fill_level = max(txq1->insert_count - txq1->old_read_count,
+			 txq2->insert_count - txq2->old_read_count);
+	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+	if (likely(fill_level < efx->txq_stop_thresh)) {
+		smp_mb();
+		if (likely(!efx->loopback_selftest))
+			netif_tx_start_queue(txq1->core_txq);
+	}
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
@@ -151,7 +193,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
  * This function is split out from efx_hard_start_xmit to allow the
  * loopback test to direct packets via specific TX queues.
  *
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +202,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
-	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+	unsigned int len, unmap_len = 0, insert_ptr;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	unsigned short dma_flags;
-	int q_space, i = 0;
-	netdev_tx_t rc = NETDEV_TX_OK;
+	int i = 0;
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -183,9 +224,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		return NETDEV_TX_OK;
 	}
 
-	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->txq_entries - 1 - fill_level;
-
 	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
@@ -205,32 +243,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Add to TX queue, splitting across DMA boundaries */
 	do {
-		if (unlikely(q_space-- <= 0)) {
-			/* It might be that completions have
-			 * happened since the xmit path last
-			 * checked.  Update the xmit path's
-			 * copy of read_count.
-			 */
-			netif_tx_stop_queue(tx_queue->core_txq);
-			/* This memory barrier protects the
-			 * change of queue state from the access
-			 * of read_count. */
-			smp_mb();
-			tx_queue->old_read_count =
-				ACCESS_ONCE(tx_queue->read_count);
-			fill_level = (tx_queue->insert_count
-				      - tx_queue->old_read_count);
-			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0)) {
-				rc = NETDEV_TX_BUSY;
-				goto unwind;
-			}
-			smp_mb();
-			if (likely(!efx->loopback_selftest))
-				netif_tx_start_queue(
-					tx_queue->core_txq);
-		}
-
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_tsoh_free(tx_queue, buffer);
@@ -277,6 +289,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	return NETDEV_TX_OK;
 
  dma_err:
@@ -288,7 +302,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
 
- unwind:
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -309,7 +322,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			       DMA_TO_DEVICE);
 	}
 
-	return rc;
+	return NETDEV_TX_OK;
 }
 
 /* Remove packets from the TX queue
@@ -448,6 +461,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
+	struct efx_tx_queue *txq2;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -455,15 +469,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
 	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
-	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of the
-	 * queue state. */
+	/* See if we need to restart the netif queue.  This memory
+	 * barrier ensures that we write read_count (inside
+	 * efx_dequeue_buffers()) before reading the queue status.
+	 */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
 	    likely(efx->port_enabled) &&
 	    likely(netif_device_present(efx->net_dev))) {
-		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_TXQ_THRESHOLD(efx))
+		txq2 = efx_tx_queue_partner(tx_queue);
+		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+				 txq2->insert_count - txq2->read_count);
+		if (fill_level <= efx->txq_wake_thresh)
 			netif_tx_wake_queue(tx_queue->core_txq);
 	}
 
@@ -776,47 +793,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
  * @len:		Length of fragment
  * @final_buffer:	The final buffer inserted into the queue
  *
- * Push descriptors onto the TX queue.  Return 0 on success or 1 if
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
  */
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
-			       dma_addr_t dma_addr, unsigned len,
-			       struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+				dma_addr_t dma_addr, unsigned len,
+				struct efx_tx_buffer **final_buffer)
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, fill_level, insert_ptr;
-	int q_space;
+	unsigned dma_len, insert_ptr;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
-	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->txq_entries - 1 - fill_level;
-
 	while (1) {
-		if (unlikely(q_space-- <= 0)) {
-			/* It might be that completions have happened
-			 * since the xmit path last checked.  Update
-			 * the xmit path's copy of read_count.
-			 */
-			netif_tx_stop_queue(tx_queue->core_txq);
-			/* This memory barrier protects the change of
-			 * queue state from the access of read_count. */
-			smp_mb();
-			tx_queue->old_read_count =
-				ACCESS_ONCE(tx_queue->read_count);
-			fill_level = (tx_queue->insert_count
-				      - tx_queue->old_read_count);
-			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0)) {
-				*final_buffer = NULL;
-				return 1;
-			}
-			smp_mb();
-			netif_tx_start_queue(tx_queue->core_txq);
-		}
-
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
@@ -847,7 +836,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 	EFX_BUG_ON_PARANOID(!len);
 	buffer->len = len;
 	*final_buffer = buffer;
-	return 0;
 }
 
 
@@ -975,20 +963,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
  * @st:			TSO state
  *
  * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
  */
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-					 const struct sk_buff *skb,
-					 struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+					  const struct sk_buff *skb,
+					  struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer;
-	int n, rc;
+	int n;
 
 	if (st->in_len == 0)
-		return 0;
+		return;
 	if (st->packet_space == 0)
-		return 0;
+		return;
 
 	EFX_BUG_ON_PARANOID(st->in_len <= 0);
 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -999,26 +986,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	st->out_len -= n;
 	st->in_len -= n;
 
-	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-	if (likely(rc == 0)) {
-		if (st->out_len == 0) {
-			/* Transfer ownership of the skb */
-			buffer->skb = skb;
-			buffer->flags = EFX_TX_BUF_SKB;
-		} else if (st->packet_space != 0) {
-			buffer->flags = EFX_TX_BUF_CONT;
-		}
-
-		if (st->in_len == 0) {
-			/* Transfer ownership of the DMA mapping */
-			buffer->unmap_len = st->unmap_len;
-			buffer->flags |= st->dma_flags;
-			st->unmap_len = 0;
-		}
+	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
+
+	if (st->out_len == 0) {
+		/* Transfer ownership of the skb */
+		buffer->skb = skb;
+		buffer->flags = EFX_TX_BUF_SKB;
+	} else if (st->packet_space != 0) {
+		buffer->flags = EFX_TX_BUF_CONT;
+	}
+
+	if (st->in_len == 0) {
+		/* Transfer ownership of the DMA mapping */
+		buffer->unmap_len = st->unmap_len;
+		buffer->flags |= st->dma_flags;
+		st->unmap_len = 0;
 	}
 
 	st->dma_addr += n;
-	return rc;
 }
 
 
@@ -1112,13 +1097,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
  *
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	int frag_i, rc, rc2 = NETDEV_TX_OK;
+	int frag_i, rc;
 	struct tso_state state;
 
 	/* Find the packet protocol and sanity-check it */
@@ -1150,11 +1135,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 		goto mem_err;
 
 	while (1) {
-		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc)) {
-			rc2 = NETDEV_TX_BUSY;
-			goto unwind;
-		}
+		tso_fill_packet_with_fragment(tx_queue, skb, &state);
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1178,6 +1159,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
 
@@ -1186,7 +1169,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1186 "Out of memory for TSO headers, or DMA mapping error\n"); 1169 "Out of memory for TSO headers, or DMA mapping error\n");
1187 dev_kfree_skb_any(skb); 1170 dev_kfree_skb_any(skb);
1188 1171
1189 unwind:
1190 /* Free the DMA mapping we were in the process of writing out */ 1172 /* Free the DMA mapping we were in the process of writing out */
1191 if (state.unmap_len) { 1173 if (state.unmap_len) {
1192 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) 1174 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
@@ -1198,7 +1180,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	}
 
 	efx_enqueue_unwind(tx_queue);
-	return rc2;
+	return NETDEV_TX_OK;
 }
 
 