author	David S. Miller <davem@davemloft.net>	2010-12-21 15:17:29 -0500
committer	David S. Miller <davem@davemloft.net>	2010-12-21 15:17:29 -0500
commit	a13c13273a206e0c4b9a814277fb50529457abe7 (patch)
tree	2bf6088a59fef1e032fe2fbb3ff2d0ef7a2bc7f4 /drivers/net/sfc
parent	34a52f363ab6bcf6d50a65c153dec03f3fb32653 (diff)
parent	c04bfc6b223662c42a77727342c1df7d39e686a2 (diff)
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--	drivers/net/sfc/efx.c		24
-rw-r--r--	drivers/net/sfc/efx.h		2
-rw-r--r--	drivers/net/sfc/net_driver.h	13
-rw-r--r--	drivers/net/sfc/tx.c		111
4 files changed, 35 insertions, 115 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 2166c1d0a533..711449c6e675 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -461,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 		}
 	}
 
-	spin_lock_init(&channel->tx_stop_lock);
-	atomic_set(&channel->tx_stop_count, 1);
-
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
 	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -1406,11 +1403,11 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	efx_for_each_channel(channel, efx) {
-		if (efx_dev_registered(efx))
-			efx_wake_queue(channel);
+	if (efx_dev_registered(efx))
+		netif_tx_wake_all_queues(efx->net_dev);
+
+	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
-	}
 
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
@@ -1498,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		struct efx_channel *channel;
-		efx_for_each_channel(channel, efx)
-			efx_stop_queue(channel);
+		netif_tx_stop_all_queues(efx->net_dev);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1896,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_channel *channel;
 	int rc;
 
 	net_dev->watchdog_timeo = 5 * HZ;
@@ -1918,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 	if (rc)
 		goto fail_locked;
 
+	efx_for_each_channel(channel, efx) {
+		struct efx_tx_queue *tx_queue;
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			tx_queue->core_txq = netdev_get_tx_queue(
+				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+		}
+	}
+
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(efx->net_dev);
 
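The block added to efx_register_netdev() above caches each hardware queue's core netdev queue once, instead of recomputing netdev_get_tx_queue(dev, queue / EFX_TXQ_TYPES) on every stop/wake. The mapping is simple: each channel owns EFX_TXQ_TYPES hardware queues (one with and one without checksum offload, per the two-entry tx_queue[] array in net_driver.h below), and integer division folds them onto the channel's single core queue. A stand-alone sketch of that arithmetic, assuming a hypothetical 3-channel NIC:

#include <stdio.h>

#define EFX_TXQ_TYPES 2	/* per-channel HW queues: with/without checksum offload */

int main(void)
{
	/* hypothetical 3-channel NIC: hardware TX queues 0..5 */
	for (unsigned int queue = 0; queue < 6; queue++)
		printf("hw queue %u -> core txq %u\n",
		       queue, queue / EFX_TXQ_TYPES);
	return 0;
}

Both hardware queues of a channel therefore share one core_txq, which is why either of them may stop or wake the same core queue in the tx.c changes below.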
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 003fdb35b4bb..d43a7e5212b1 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,8 +36,6 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 76f2fb197f0a..bdce66ddf93a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -136,6 +136,7 @@ struct efx_tx_buffer {
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
  * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -148,8 +149,6 @@ struct efx_tx_buffer {
  *	variable indicates that the queue is empty.  This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
- * @stopped: Stopped count.
- *	Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
  *	This is the number of buffers that have been added to the
  *	software ring.
@@ -179,7 +178,7 @@ struct efx_tx_queue {
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
 	unsigned queue;
 	struct efx_channel *channel;
-	struct efx_nic *nic;
+	struct netdev_queue *core_txq;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
@@ -188,7 +187,6 @@ struct efx_tx_queue {
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
 	unsigned int old_write_count;
-	int stopped;
 
 	/* Members used only on the xmit path */
 	unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -321,7 +319,6 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
- * @reset_work: Scheduled reset work thread
  * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
@@ -342,8 +339,6 @@ enum efx_rx_alloc_method {
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
  * @rx_queue: RX queue for this channel
- * @tx_stop_count: Core TX queue stop count
- * @tx_stop_lock: Core TX queue stop lock
  * @tx_queue: TX queues for this channel
  */
 struct efx_channel {
@@ -382,10 +377,6 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-
-	atomic_t tx_stop_count;
-	spinlock_t tx_stop_lock;
-
 	struct efx_tx_queue tx_queue[2];
 };
 
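The kernel-doc above calls out cache-line ping-pong between the xmit and completion paths, which is what the ____cacheline_aligned_in_smp markers on read_count and insert_count arrange: each path's hot counters start a fresh cache line, so the two CPUs working the queue do not keep invalidating each other's lines. A user-space analogue of that layout trick, with a toy struct and an assumed 64-byte line size:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

/* Toy analogue of struct efx_tx_queue's hot fields; alignas(64) plays
 * the role of ____cacheline_aligned_in_smp. */
struct toy_tx_queue {
	/* members used mainly on the completion path */
	alignas(64) unsigned int read_count;
	unsigned int old_write_count;

	/* members used only on the xmit path */
	alignas(64) unsigned int insert_count;
	unsigned int old_read_count;
};

int main(void)
{
	printf("read_count @ %zu, insert_count @ %zu\n",
	       offsetof(struct toy_tx_queue, read_count),
	       offsetof(struct toy_tx_queue, insert_count));
	return 0;
}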
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index bdb92b4af683..2f5e9da657bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
  */
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue. */
-void efx_stop_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	spin_lock_bh(&channel->tx_stop_lock);
-	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
-	atomic_inc(&channel->tx_stop_count);
-	netif_tx_stop_queue(
-		netdev_get_tx_queue(efx->net_dev,
-				    tx_queue->queue / EFX_TXQ_TYPES));
-
-	spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	local_bh_disable();
-	if (atomic_dec_and_lock(&channel->tx_stop_count,
-				&channel->tx_stop_lock)) {
-		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
-		netif_tx_wake_queue(
-			netdev_get_tx_queue(efx->net_dev,
-					    tx_queue->queue / EFX_TXQ_TYPES));
-		spin_unlock(&channel->tx_stop_lock);
-	}
-	local_bh_enable();
-}
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer)
 {
@@ -234,9 +190,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			 * checked.  Update the xmit path's
 			 * copy of read_count.
 			 */
-			++tx_queue->stopped;
+			netif_tx_stop_queue(tx_queue->core_txq);
 			/* This memory barrier protects the
-			 * change of stopped from the access
+			 * change of queue state from the access
 			 * of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
@@ -244,10 +200,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0))
-				goto stop;
+			if (unlikely(q_space-- <= 0)) {
+				rc = NETDEV_TX_BUSY;
+				goto unwind;
+			}
 			smp_mb();
-			--tx_queue->stopped;
+			netif_tx_start_queue(tx_queue->core_txq);
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
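This is the producer side of the standard lock-free stop/recheck idiom: stop the queue, issue a full barrier, re-read the consumer's read_count, and restart the queue if completions freed space in the meantime; only if the ring is still full does the function bail out with NETDEV_TX_BUSY. A user-space C11 model of that sequence — smp_mb() played by a seq_cst fence, all toy_* names hypothetical, driven by the example at the end of this page:

#include <stdatomic.h>
#include <stdbool.h>

#define RING_ENTRIES 1024

struct toy_txq {
	unsigned int insert_count;		/* written by the xmit path only */
	_Atomic unsigned int read_count;	/* advanced by the completion path */
	atomic_bool stopped;			/* models the core queue state */
};

/* Returns true if the caller must report NETDEV_TX_BUSY. */
static bool toy_tx_maybe_stop(struct toy_txq *q)
{
	unsigned int fill = q->insert_count -
		atomic_load_explicit(&q->read_count, memory_order_relaxed);

	if (fill < RING_ENTRIES - 1)
		return false;

	atomic_store(&q->stopped, true);		/* netif_tx_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

	/* completions may have advanced read_count since we last looked */
	fill = q->insert_count -
		atomic_load_explicit(&q->read_count, memory_order_relaxed);
	if (fill >= RING_ENTRIES - 1)
		return true;				/* genuinely full */

	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	atomic_store(&q->stopped, false);		/* netif_tx_start_queue() */
	return false;
}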
@@ -307,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc = NETDEV_TX_BUSY;
-
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Work backwards until we hit the original insert pointer value */
@@ -400,32 +351,21 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
-	struct netdev_queue *queue;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
 	efx_dequeue_buffers(tx_queue, index);
 
 	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of
-	 * stopped. */
+	 * separates the update of read_count from the test of the
+	 * queue state. */
 	smp_mb();
-	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+	    likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
-
-			/* Do this under netif_tx_lock(), to avoid racing
-			 * with efx_xmit(). */
-			queue = netdev_get_tx_queue(
-				efx->net_dev,
-				tx_queue->queue / EFX_TXQ_TYPES);
-			__netif_tx_lock(queue, smp_processor_id());
-			if (tx_queue->stopped) {
-				tx_queue->stopped = 0;
-				efx_wake_queue(tx_queue->channel);
-			}
-			__netif_tx_unlock(queue);
+			netif_tx_wake_queue(tx_queue->core_txq);
 		}
 	}
 
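efx_xmit_done() is the consumer half of the same idiom: advance read_count, issue the pairing barrier, then test the queue state and wake the queue once the fill level drops below EFX_TXQ_THRESHOLD. The two smp_mb() calls guarantee that at least one side notices the other — either the xmit path re-reads the new read_count and restarts itself, or the completion path sees the stopped queue and wakes it — which is why the old __netif_tx_lock() dance can go. A completion-side counterpart to the toy_tx_maybe_stop() sketch above (same hypothetical types):

/* Completion-side counterpart to toy_tx_maybe_stop() above. */
static void toy_tx_done(struct toy_txq *q, unsigned int completed)
{
	atomic_fetch_add_explicit(&q->read_count, completed,
				  memory_order_relaxed);

	/* Pairs with the fences in toy_tx_maybe_stop(): separates the
	 * read_count update from the test of the queue state. */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

	if (atomic_load(&q->stopped)) {
		/* insert_count is read racily here, much as the driver
		 * reads its own plain field on the completion path */
		unsigned int fill = q->insert_count -
			atomic_load_explicit(&q->read_count, memory_order_relaxed);
		if (fill < RING_ENTRIES / 2)		/* EFX_TXQ_THRESHOLD */
			atomic_store(&q->stopped, false); /* netif_tx_wake_queue() */
	}
}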
@@ -487,7 +427,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
@@ -523,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Free up TSO header cache */
 	efx_fini_tso(tx_queue);
-
-	/* Release queue's stop on port, if any */
-	if (tx_queue->stopped) {
-		tx_queue->stopped = 0;
-		efx_wake_queue(tx_queue->channel);
-	}
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -770,9 +703,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			 * since the xmit path last checked.  Update
 			 * the xmit path's copy of read_count.
 			 */
-			++tx_queue->stopped;
+			netif_tx_stop_queue(tx_queue->core_txq);
 			/* This memory barrier protects the change of
-			 * stopped from the access of read_count. */
+			 * queue state from the access of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
 				ACCESS_ONCE(tx_queue->read_count);
@@ -784,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				return 1;
 			}
 			smp_mb();
-			--tx_queue->stopped;
+			netif_tx_start_queue(tx_queue->core_txq);
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1124,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	while (1) {
 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc))
-			goto stop;
+		if (unlikely(rc)) {
+			rc2 = NETDEV_TX_BUSY;
+			goto unwind;
+		}
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1154,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	netif_err(efx, tx_err, efx->net_dev,
 		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc2 = NETDEV_TX_BUSY;
-
-	/* Stop the queue if it wasn't stopped before. */
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
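With the stop: labels gone, the TSO path reports a full ring exactly like the fast path: set rc2 = NETDEV_TX_BUSY and fall through unwind, relying on the queue having already been stopped inside efx_tx_queue_insert(). Returning NETDEV_TX_BUSY without freeing the skb lets the core requeue the packet and retry once the queue is woken. A minimal main() that drives the two toy halves sketched earlier through one full-stop-then-wake cycle (single-threaded for clarity, so the fences are not exercised concurrently):

int main(void)
{
	struct toy_txq q = { .insert_count = 0 };

	atomic_init(&q.read_count, 0);
	atomic_init(&q.stopped, false);

	while (!toy_tx_maybe_stop(&q))		/* enqueue until the ring fills */
		q.insert_count++;

	toy_tx_done(&q, RING_ENTRIES / 2);	/* completions arrive... */
	return atomic_load(&q.stopped);		/* ...and exit 0: queue woken */
}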