Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--  drivers/net/sfc/tx.c  111
1 file changed, 19 insertions(+), 92 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index bdb92b4af683..2f5e9da657bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
  */
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue. */
-void efx_stop_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	spin_lock_bh(&channel->tx_stop_lock);
-	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
-	atomic_inc(&channel->tx_stop_count);
-	netif_tx_stop_queue(
-		netdev_get_tx_queue(efx->net_dev,
-				    tx_queue->queue / EFX_TXQ_TYPES));
-
-	spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	local_bh_disable();
-	if (atomic_dec_and_lock(&channel->tx_stop_count,
-				&channel->tx_stop_lock)) {
-		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
-		netif_tx_wake_queue(
-			netdev_get_tx_queue(efx->net_dev,
-					    tx_queue->queue / EFX_TXQ_TYPES));
-		spin_unlock(&channel->tx_stop_lock);
-	}
-	local_bh_enable();
-}
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer)
 {
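
With efx_stop_queue() and efx_wake_queue() removed, the driver no longer maintains a nested stop count under tx_stop_lock; it stops and wakes the core TX queue directly through a cached struct netdev_queue pointer, tx_queue->core_txq. A minimal sketch of how that pointer could be cached once, reusing the queue mapping the removed helpers recomputed on every call, is shown below. Where this assignment actually lives is an assumption; it is introduced elsewhere in the series, not in this file.

	/* Hypothetical placement: cache the core TX queue for this hardware
	 * queue once, using the same mapping the removed helpers used. */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES);
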
@@ -234,9 +190,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				 * checked. Update the xmit path's
 				 * copy of read_count.
 				 */
-				++tx_queue->stopped;
+				netif_tx_stop_queue(tx_queue->core_txq);
 				/* This memory barrier protects the
-				 * change of stopped from the access
+				 * change of queue state from the access
 				 * of read_count. */
 				smp_mb();
 				tx_queue->old_read_count =
@@ -244,10 +200,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				fill_level = (tx_queue->insert_count
 					      - tx_queue->old_read_count);
 				q_space = efx->txq_entries - 1 - fill_level;
-				if (unlikely(q_space-- <= 0))
-					goto stop;
+				if (unlikely(q_space-- <= 0)) {
+					rc = NETDEV_TX_BUSY;
+					goto unwind;
+				}
 				smp_mb();
-				--tx_queue->stopped;
+				netif_tx_start_queue(tx_queue->core_txq);
 			}
 
 			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
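
The two hunks above keep the usual lockless stop/restart idiom in the xmit path: stop the queue, re-read the consumer index behind a memory barrier, and restart the queue if completions freed space in the meantime; only the mechanism changes, from a driver-private stopped counter to the core queue state. A driver-agnostic sketch of that idiom follows; struct example_ring and its fields are hypothetical stand-ins for the sfc fields of the same names, not code from this patch.

	#include <linux/netdevice.h>

	/* Hypothetical ring state mirroring the sfc fields used above. */
	struct example_ring {
		unsigned int insert_count;	/* advanced by the xmit path */
		unsigned int read_count;	/* advanced by the completion path */
		unsigned int entries;
		struct netdev_queue *core_txq;	/* cached core TX queue */
	};

	/* Called from the xmit path when the ring looks full.  Returns true
	 * if the caller should unwind and report NETDEV_TX_BUSY. */
	static bool example_ring_full(struct example_ring *ring)
	{
		netif_tx_stop_queue(ring->core_txq);

		/* Order the stop against the re-read of read_count; this
		 * pairs with the barrier on the completion side. */
		smp_mb();

		if (ring->insert_count - ACCESS_ONCE(ring->read_count) <
		    ring->entries - 1) {
			/* Completions freed space after all; undo the stop. */
			netif_tx_start_queue(ring->core_txq);
			return false;
		}
		return true;
	}
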
@@ -307,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc = NETDEV_TX_BUSY;
-
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Work backwards until we hit the original insert pointer value */
@@ -400,32 +351,21 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
-	struct netdev_queue *queue;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
 	efx_dequeue_buffers(tx_queue, index);
 
 	/* See if we need to restart the netif queue. This barrier
-	 * separates the update of read_count from the test of
-	 * stopped. */
+	 * separates the update of read_count from the test of the
+	 * queue state. */
 	smp_mb();
-	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+	    likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
-
-			/* Do this under netif_tx_lock(), to avoid racing
-			 * with efx_xmit(). */
-			queue = netdev_get_tx_queue(
-				efx->net_dev,
-				tx_queue->queue / EFX_TXQ_TYPES);
-			__netif_tx_lock(queue, smp_processor_id());
-			if (tx_queue->stopped) {
-				tx_queue->stopped = 0;
-				efx_wake_queue(tx_queue->channel);
-			}
-			__netif_tx_unlock(queue);
+			netif_tx_wake_queue(tx_queue->core_txq);
 		}
 	}
 
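
The wake in efx_xmit_done() above no longer needs the __netif_tx_lock() dance because the stopped state now lives in struct netdev_queue and is tested and changed atomically; there is no driver-private counter left to race with the xmit path. A matching completion-side sketch, continuing the hypothetical example_ring from the previous sketch, might look like this (the half-ring threshold mirrors EFX_TXQ_THRESHOLD() but is an assumption here):

	/* Called after read_count has been advanced for reclaimed buffers. */
	static void example_ring_completed(struct example_ring *ring)
	{
		unsigned int fill_level;

		/* Pairs with the smp_mb() in example_ring_full(): publish the
		 * new read_count before testing the queue state. */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(ring->core_txq))) {
			fill_level = ring->insert_count - ring->read_count;
			if (fill_level < ring->entries / 2u)
				netif_tx_wake_queue(ring->core_txq);
		}
	}
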
@@ -487,7 +427,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
@@ -523,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Free up TSO header cache */
 	efx_fini_tso(tx_queue);
-
-	/* Release queue's stop on port, if any */
-	if (tx_queue->stopped) {
-		tx_queue->stopped = 0;
-		efx_wake_queue(tx_queue->channel);
-	}
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -770,9 +703,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			 * since the xmit path last checked. Update
 			 * the xmit path's copy of read_count.
 			 */
-			++tx_queue->stopped;
+			netif_tx_stop_queue(tx_queue->core_txq);
 			/* This memory barrier protects the change of
-			 * stopped from the access of read_count. */
+			 * queue state from the access of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
 				ACCESS_ONCE(tx_queue->read_count);
@@ -784,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				return 1;
 			}
 			smp_mb();
-			--tx_queue->stopped;
+			netif_tx_start_queue(tx_queue->core_txq);
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
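
The two hunks above apply the same conversion to efx_tx_queue_insert(), the descriptor-allocation helper used by the TSO path: it still returns 1 when no space can be found even after the restart attempt, and, as the next hunk shows, its caller now translates that into NETDEV_TX_BUSY and unwinds instead of jumping to a queue-stopping label.
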
@@ -1124,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	while (1) {
 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc))
-			goto stop;
+		if (unlikely(rc)) {
+			rc2 = NETDEV_TX_BUSY;
+			goto unwind;
+		}
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1154,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	netif_err(efx, tx_err, efx->net_dev,
 		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc2 = NETDEV_TX_BUSY;
-
-	/* Stop the queue if it wasn't stopped before. */
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */