author     Ben Hutchings <bhutchings@solarflare.com>  2010-04-28 05:30:43 -0400
committer  David S. Miller <davem@davemloft.net>      2010-04-28 15:44:44 -0400
commit     a4900ac9f7d10ad062e54dd03125e3619e0ac17a
tree       32a6f316ccbda7994d5c52197d8a2b5ebb60d6da  /drivers/net/sfc/tx.c
parent     5298c37f4d1f0360082be9d9e3a236b9cc114a03
sfc: Create multiple TX queues
Create a core TX queue and 2 hardware TX queues for each channel.  If
separate_tx_channels is set, create equal numbers of RX and TX channels
instead.

Rewrite the channel and queue iteration macros accordingly.

Eliminate efx_channel::used_flags as redundant.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
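For orientation, here is a minimal sketch (not part of the patch; the local
names hw_txq and core_index are illustrative only) of the index arithmetic
the hunks below rely on: each core TX queue owns EFX_TXQ_TYPES consecutive
hardware queues, one of which handles checksum offload.

	/* Map a core TX queue (as chosen by the stack) to its hardware
	 * queues.  Assumes EFX_TXQ_TYPES hardware queues per core queue,
	 * with EFX_TXQ_TYPE_OFFLOAD selecting the checksum-offload one. */
	unsigned int core_index = skb_get_queue_mapping(skb);
	struct efx_tx_queue *hw_txq =
		&efx->tx_queue[EFX_TXQ_TYPES * core_index];

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		hw_txq += EFX_TXQ_TYPE_OFFLOAD;

	/* Going the other way: recover the core queue index from a
	 * hardware queue, as efx_stop_queue()/efx_wake_queue() do below. */
	unsigned int core = hw_txq->queue / EFX_TXQ_TYPES;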
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--  drivers/net/sfc/tx.c  61
1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
  */
 #define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
 
-/* We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_stop_queue(struct efx_nic *efx)
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
 {
-	spin_lock_bh(&efx->netif_stop_lock);
+	struct efx_nic *efx = channel->efx;
+
+	if (!channel->tx_queue)
+		return;
+
+	spin_lock_bh(&channel->tx_stop_lock);
 	EFX_TRACE(efx, "stop TX queue\n");
 
-	atomic_inc(&efx->netif_stop_count);
-	netif_stop_queue(efx->net_dev);
+	atomic_inc(&channel->tx_stop_count);
+	netif_tx_stop_queue(
+		netdev_get_tx_queue(
+			efx->net_dev,
+			channel->tx_queue->queue / EFX_TXQ_TYPES));
 
-	spin_unlock_bh(&efx->netif_stop_lock);
+	spin_unlock_bh(&channel->tx_stop_lock);
 }
 
-/* Wake netif's TX queue
- * We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_wake_queue(struct efx_nic *efx)
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
 {
+	struct efx_nic *efx = channel->efx;
+
+	if (!channel->tx_queue)
+		return;
+
 	local_bh_disable();
-	if (atomic_dec_and_lock(&efx->netif_stop_count,
-				&efx->netif_stop_lock)) {
+	if (atomic_dec_and_lock(&channel->tx_stop_count,
+				&channel->tx_stop_lock)) {
 		EFX_TRACE(efx, "waking TX queue\n");
-		netif_wake_queue(efx->net_dev);
-		spin_unlock(&efx->netif_stop_lock);
+		netif_tx_wake_queue(
+			netdev_get_tx_queue(
+				efx->net_dev,
+				channel->tx_queue->queue / EFX_TXQ_TYPES));
+		spin_unlock(&channel->tx_stop_lock);
 	}
 	local_bh_enable();
 }
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	rc = NETDEV_TX_BUSY;
 
 	if (tx_queue->stopped == 1)
-		efx_stop_queue(efx);
+		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
+	tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
-	else
-		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+		tx_queue += EFX_TXQ_TYPE_OFFLOAD;
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 			netif_tx_lock(efx->net_dev);
 			if (tx_queue->stopped) {
 				tx_queue->stopped = 0;
-				efx_wake_queue(efx);
+				efx_wake_queue(tx_queue->channel);
 			}
 			netif_tx_unlock(efx->net_dev);
 		}
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 	/* Release queue's stop on port, if any */
 	if (tx_queue->stopped) {
 		tx_queue->stopped = 0;
-		efx_wake_queue(tx_queue->efx);
+		efx_wake_queue(tx_queue->channel);
 	}
 }
 
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	/* Stop the queue if it wasn't stopped before. */
 	if (tx_queue->stopped == 1)
-		efx_stop_queue(efx);
+		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
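As a usage note on the stop-count scheme described in the new comment above
efx_stop_queue(): stops nest per channel, and the core queue is only woken
when the count drops back to zero.  A hypothetical caller, for illustration
only and not part of the patch:

	efx_stop_queue(channel);	/* count 0 -> 1: core TX queue stopped */
	efx_stop_queue(channel);	/* count 1 -> 2: still stopped */
	efx_wake_queue(channel);	/* count 2 -> 1: still stopped */
	efx_wake_queue(channel);	/* count 1 -> 0: core TX queue woken */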