author		Ben Hutchings <bhutchings@solarflare.com>	2011-01-10 16:18:20 -0500
committer	Ben Hutchings <bhutchings@solarflare.com>	2011-02-15 14:45:35 -0500
commit		94b274bf5fba6c75b922c8a23ad4b5639a168780 (patch)
tree		48f3bb2629ee14ba620a08098da1908d16bbe22f /drivers/net/sfc/tx.c
parent		525da9072c28df815bff64bf00f3b11ab88face8 (diff)
sfc: Add TX queues for high-priority traffic
Implement the ndo_setup_tc() operation with 2 traffic classes.

Current Solarstorm controllers do not implement TX queue priority, but
they do allow queues to be 'paced' with an enforced delay between
packets.  Paced and unpaced queues are scheduled in round-robin within
two separate hardware bins (paced queues with a large delay may be
placed into a third bin temporarily, but we won't use that).  If there
are queues in both bins, the TX scheduler will alternate between them.

If we make high-priority queues unpaced and best-effort queues paced,
and high-priority queues are mostly empty, a single high-priority queue
can then instantly take 50% of the packet rate regardless of how many
of the best-effort queues have descriptors outstanding.

We do not actually want an enforced delay between packets on best-
effort queues, so we set the pace value to a reserved value that
actually results in a delay of 0.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
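To make the queue numbering concrete, here is a small stand-alone model of the mapping this patch introduces: the forward lookup in efx_hard_start_xmit() and its inverse in efx_init_tx_queue_core_txq(). This is a sketch, not driver code; the constant values are assumptions mirroring the driver's net_driver.h, and n_tx_channels stands in for efx->n_tx_channels.

#include <assert.h>
#include <stdio.h>

/* Assumed values, mirroring net_driver.h at this commit */
#define EFX_TXQ_TYPE_OFFLOAD	1	/* checksum offload enabled */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* high-priority (unpaced) queue */
#define EFX_TXQ_TYPES		4

static const unsigned n_tx_channels = 4;	/* stand-in for efx->n_tx_channels */

/* Forward mapping, as in efx_hard_start_xmit(): a core TX queue number
 * and the checksum mode select a (channel index, hardware queue type).
 */
static void core_to_hw(unsigned core_queue, int csum_partial,
		       unsigned *index, unsigned *type)
{
	*index = core_queue;
	*type = csum_partial ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (*index >= n_tx_channels) {
		*index -= n_tx_channels;
		*type |= EFX_TXQ_TYPE_HIGHPRI;
	}
}

/* Inverse mapping, as in efx_init_tx_queue_core_txq(): a hardware queue
 * number (channel * EFX_TXQ_TYPES + type) maps back to a core queue.
 */
static unsigned hw_to_core(unsigned hw_queue)
{
	return hw_queue / EFX_TXQ_TYPES +
	       ((hw_queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
	/* Check the round trip for every core queue and checksum mode */
	for (unsigned core = 0; core < 2 * n_tx_channels; core++) {
		for (int csum = 0; csum <= 1; csum++) {
			unsigned index, type;

			core_to_hw(core, csum, &index, &type);
			assert(hw_to_core(index * EFX_TXQ_TYPES + type) == core);
			printf("core %u csum %d -> channel %u type %#x\n",
			       core, csum, index, type);
		}
	}
	return 0;
}

With four channels, core queues 0-3 are best-effort and 4-7 are high-priority; the assertion confirms the two functions remain inverses, which is exactly the invariant the comment in the diff below calls out.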
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--	drivers/net/sfc/tx.c	87
1 file changed, 82 insertions(+), 5 deletions(-)
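One detail worth modelling before the diff itself: the tc_to_txq table that efx_setup_tc() programs (see the first hunk) gives each traffic class a contiguous block of core queues. A minimal sketch of that layout rule, assuming an entry shaped like the kernel's struct netdev_tc_txq (an offset/count pair):

/* Sketch of the tc_to_txq layout programmed by efx_setup_tc():
 * class tc owns core queues [tc * n_tx_channels,
 * (tc + 1) * n_tx_channels).
 */
struct tc_txq_entry {
	unsigned offset;	/* first core queue of this class */
	unsigned count;		/* one queue per TX channel */
};

static void fill_tc_map(struct tc_txq_entry *map, unsigned num_tc,
			unsigned n_tx_channels)
{
	for (unsigned tc = 0; tc < num_tc; tc++) {
		map[tc].offset = tc * n_tx_channels;
		map[tc].count = n_tx_channels;
	}
}

With num_tc = 2 and four channels this yields {0, 4} and {4, 4}: class 0 (best-effort) covers queues 0-3 and class 1 (high-priority) covers queues 4-7, matching the forward mapping sketched above.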
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 7e463fb19fb9..1a51653bb92b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,22 +336,89 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
+	struct efx_nic *efx = tx_queue->efx;
+
 	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
-	tx_queue->core_txq = netdev_get_tx_queue(
-		tx_queue->efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused.  We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues.  Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -437,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -459,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -473,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
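
As a usage note: at this point in the kernel's history the ndo_setup_tc() hook takes only the device and the class count, and it is invoked (for example) by the mqprio qdisc when hardware traffic classes are requested. A hedged sketch of such a caller follows; the function name is hypothetical, and only the hook signature shown in the first hunk is assumed.

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Hypothetical caller of the new hook.  efx_setup_tc() itself returns
 * -EINVAL on pre-Falcon-B0 hardware or if num_tc exceeds EFX_MAX_TX_TC.
 */
static int enable_two_classes(struct net_device *net_dev)
{
	const struct net_device_ops *ops = net_dev->netdev_ops;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;
	/* class 0 = best-effort (paced), class 1 = high-priority (unpaced) */
	return ops->ndo_setup_tc(net_dev, 2);
}

On success, netif_set_real_num_tx_queues() has doubled the core TX queue count, and the stack steers packets with queue mappings >= n_tx_channels onto the high-priority hardware queues via the lookup in efx_hard_start_xmit().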