author		Ben Hutchings <bhutchings@solarflare.com>	2011-01-10 16:18:20 -0500
committer	Ben Hutchings <bhutchings@solarflare.com>	2011-02-15 14:45:35 -0500
commit		94b274bf5fba6c75b922c8a23ad4b5639a168780 (patch)
tree		48f3bb2629ee14ba620a08098da1908d16bbe22f /drivers
parent		525da9072c28df815bff64bf00f3b11ab88face8 (diff)
sfc: Add TX queues for high-priority traffic
Implement the ndo_setup_tc() operation with 2 traffic classes.

Current Solarstorm controllers do not implement TX queue priority, but
they do allow queues to be 'paced' with an enforced delay between
packets.  Paced and unpaced queues are scheduled in round-robin within
two separate hardware bins (paced queues with a large delay may be
placed into a third bin temporarily, but we won't use that).  If there
are queues in both bins, the TX scheduler will alternate between them.

If we make high-priority queues unpaced and best-effort queues paced,
and high-priority queues are mostly empty, a single high-priority queue
can then instantly take 50% of the packet rate regardless of how many
of the best-effort queues have descriptors outstanding.

We do not actually want an enforced delay between packets on
best-effort queues, so we set the pace value to a reserved value that
actually results in a delay of 0.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
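As a rough illustration of the scheduling argument above, the toy model below alternates between an unpaced bin holding one busy high-priority queue and a paced bin holding several busy best-effort queues. It is purely illustrative (the loop structure, queue counts and names are assumptions, not the hardware implementation), but it shows the high-priority queue taking roughly 50% of the packet slots however many best-effort queues are backlogged:

/* Toy model of the two-bin round-robin scheduling described in the
 * commit message; names and numbers are illustrative only. */
#include <stdio.h>

int main(void)
{
	enum { N_BEST_EFFORT = 7, SLOTS = 1000 };	/* arbitrary */
	int hp_sent = 0, be_sent[N_BEST_EFFORT] = { 0 };
	int be_next = 0;

	for (int slot = 0; slot < SLOTS; slot++) {
		if (slot & 1) {
			/* paced bin: round-robin over best-effort queues */
			be_sent[be_next]++;
			be_next = (be_next + 1) % N_BEST_EFFORT;
		} else {
			/* unpaced bin: only the high-priority queue */
			hp_sent++;
		}
	}

	printf("high-priority queue: %d/%d slots (%.0f%%)\n",
	       hp_sent, SLOTS, 100.0 * hp_sent / SLOTS);
	for (int i = 0; i < N_BEST_EFFORT; i++)
		printf("best-effort queue %d: %d slots\n", i, be_sent[i]);
	return 0;
}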
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/sfc/efx.c		 8
-rw-r--r--	drivers/net/sfc/efx.h		 1
-rw-r--r--	drivers/net/sfc/net_driver.h	29
-rw-r--r--	drivers/net/sfc/nic.c		51
-rw-r--r--	drivers/net/sfc/regs.h		 6
-rw-r--r--	drivers/net/sfc/selftest.c	 2
-rw-r--r--	drivers/net/sfc/tx.c		87
7 files changed, 156 insertions, 28 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 6189d3066018..d4e04256730b 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 		efx_fini_eventq(channel);
 	}
@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }
@@ -1836,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
+	.ndo_setup_tc = efx_setup_tc,
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -2386,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 116207045068..0cb198a64a63 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -37,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 77b7ce451519..96e22ad34970 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
@@ -377,7 +381,7 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -952,15 +956,28 @@ efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 	return &channel->tx_queue[type];
 }
 
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
+}
+
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
 	if (!efx_channel_has_tx_queues(_channel))			\
 		;							\
 	else								\
 		for (_tx_queue = (_channel)->tx_queue;			\
-		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
 		     _tx_queue++)
 
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
+	     _tx_queue++)
+
 static inline struct efx_rx_queue *
 efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 {
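To make the new queue numbering concrete, here is a minimal standalone sketch of the core-to-hardware queue mapping these macros support, mirroring the efx_hard_start_xmit() and efx_init_tx_queue_core_txq() changes in tx.c below. It assumes hardware queues are numbered channel * EFX_TXQ_TYPES + type, which the core_txq computation in the patch implies; the helper names and the channel count are illustrative only, not part of the driver.

#include <assert.h>

#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
#define EFX_TXQ_TYPES		4

/* Core TX queue index + checksum offload -> hardware queue number,
 * mirroring the lookup in efx_hard_start_xmit(). */
static unsigned int hw_queue(unsigned int core, unsigned int n_tx_channels,
			     int csum_offload)
{
	unsigned int type = csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0;

	if (core >= n_tx_channels) {		/* high-priority class */
		core -= n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	return core * EFX_TXQ_TYPES + type;
}

/* Hardware queue number -> core TX queue index, mirroring
 * efx_init_tx_queue_core_txq(); the inverse of hw_queue() above. */
static unsigned int core_index(unsigned int queue, unsigned int n_tx_channels)
{
	return queue / EFX_TXQ_TYPES +
	       ((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
	const unsigned int n_tx_channels = 4;	/* assumed for the example */

	/* Check that the two mappings really are inverses. */
	for (unsigned int i = 0; i < 2 * n_tx_channels; i++)
		for (int csum = 0; csum <= 1; csum++)
			assert(core_index(hw_queue(i, n_tx_channels, csum),
					  n_tx_channels) == i);
	return 0;
}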
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab68..1d0b8b6f25c4 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 		set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 					++rx_pending;
 			}
 		}
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				++tx_pending;
 		}
 	}
@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);
@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
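Restated outside the register plumbing, the per-queue pace policy written by efx_nic_init_tx() above boils down to the small helper below. It is illustrative only; the function name is not part of the patch, and the constants are repeated here so the snippet stands alone.

#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag, as in net_driver.h */
#define FFE_BZ_TX_PACE_OFF	0	/* as in regs.h below */
#define FFE_BZ_TX_PACE_RESERVED	21

/* Pace value programmed into FR_BZ_TX_PACE_TBL for a hardware TX
 * queue: high-priority queues are left unpaced; every other queue
 * gets the reserved encoding 21, which results in an actual
 * inter-packet delay of 0. */
static unsigned int tx_pace_value(unsigned int queue)
{
	return (queue & EFX_TXQ_TYPE_HIGHPRI) ?
		FFE_BZ_TX_PACE_OFF : FFE_BZ_TX_PACE_RESERVED;
}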
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c36..8227de62014f 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2907,6 +2907,12 @@
 #define	FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define	FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define	FFE_BZ_TX_PACE_OFF 0
+#define	FFE_BZ_TX_PACE_RESERVED 21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define	FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f1299..f936892aa423 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 			goto out;
 		}
 
-		/* Test both types of TX queue */
+		/* Test all enabled types of TX queue */
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			state->offload_csum = (tx_queue->queue &
 					       EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 7e463fb19fb9..1a51653bb92b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,22 +336,89 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
+	struct efx_nic *efx = tx_queue->efx;
+
 	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
-	tx_queue->core_txq = netdev_get_tx_queue(
-		tx_queue->efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused.  We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues.  Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -437,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -459,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -473,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
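
For a concrete picture of what efx_setup_tc() above programs, the short example below works through the tc_to_txq layout for an assumed 4 TX channels and num_tc = 2; the channel count is illustrative, not a fixed property of the hardware.

#include <stdio.h>

int main(void)
{
	const unsigned int n_tx_channels = 4;	/* assumed for the example */
	const unsigned int num_tc = 2;

	/* Same layout as the tc_to_txq loop in efx_setup_tc():
	 * each class gets one core queue per TX channel. */
	for (unsigned int tc = 0; tc < num_tc; tc++)
		printf("TC %u -> core queues %u..%u (offset %u, count %u)\n",
		       tc, tc * n_tx_channels,
		       (tc + 1) * n_tx_channels - 1,
		       tc * n_tx_channels, n_tx_channels);

	/* netif_set_real_num_tx_queues() is then called with this. */
	printf("real_num_tx_queues = %u\n", num_tc * n_tx_channels);
	return 0;
}

With these numbers, core queues 0..3 carry the best-effort class and 4..7 the high-priority class, matching the EFX_TXQ_TYPE_HIGHPRI mapping in efx_hard_start_xmit().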