author     David S. Miller <davem@davemloft.net>  2011-02-15 15:25:19 -0500
committer  David S. Miller <davem@davemloft.net>  2011-02-15 15:25:19 -0500
commit     f878b995b0f746f5726af9e66940f3bf373dae91
tree       81fa0cc358ff9957116468a0c37d4f7f3f493cb3 /drivers/net
parent     29e1846a6ba84e0c6e257dd5b1231ed53b98fe9b
parent     94b274bf5fba6c75b922c8a23ad4b5639a168780
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/sfc/efx.c         | 31
-rw-r--r--  drivers/net/sfc/efx.h         |  2
-rw-r--r--  drivers/net/sfc/ethtool.c     |  6
-rw-r--r--  drivers/net/sfc/net_driver.h  | 64
-rw-r--r--  drivers/net/sfc/nic.c         | 51
-rw-r--r--  drivers/net/sfc/regs.h        |  6
-rw-r--r--  drivers/net/sfc/selftest.c    |  2
-rw-r--r--  drivers/net/sfc/tx.c          | 90
8 files changed, 195 insertions(+), 57 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac743843..d4e04256730b 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 		efx_fini_eventq(channel);
 	}
@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }
@@ -1271,21 +1271,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-
 	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-	/* Channel pointers were set in efx_init_struct() but we now
-	 * need to clear them for TX queues in any RX-only channels. */
-	efx_for_each_channel(channel, efx) {
-		if (channel->channel - efx->tx_channel_offset >=
-		    efx->n_tx_channels) {
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				tx_queue->channel = NULL;
-		}
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1531,9 +1518,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel))
+		if (efx_channel_has_rx_queue(channel))
 			channel->irq_moderation = rx_ticks;
-		else if (efx_channel_get_tx_queue(channel, 0))
+		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation = tx_ticks;
 	}
 }
@@ -1849,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
+	.ndo_setup_tc		= efx_setup_tc,
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1898,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 
 	efx_for_each_channel(channel, efx) {
 		struct efx_tx_queue *tx_queue;
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			tx_queue->core_txq = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
 	/* Always start with carrier off; PHY events will detect the link */
@@ -2401,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
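
The efx.c hunks above lean on the channel predicates added in net_driver.h further down. As a standalone model (a sketch only, not driver code; the struct and the 2 RX / 2 TX example split are illustrative), this shows how those predicates partition channels when separate_tx_channels puts TX-only channels after the RX-only ones:

#include <stdio.h>

struct cfg {
	unsigned int n_channels;
	unsigned int n_rx_channels;
	unsigned int n_tx_channels;
	unsigned int tx_channel_offset;
};

static int has_rx_queue(const struct cfg *c, unsigned int channel)
{
	/* Mirrors efx_channel_has_rx_queue() */
	return channel < c->n_rx_channels;
}

static int has_tx_queues(const struct cfg *c, unsigned int channel)
{
	/* Mirrors efx_channel_has_tx_queues(); unsigned underflow makes
	 * channels below the offset compare large, i.e. false. */
	return channel - c->tx_channel_offset < c->n_tx_channels;
}

int main(void)
{
	/* Example: 4 channels, split into 2 RX-only + 2 TX-only */
	struct cfg c = { 4, 2, 2, 4 - 2 };
	unsigned int ch;

	for (ch = 0; ch < c.n_channels; ch++)
		printf("channel %u: rx=%d tx=%d\n", ch,
		       has_rx_queue(&c, ch), has_tx_queues(&c, ch));
	return 0;
}

With this split, channels 0-1 report RX only and channels 2-3 TX only, which is exactly the distinction the efx_init_irq_moderation() hunk relies on.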
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5212b1..0cb198a64a63 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 713969accdbd..272cfe724e1b 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	/* Find lowest IRQ moderation across all used TX queues */
 	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
 	efx_for_each_channel(channel, efx) {
-		if (!efx_channel_get_tx_queue(channel, 0))
+		if (!efx_channel_has_tx_queues(channel))
 			continue;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
 			if (channel->channel < efx->n_rx_channels)
@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel) &&
-		    efx_channel_get_tx_queue(channel, 0) &&
+		if (efx_channel_has_rx_queue(channel) &&
+		    efx_channel_has_tx_queues(channel) &&
 		    tx_usecs) {
 			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
 				  "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index c65270241d2d..96e22ad34970 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
@@ -377,7 +381,7 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -938,18 +942,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	struct efx_tx_queue *tx_queue = channel->tx_queue;
-	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-	return tx_queue->channel ? tx_queue + type : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+			    type >= EFX_TXQ_TYPES);
+	return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
-	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
+		     _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
 	     _tx_queue++)
 
 static inline struct efx_rx_queue *
@@ -959,18 +985,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 	return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-	return channel->channel < channel->efx->n_rx_channels ?
-		&channel->rx_queue : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
-	     _rx_queue;							\
-	     _rx_queue = NULL)
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
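
The net_driver.h constants above double the per-channel TX queue count from 2 to 4 by turning the queue type into a pair of flag bits. A minimal standalone sketch of the resulting queue-number encoding (illustrative, not driver code; it assumes the driver's usual layout of queue = channel * EFX_TXQ_TYPES + type, which the core-txq mapping in tx.c below also relies on):

#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag: checksum offload */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag: high priority */
#define EFX_TXQ_TYPES		4

int main(void)
{
	unsigned int queue;

	/* Decode the hardware queue numbers of the first two channels */
	for (queue = 0; queue < 2 * EFX_TXQ_TYPES; queue++)
		printf("queue %2u: channel %u csum-offload %u highpri %u\n",
		       queue,
		       queue / EFX_TXQ_TYPES,
		       !!(queue & EFX_TXQ_TYPE_OFFLOAD),
		       !!(queue & EFX_TXQ_TYPE_HIGHPRI));
	return 0;
}

Bit 0 selects checksum offload and bit 1 high priority, so each channel now carries four hardware queues: plain, offload, high-priority, and high-priority offload.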
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab68..1d0b8b6f25c4 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 		set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 				++rx_pending;
 			}
 		}
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				++tx_pending;
 		}
 	}
@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);
@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
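
The TX_PACE_TBL writes in efx_nic_init_tx() above encode queue priority. A tiny standalone model of the per-queue value selection (illustrative only, not driver code): high-priority queues have pacing switched off, while every other queue is written with the reserved value 21, which per the regs.h comment below lands it in the fast bin with an effective pace of zero:

#include <stdio.h>

#define EFX_TXQ_TYPE_HIGHPRI	2
#define FFE_BZ_TX_PACE_OFF	0
#define FFE_BZ_TX_PACE_RESERVED	21

/* Mirrors the ternary in the FR_BZ_TX_PACE_TBL hunk above */
static unsigned int tx_pace_value(unsigned int queue)
{
	return (queue & EFX_TXQ_TYPE_HIGHPRI) ? FFE_BZ_TX_PACE_OFF
					      : FFE_BZ_TX_PACE_RESERVED;
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < 8; q++)
		printf("queue %u -> pace %u\n", q, tx_pace_value(q));
	return 0;
}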
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c36..8227de62014f 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2907,6 +2907,12 @@
 #define	FRF_CZ_TMFT_SRC_MAC_HI_LBN	44
 #define	FRF_CZ_TMFT_SRC_MAC_HI_WIDTH	16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define	FFE_BZ_TX_PACE_OFF		0
+#define	FFE_BZ_TX_PACE_RESERVED		21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define	FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN	12
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f1299..f936892aa423 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 		goto out;
 	}
 
-	/* Test both types of TX queue */
+	/* Test all enabled types of TX queue */
 	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		state->offload_csum = (tx_queue->queue &
 				       EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da657bf..1a51653bb92b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused. We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues. Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
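
The comment in efx_init_tx_queue_core_txq() requires the core-queue mapping to be the inverse of the lookup in efx_hard_start_xmit(). A standalone round-trip check of the two mappings (a sketch; it assumes the default layout with tx_channel_offset == 0, and the n_tx_channels value is just an example):

#include <assert.h>
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD	1
#define EFX_TXQ_TYPE_HIGHPRI	2
#define EFX_TXQ_TYPES		4

int main(void)
{
	unsigned int n_tx_channels = 8;	/* example value */
	unsigned int queue;

	for (queue = 0; queue < n_tx_channels * EFX_TXQ_TYPES; queue++) {
		/* Driver -> core: efx_init_tx_queue_core_txq() */
		unsigned int core = queue / EFX_TXQ_TYPES +
			((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);

		/* Core -> driver: efx_hard_start_xmit() */
		unsigned int index = core;
		unsigned int type = queue & EFX_TXQ_TYPE_OFFLOAD; /* from csum */

		if (index >= n_tx_channels) {
			index -= n_tx_channels;
			type |= EFX_TXQ_TYPE_HIGHPRI;
		}
		assert(index * EFX_TXQ_TYPES + type == queue);
	}
	printf("core txq mapping is self-inverse for %u queues\n",
	       n_tx_channels * EFX_TXQ_TYPES);
	return 0;
}

Core queues 0 .. n_tx_channels-1 carry the normal traffic class and n_tx_channels .. 2*n_tx_channels-1 the high-priority class, matching the tc_to_txq ranges set up in efx_setup_tc().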