-rw-r--r--  drivers/net/sfc/efx.c         | 113
-rw-r--r--  drivers/net/sfc/efx.h         |   4
-rw-r--r--  drivers/net/sfc/ethtool.c     |   4
-rw-r--r--  drivers/net/sfc/net_driver.h  |  61
-rw-r--r--  drivers/net/sfc/nic.c         |  12
-rw-r--r--  drivers/net/sfc/selftest.c    |   4
-rw-r--r--  drivers/net/sfc/selftest.h    |   4
-rw-r--r--  drivers/net/sfc/tx.c          |  61
8 files changed, 140 insertions, 123 deletions
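
This commit gives the driver one core TX queue per TX channel, each backed by two hardware TX queues (with and without checksum offload). A minimal standalone sketch of the numbering convention the patch adopts, assuming only the EFX_TXQ_* constants from the net_driver.h hunk below; the two helper functions are illustrative, not driver code:

/* Sketch (not driver code) of the queue numbering this patch adopts:
 * each core (networking-stack) TX queue is backed by EFX_TXQ_TYPES
 * hardware queues, and the low bit selects the checksum-offload one.
 * Constants match the net_driver.h hunk; the helpers are hypothetical. */
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1
#define EFX_TXQ_TYPES 2

static unsigned hw_txq(unsigned core_queue, int csum_offload)
{
	/* Mirrors efx_hard_start_xmit(): base index plus offload type */
	return core_queue * EFX_TXQ_TYPES +
	       (csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0);
}

static unsigned core_txq(unsigned hw_queue)
{
	/* Mirrors efx_stop_queue()/efx_wake_queue(): back to the core queue */
	return hw_queue / EFX_TXQ_TYPES;
}

int main(void)
{
	printf("core 3 + csum -> hw %u\n", hw_txq(3, 1));	/* 7 */
	printf("hw 7 -> core %u\n", core_txq(7));		/* 3 */
	return 0;
}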
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 5e3f944fdd95..bc75ef683c9f 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -288,7 +288,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	if (spent < budget) {
 		struct efx_nic *efx = channel->efx;
 
-		if (channel->used_flags & EFX_USED_BY_RX &&
+		if (channel->channel < efx->n_rx_channels &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
 			if (unlikely(channel->irq_mod_score <
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
 
-	BUG_ON(!channel->used_flags);
 	BUG_ON(!channel->enabled);
 
 	/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
 
 	efx_for_each_channel(channel, efx) {
 		number = channel->channel;
-		if (efx->n_channels > efx->n_rx_queues) {
-			if (channel->channel < efx->n_rx_queues) {
+		if (efx->n_channels > efx->n_rx_channels) {
+			if (channel->channel < efx->n_rx_channels) {
 				type = "-rx";
 			} else {
 				type = "-tx";
-				number -= efx->n_rx_queues;
+				number -= efx->n_rx_channels;
 			}
 		}
 		snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_for_each_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
-
-	channel->used_flags = 0;
 }
 
 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
 	pci_disable_device(efx->pci_dev);
 }
 
-/* Get number of RX queues wanted. Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
 {
 	cpumask_var_t core_mask;
 	int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
 		struct msix_entry xentries[EFX_MAX_CHANNELS];
-		int wanted_ints;
-		int rx_queues;
+		int n_channels;
 
-		/* We want one RX queue and interrupt per CPU package
-		 * (or as specified by the rss_cpus module parameter).
-		 * We will need one channel per interrupt.
-		 */
-		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
-		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
-		wanted_ints = min(wanted_ints, max_channels);
+		n_channels = efx_wanted_channels();
+		if (separate_tx_channels)
+			n_channels *= 2;
+		n_channels = min(n_channels, max_channels);
 
-		for (i = 0; i < wanted_ints; i++)
+		for (i = 0; i < n_channels; i++)
 			xentries[i].entry = i;
-		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
 		if (rc > 0) {
 			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
-				" available (%d < %d).\n", rc, wanted_ints);
+				" available (%d < %d).\n", rc, n_channels);
 			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
-			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
-			wanted_ints = rc;
+			EFX_BUG_ON_PARANOID(rc >= n_channels);
+			n_channels = rc;
 			rc = pci_enable_msix(efx->pci_dev, xentries,
-					     wanted_ints);
+					     n_channels);
 		}
 
 		if (rc == 0) {
-			efx->n_rx_queues = min(rx_queues, wanted_ints);
-			efx->n_channels = wanted_ints;
-			for (i = 0; i < wanted_ints; i++)
+			efx->n_channels = n_channels;
+			if (separate_tx_channels) {
+				efx->n_tx_channels =
+					max(efx->n_channels / 2, 1U);
+				efx->n_rx_channels =
+					max(efx->n_channels -
+					    efx->n_tx_channels, 1U);
+			} else {
+				efx->n_tx_channels = efx->n_channels;
+				efx->n_rx_channels = efx->n_channels;
+			}
+			for (i = 0; i < n_channels; i++)
 				efx->channel[i].irq = xentries[i].vector;
 		} else {
 			/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	/* Try single interrupt MSI */
 	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
-		efx->n_rx_queues = 1;
 		efx->n_channels = 1;
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
 			efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	/* Assume legacy interrupts */
 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-		efx->n_rx_queues = 1;
 		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 }
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	unsigned tx_channel_offset =
+		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (separate_tx_channels)
-			tx_queue->channel = &efx->channel[efx->n_channels-1];
-		else
-			tx_queue->channel = &efx->channel[0];
-		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+			channel->tx_queue = &efx->tx_queue[
+				(channel->channel - tx_channel_offset) *
+				EFX_TXQ_TYPES];
+			efx_for_each_channel_tx_queue(tx_queue, channel)
+				tx_queue->channel = channel;
+		}
 	}
 
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		rx_queue->channel = &efx->channel[rx_queue->queue];
-		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
 	if (rc)
 		return rc;
 
-	/* Determine the number of channels and RX queues by trying to hook
+	/* Determine the number of channels and queues by trying to hook
 	 * in MSI-X interrupts. */
 	efx_probe_interrupts(efx);
 
 	efx_set_channels(efx);
+	efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
 
 	/* Initialise the interrupt moderation settings */
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
 	/* Mark the port as enabled so port reconfigurations can start, then
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
-	if (efx_dev_registered(efx))
-		efx_wake_queue(efx);
 
-	efx_for_each_channel(channel, efx)
+	efx_for_each_channel(channel, efx) {
+		if (efx_dev_registered(efx))
+			efx_wake_queue(channel);
 		efx_start_channel(channel);
+	}
 
 	efx_nic_enable_interrupts(efx);
 
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		efx_stop_queue(efx);
+		struct efx_channel *channel;
+		efx_for_each_channel(channel, efx)
+			efx_stop_queue(channel);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
-		" resetting channels\n",
-		atomic_read(&efx->netif_stop_count), efx->port_enabled);
+	EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+		efx->port_enabled);
 
 	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 }
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->net_dev = net_dev;
 	efx->rx_checksum_enabled = true;
-	spin_lock_init(&efx->netif_stop_lock);
 	spin_lock_init(&efx->stats_lock);
 	mutex_init(&efx->mac_lock);
 	efx->mac_op = type->default_mac_ops;
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
 	INIT_WORK(&efx->mac_work, efx_mac_work);
-	atomic_set(&efx->netif_stop_count, 1);
 
 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
 		channel = &efx->channel[i];
 		channel->efx = efx;
 		channel->channel = i;
 		channel->work_pending = false;
+		spin_lock_init(&channel->tx_stop_lock);
+		atomic_set(&channel->tx_stop_count, 1);
 	}
-	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
 		tx_queue = &efx->tx_queue[i];
 		tx_queue->efx = efx;
 		tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev(sizeof(*efx));
+	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
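
For reference, a standalone sketch of the RX/TX channel split that the efx_probe_interrupts() hunk above performs under MSI-X; it mirrors the min()/max() arithmetic in the patch with plain C, and the function and variable names here are illustrative only:

#include <stdio.h>

static void split_channels(unsigned wanted, unsigned max_channels,
			   int separate_tx_channels,
			   unsigned *n_rx, unsigned *n_tx)
{
	unsigned n_channels = wanted;

	/* With separate TX channels the wanted count is doubled, then
	 * clamped to the hardware/module limit before being divided. */
	if (separate_tx_channels)
		n_channels *= 2;
	if (n_channels > max_channels)
		n_channels = max_channels;

	if (separate_tx_channels) {
		/* Mirrors max(n_channels / 2, 1U) and
		 * max(n_channels - n_tx_channels, 1U) from the patch */
		*n_tx = n_channels / 2 ? n_channels / 2 : 1;
		*n_rx = n_channels - *n_tx ? n_channels - *n_tx : 1;
	} else {
		*n_tx = n_channels;
		*n_rx = n_channels;
	}
}

int main(void)
{
	unsigned n_rx, n_tx;

	/* e.g. 4 wanted channels (CPU packages), 32-channel limit */
	split_channels(4, 32, 1, &n_rx, &n_tx);
	printf("rx=%u tx=%u\n", n_rx, n_tx);	/* rx=4 tx=4 */
	return 0;
}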
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb3..ffd708c5304a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_nic *efx);
-extern void efx_wake_queue(struct efx_nic *efx);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
 #define EFX_TXQ_SIZE 1024
 #define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index cbe9319f8322..22026bfbc4c1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	efx_for_each_tx_queue(tx_queue, efx) {
 		channel = tx_queue->channel;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
-			if (channel->used_flags != EFX_USED_BY_RX_TX)
+			if (channel->channel < efx->n_rx_channels)
 				coalesce->tx_coalesce_usecs_irq =
 					channel->irq_moderation;
 			else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_tx_queue(tx_queue, efx) {
-		if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+		if ((tx_queue->channel->channel < efx->n_rx_channels) &&
 		    tx_usecs) {
 			EFX_ERR(efx, "Channel is shared. "
 				"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index d68331c7fb5d..2e6fd89f2a72 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
 #define EFX_MAX_CHANNELS 32
 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
 
-#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
-#define EFX_TX_QUEUE_NO_CSUM 1
-#define EFX_TX_QUEUE_COUNT 2
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
+#define EFX_TXQ_TYPE_OFFLOAD 1
+#define EFX_TXQ_TYPES 2
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
 struct efx_tx_queue {
 	/* Members which don't change on the fast path */
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
-	int queue;
+	unsigned queue;
 	struct efx_channel *channel;
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
 };
 
 
-/* Flags for channel->used_flags */
-#define EFX_USED_BY_RX 1
-#define EFX_USED_BY_TX 2
-#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
-
 enum efx_rx_alloc_method {
 	RX_ALLOC_METHOD_AUTO = 0,
 	RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
  * @efx: Associated Efx NIC
  * @channel: Channel instance number
  * @name: Name for channel and IRQ
- * @used_flags: Channel is used by net driver
  * @enabled: Channel enabled indicator
  * @irq: IRQ number (MSI and MSI-X only)
  * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
  * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
  */
 struct efx_channel {
 	struct efx_nic *efx;
 	int channel;
 	char name[IFNAMSIZ + 6];
-	int used_flags;
 	bool enabled;
 	int irq;
 	unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
 	struct efx_rx_buffer *rx_pkt;
 	bool rx_pkt_csummed;
 
+	struct efx_tx_queue *tx_queue;
+	atomic_t tx_stop_count;
+	spinlock_t tx_stop_lock;
 };
 
 enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
  * @rx_queue: RX DMA queues
  * @channel: Channels
  * @next_buffer_table: First available buffer table id
- * @n_rx_queues: Number of RX queues
  * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @int_error_count: Number of internal errors seen recently
@@ -693,8 +697,6 @@ union efx_multicast_hash {
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @rx_checksum_enabled: RX checksumming enabled
- * @netif_stop_count: Port stop count
- * @netif_stop_lock: Port stop lock
  * @mac_stats: MAC statistics. These include all statistics the MACs
  *	can provide. Generic code converts these into a standard
  *	&struct net_device_stats.
@@ -742,13 +744,14 @@ struct efx_nic {
 	enum nic_state state;
 	enum reset_type reset_pending;
 
-	struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
+	struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
 	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
 	struct efx_channel channel[EFX_MAX_CHANNELS];
 
 	unsigned next_buffer_table;
-	int n_rx_queues;
-	int n_channels;
+	unsigned n_channels;
+	unsigned n_rx_channels;
+	unsigned n_tx_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
 
@@ -780,9 +783,6 @@ struct efx_nic {
 	struct net_device *net_dev;
 	bool rx_checksum_enabled;
 
-	atomic_t netif_stop_count;
-	spinlock_t netif_stop_lock;
-
 	struct efx_mac_stats mac_stats;
 	struct efx_buffer stats_buffer;
 	spinlock_t stats_lock;
@@ -928,31 +928,26 @@ struct efx_nic_type {
 /* Iterate over all used channels */
 #define efx_for_each_channel(_channel, _efx)				\
 	for (_channel = &((_efx)->channel[0]);				\
-	     _channel < &((_efx)->channel[EFX_MAX_CHANNELS]);		\
-	     _channel++)						\
-		if (!_channel->used_flags)				\
-			continue;					\
-		else
+	     _channel < &((_efx)->channel[(_efx)->n_channels]);	\
+	     _channel++)
 
 /* Iterate over all used TX queues */
 #define efx_for_each_tx_queue(_tx_queue, _efx)				\
 	for (_tx_queue = &((_efx)->tx_queue[0]);			\
-	     _tx_queue < &((_efx)->tx_queue[EFX_TX_QUEUE_COUNT]);	\
+	     _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES *		\
+					    (_efx)->n_tx_channels]);	\
 	     _tx_queue++)
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = &((_channel)->efx->tx_queue[0]);		\
-	     _tx_queue < &((_channel)->efx->tx_queue[EFX_TX_QUEUE_COUNT]); \
-	     _tx_queue++)						\
-		if (_tx_queue->channel != (_channel))			\
-			continue;					\
-		else
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+	     _tx_queue++)
 
 /* Iterate over all used RX queues */
 #define efx_for_each_rx_queue(_rx_queue, _efx)				\
 	for (_rx_queue = &((_efx)->rx_queue[0]);			\
-	     _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_queues]);	\
+	     _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]);	\
 	     _rx_queue++)
 
 /* Iterate over all RX queues belonging to a channel */
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f3226bbf9831..5d3aaec58556 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 		efx_oword_t reg;
 
 		/* Only 128 bits in this register */
-		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
+		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
 		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
-		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
+		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
 			clear_bit_le(tx_queue->queue, (void *)&reg);
 		else
 			set_bit_le(tx_queue->queue, (void *)&reg);
@@ -1132,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 			   ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
 			ev_queue = EFX_QWORD_FIELD(*event,
 						   FSF_AZ_DRIVER_EV_SUBDATA);
-			if (ev_queue < EFX_TX_QUEUE_COUNT) {
+			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
 				tx_queue = efx->tx_queue + ev_queue;
 				tx_queue->flushed = FLUSH_DONE;
 			}
@@ -1142,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
 			ev_failed = EFX_QWORD_FIELD(
 				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
-			if (ev_queue < efx->n_rx_queues) {
+			if (ev_queue < efx->n_rx_channels) {
 				rx_queue = efx->rx_queue + ev_queue;
 				rx_queue->flushed =
 					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1441,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
 	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
 	     offset += 0x10) {
 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
-				     i % efx->n_rx_queues);
+				     i % efx->n_rx_channels);
 		efx_writed(efx, &dword, offset);
 		i++;
 	}
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 3a16e0612267..371e86cc090f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -618,8 +618,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 
 	/* Test both types of TX queue */
 	efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
-		state->offload_csum = (tx_queue->queue ==
-				       EFX_TX_QUEUE_OFFLOAD_CSUM);
+		state->offload_csum = (tx_queue->queue &
+				       EFX_TXQ_TYPE_OFFLOAD);
 		rc = efx_test_loopback(tx_queue,
 				       &tests->loopback[mode]);
 		if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99d..aed495a4dad7 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
  */
 
 struct efx_loopback_self_tests {
-	int tx_sent[EFX_TX_QUEUE_COUNT];
-	int tx_done[EFX_TX_QUEUE_COUNT];
+	int tx_sent[EFX_TXQ_TYPES];
+	int tx_done[EFX_TXQ_TYPES];
 	int rx_good;
 	int rx_bad;
 };
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
  */
 #define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
 
-/* We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_stop_queue(struct efx_nic *efx)
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
 {
-	spin_lock_bh(&efx->netif_stop_lock);
+	struct efx_nic *efx = channel->efx;
+
+	if (!channel->tx_queue)
+		return;
+
+	spin_lock_bh(&channel->tx_stop_lock);
 	EFX_TRACE(efx, "stop TX queue\n");
 
-	atomic_inc(&efx->netif_stop_count);
-	netif_stop_queue(efx->net_dev);
+	atomic_inc(&channel->tx_stop_count);
+	netif_tx_stop_queue(
+		netdev_get_tx_queue(
+			efx->net_dev,
+			channel->tx_queue->queue / EFX_TXQ_TYPES));
 
-	spin_unlock_bh(&efx->netif_stop_lock);
+	spin_unlock_bh(&channel->tx_stop_lock);
 }
 
-/* Wake netif's TX queue
- * We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_wake_queue(struct efx_nic *efx)
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
 {
+	struct efx_nic *efx = channel->efx;
+
+	if (!channel->tx_queue)
+		return;
+
 	local_bh_disable();
-	if (atomic_dec_and_lock(&efx->netif_stop_count,
-				&efx->netif_stop_lock)) {
+	if (atomic_dec_and_lock(&channel->tx_stop_count,
+				&channel->tx_stop_lock)) {
 		EFX_TRACE(efx, "waking TX queue\n");
-		netif_wake_queue(efx->net_dev);
-		spin_unlock(&efx->netif_stop_lock);
+		netif_tx_wake_queue(
+			netdev_get_tx_queue(
+				efx->net_dev,
+				channel->tx_queue->queue / EFX_TXQ_TYPES));
+		spin_unlock(&channel->tx_stop_lock);
 	}
 	local_bh_enable();
 }
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		rc = NETDEV_TX_BUSY;
 
 		if (tx_queue->stopped == 1)
-			efx_stop_queue(efx);
+			efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
+	tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
-	else
-		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+		tx_queue += EFX_TXQ_TYPE_OFFLOAD;
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 		netif_tx_lock(efx->net_dev);
 		if (tx_queue->stopped) {
 			tx_queue->stopped = 0;
-			efx_wake_queue(efx);
+			efx_wake_queue(tx_queue->channel);
 		}
 		netif_tx_unlock(efx->net_dev);
 	}
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 	/* Release queue's stop on port, if any */
 	if (tx_queue->stopped) {
 		tx_queue->stopped = 0;
-		efx_wake_queue(tx_queue->efx);
+		efx_wake_queue(tx_queue->channel);
 	}
 }
 
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	/* Stop the queue if it wasn't stopped before. */
 	if (tx_queue->stopped == 1)
-		efx_stop_queue(efx);
+		efx_stop_queue(tx_queue->channel);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
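
The efx_stop_queue()/efx_wake_queue() pair above keeps a nestable, counted stop per channel: every stop increments tx_stop_count, and only the wake that brings the count back to zero restarts the core queue. A self-contained sketch of that pattern, with C11 atomics and a pthread mutex standing in for the kernel's atomic_t and spinlock; the struct and the queue_stopped flag are illustrative, not driver code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct channel_stop {
	atomic_int tx_stop_count;
	pthread_mutex_t tx_stop_lock;
	bool queue_stopped;		/* stands in for netif_tx_{stop,wake}_queue() */
};

static void stop_queue(struct channel_stop *c)
{
	/* Like efx_stop_queue(): bump the count and stop the core queue */
	pthread_mutex_lock(&c->tx_stop_lock);
	atomic_fetch_add(&c->tx_stop_count, 1);
	c->queue_stopped = true;
	pthread_mutex_unlock(&c->tx_stop_lock);
}

static void wake_queue(struct channel_stop *c)
{
	/* Approximates atomic_dec_and_lock(): only the caller that drops
	 * the count to zero takes the lock and actually wakes the queue */
	if (atomic_fetch_sub(&c->tx_stop_count, 1) == 1) {
		pthread_mutex_lock(&c->tx_stop_lock);
		if (atomic_load(&c->tx_stop_count) == 0)
			c->queue_stopped = false;
		pthread_mutex_unlock(&c->tx_stop_lock);
	}
}

int main(void)
{
	struct channel_stop c = {
		.tx_stop_count = 1,	/* starts stopped, as in efx_init_struct() */
		.tx_stop_lock = PTHREAD_MUTEX_INITIALIZER,
		.queue_stopped = true,
	};

	wake_queue(&c);			/* initial wake, as in efx_start_all() */
	stop_queue(&c);
	stop_queue(&c);			/* nested stop */
	wake_queue(&c);
	printf("stopped=%d\n", c.queue_stopped);	/* 1: still stopped */
	wake_queue(&c);
	printf("stopped=%d\n", c.queue_stopped);	/* 0: woken */
	return 0;
}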