author    Ben Hutchings <bhutchings@solarflare.com>    2010-04-28 05:30:43 -0400
committer David S. Miller <davem@davemloft.net>        2010-04-28 15:44:44 -0400
commit    a4900ac9f7d10ad062e54dd03125e3619e0ac17a
tree      32a6f316ccbda7994d5c52197d8a2b5ebb60d6da /drivers/net/sfc/efx.c
parent    5298c37f4d1f0360082be9d9e3a236b9cc114a03
sfc: Create multiple TX queues
Create a core TX queue and 2 hardware TX queues for each channel.
If separate_tx_channels is set, create equal numbers of RX and TX
channels instead.

Rewrite the channel and queue iteration macros accordingly.
Eliminate efx_channel::used_flags as redundant.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
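As background for the diff below: each core TX queue visible to the kernel is backed by EFX_TXQ_TYPES consecutive hardware TX queues (2, per the commit message), and when separate_tx_channels is set the TX-capable channels sit at the top of the channel range, offset by tx_channel_offset. The standalone C sketch below models only that index arithmetic from efx_set_channels(); the sizing values are hypothetical, and the unsigned subtraction deliberately mirrors the driver's trick of letting channels below tx_channel_offset underflow and fail the bounds test.

#include <stdio.h>

#define EFX_TXQ_TYPES 2 /* hardware TX queues backing one core TX queue */

int main(void)
{
        /* Hypothetical sizing: separate_tx_channels giving 2 RX + 2 TX channels */
        unsigned n_channels = 4, n_rx_channels = 2, n_tx_channels = 2;
        unsigned tx_channel_offset = n_channels - n_tx_channels;
        unsigned ch;

        for (ch = 0; ch < n_channels; ch++) {
                /* Channels below the offset underflow here and fail the
                 * comparison, exactly like the test in efx_set_channels() */
                if (ch - tx_channel_offset < n_tx_channels)
                        printf("channel %u: core TX queue %u -> hw queues %u,%u\n",
                               ch, ch - tx_channel_offset,
                               (ch - tx_channel_offset) * EFX_TXQ_TYPES,
                               (ch - tx_channel_offset) * EFX_TXQ_TYPES + 1);
                if (ch < n_rx_channels)
                        printf("channel %u: RX queue %u\n", ch, ch);
        }
        return 0;
}

With these example numbers, channels 0-1 carry RX queues 0-1 while channels 2-3 own hardware TX queue pairs (0,1) and (2,3), matching the -rx/-tx IRQ naming in efx_set_channel_names().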
Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--  drivers/net/sfc/efx.c | 113
1 file changed, 61 insertions(+), 52 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 5e3f944fdd95..bc75ef683c9f 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -288,7 +288,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	if (spent < budget) {
 		struct efx_nic *efx = channel->efx;
 
-		if (channel->used_flags & EFX_USED_BY_RX &&
+		if (channel->channel < efx->n_rx_channels &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
 			if (unlikely(channel->irq_mod_score <
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
 
-	BUG_ON(!channel->used_flags);
 	BUG_ON(!channel->enabled);
 
 	/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
 
 	efx_for_each_channel(channel, efx) {
 		number = channel->channel;
-		if (efx->n_channels > efx->n_rx_queues) {
-			if (channel->channel < efx->n_rx_queues) {
+		if (efx->n_channels > efx->n_rx_channels) {
+			if (channel->channel < efx->n_rx_channels) {
 				type = "-rx";
 			} else {
 				type = "-tx";
-				number -= efx->n_rx_queues;
+				number -= efx->n_rx_channels;
 			}
 		}
 		snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_for_each_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
-
-	channel->used_flags = 0;
 }
 
 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
 	pci_disable_device(efx->pci_dev);
 }
 
-/* Get number of RX queues wanted.  Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted.  Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
 {
 	cpumask_var_t core_mask;
 	int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
 		struct msix_entry xentries[EFX_MAX_CHANNELS];
-		int wanted_ints;
-		int rx_queues;
+		int n_channels;
 
-		/* We want one RX queue and interrupt per CPU package
-		 * (or as specified by the rss_cpus module parameter).
-		 * We will need one channel per interrupt.
-		 */
-		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
-		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
-		wanted_ints = min(wanted_ints, max_channels);
+		n_channels = efx_wanted_channels();
+		if (separate_tx_channels)
+			n_channels *= 2;
+		n_channels = min(n_channels, max_channels);
 
-		for (i = 0; i < wanted_ints; i++)
+		for (i = 0; i < n_channels; i++)
 			xentries[i].entry = i;
-		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
 		if (rc > 0) {
 			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
-				" available (%d < %d).\n", rc, wanted_ints);
+				" available (%d < %d).\n", rc, n_channels);
 			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
-			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
-			wanted_ints = rc;
+			EFX_BUG_ON_PARANOID(rc >= n_channels);
+			n_channels = rc;
 			rc = pci_enable_msix(efx->pci_dev, xentries,
-					     wanted_ints);
+					     n_channels);
 		}
 
 		if (rc == 0) {
-			efx->n_rx_queues = min(rx_queues, wanted_ints);
-			efx->n_channels = wanted_ints;
-			for (i = 0; i < wanted_ints; i++)
+			efx->n_channels = n_channels;
+			if (separate_tx_channels) {
+				efx->n_tx_channels =
+					max(efx->n_channels / 2, 1U);
+				efx->n_rx_channels =
+					max(efx->n_channels -
+					    efx->n_tx_channels, 1U);
+			} else {
+				efx->n_tx_channels = efx->n_channels;
+				efx->n_rx_channels = efx->n_channels;
+			}
+			for (i = 0; i < n_channels; i++)
 				efx->channel[i].irq = xentries[i].vector;
 		} else {
 			/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	/* Try single interrupt MSI */
 	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
-		efx->n_rx_queues = 1;
 		efx->n_channels = 1;
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
 			efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	/* Assume legacy interrupts */
 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-		efx->n_rx_queues = 1;
 		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 }
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	unsigned tx_channel_offset =
+		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (separate_tx_channels)
-			tx_queue->channel = &efx->channel[efx->n_channels-1];
-		else
-			tx_queue->channel = &efx->channel[0];
-		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+			channel->tx_queue = &efx->tx_queue[
+				(channel->channel - tx_channel_offset) *
+				EFX_TXQ_TYPES];
+			efx_for_each_channel_tx_queue(tx_queue, channel)
+				tx_queue->channel = channel;
+		}
 	}
 
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		rx_queue->channel = &efx->channel[rx_queue->queue];
-		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
 	if (rc)
 		return rc;
 
-	/* Determine the number of channels and RX queues by trying to hook
+	/* Determine the number of channels and queues by trying to hook
 	 * in MSI-X interrupts. */
 	efx_probe_interrupts(efx);
 
 	efx_set_channels(efx);
+	efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
 
 	/* Initialise the interrupt moderation settings */
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
 	/* Mark the port as enabled so port reconfigurations can start, then
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
-	if (efx_dev_registered(efx))
-		efx_wake_queue(efx);
 
-	efx_for_each_channel(channel, efx)
+	efx_for_each_channel(channel, efx) {
+		if (efx_dev_registered(efx))
+			efx_wake_queue(channel);
 		efx_start_channel(channel);
+	}
 
 	efx_nic_enable_interrupts(efx);
 
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		efx_stop_queue(efx);
+		struct efx_channel *channel;
+		efx_for_each_channel(channel, efx)
+			efx_stop_queue(channel);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
-		" resetting channels\n",
-		atomic_read(&efx->netif_stop_count), efx->port_enabled);
+	EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+		efx->port_enabled);
 
 	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 }
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->net_dev = net_dev;
 	efx->rx_checksum_enabled = true;
-	spin_lock_init(&efx->netif_stop_lock);
 	spin_lock_init(&efx->stats_lock);
 	mutex_init(&efx->mac_lock);
 	efx->mac_op = type->default_mac_ops;
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
 	INIT_WORK(&efx->mac_work, efx_mac_work);
-	atomic_set(&efx->netif_stop_count, 1);
 
 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
 		channel = &efx->channel[i];
 		channel->efx = efx;
 		channel->channel = i;
 		channel->work_pending = false;
+		spin_lock_init(&channel->tx_stop_lock);
+		atomic_set(&channel->tx_stop_count, 1);
 	}
-	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
 		tx_queue = &efx->tx_queue[i];
 		tx_queue->efx = efx;
 		tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev(sizeof(*efx));
+	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
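
A note on the per-channel TX flow-control state this diff initialises (tx_stop_lock and tx_stop_count starting at 1): the initial count of 1 means every core queue is born stopped and is first woken by efx_start_all(). The matching efx_stop_queue()/efx_wake_queue() changes live in tx.c, outside this diffstat-limited view, so the standalone C11 model below is only an assumption about the intended refcount semantics, namely that stops nest and the kernel queue is woken only when the count returns to zero. The real driver additionally takes tx_stop_lock around the wake transition, which this model omits.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical model of one channel's nested TX stop/wake refcount. */
struct channel_model {
        atomic_int tx_stop_count;       /* > 0 => core TX queue is stopped */
};

static void stop_queue(struct channel_model *ch)
{
        if (atomic_fetch_add(&ch->tx_stop_count, 1) == 0)
                printf("stop core queue\n");    /* stand-in for netif_tx_stop_queue() */
}

static void wake_queue(struct channel_model *ch)
{
        if (atomic_fetch_sub(&ch->tx_stop_count, 1) == 1)
                printf("wake core queue\n");    /* stand-in for netif_tx_wake_queue() */
}

int main(void)
{
        struct channel_model ch;

        /* Born stopped, as efx_init_struct() sets tx_stop_count to 1 */
        atomic_init(&ch.tx_stop_count, 1);

        stop_queue(&ch);        /* 1 -> 2: nested stop, no queue transition */
        wake_queue(&ch);        /* 2 -> 1: still stopped */
        wake_queue(&ch);        /* 1 -> 0: queue actually woken (efx_start_all) */
        return 0;
}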