Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c            135
-rw-r--r--  drivers/net/sfc/efx.h              4
-rw-r--r--  drivers/net/sfc/ethtool.c          6
-rw-r--r--  drivers/net/sfc/falcon.c          20
-rw-r--r--  drivers/net/sfc/falcon_boards.c   13
-rw-r--r--  drivers/net/sfc/falcon_xmac.c     22
-rw-r--r--  drivers/net/sfc/mcdi.c            32
-rw-r--r--  drivers/net/sfc/mcdi_mac.c        25
-rw-r--r--  drivers/net/sfc/mcdi_pcol.h       71
-rw-r--r--  drivers/net/sfc/mcdi_phy.c       152
-rw-r--r--  drivers/net/sfc/net_driver.h      76
-rw-r--r--  drivers/net/sfc/nic.c            114
-rw-r--r--  drivers/net/sfc/nic.h              5
-rw-r--r--  drivers/net/sfc/selftest.c         8
-rw-r--r--  drivers/net/sfc/selftest.h         4
-rw-r--r--  drivers/net/sfc/siena.c           32
-rw-r--r--  drivers/net/sfc/tx.c              61
-rw-r--r--  drivers/net/sfc/workarounds.h      2
18 files changed, 510 insertions, 272 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 1ad61b7bba40..156460527231 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
  * never be concurrently called more than once on the same channel,
  * though different channels may be being processed concurrently.
  */
-static int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int budget)
 {
 	struct efx_nic *efx = channel->efx;
-	int rx_packets;
+	int spent;
 
 	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
 		     !channel->enabled))
 		return 0;
 
-	rx_packets = efx_nic_process_eventq(channel, rx_quota);
-	if (rx_packets == 0)
+	spent = efx_nic_process_eventq(channel, budget);
+	if (spent == 0)
 		return 0;
 
 	/* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 
 	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
 
-	return rx_packets;
+	return spent;
 }
 
 /* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
 {
 	struct efx_channel *channel =
 		container_of(napi, struct efx_channel, napi_str);
-	int rx_packets;
+	int spent;
 
 	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
 		  channel->channel, raw_smp_processor_id());
 
-	rx_packets = efx_process_channel(channel, budget);
+	spent = efx_process_channel(channel, budget);
 
-	if (rx_packets < budget) {
+	if (spent < budget) {
 		struct efx_nic *efx = channel->efx;
 
-		if (channel->used_flags & EFX_USED_BY_RX &&
+		if (channel->channel < efx->n_rx_channels &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
 			if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 		efx_channel_processed(channel);
 	}
 
-	return rx_packets;
+	return spent;
 }
 
 /* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
 
-	BUG_ON(!channel->used_flags);
 	BUG_ON(!channel->enabled);
 
 	/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
 
 	efx_for_each_channel(channel, efx) {
 		number = channel->channel;
-		if (efx->n_channels > efx->n_rx_queues) {
-			if (channel->channel < efx->n_rx_queues) {
+		if (efx->n_channels > efx->n_rx_channels) {
+			if (channel->channel < efx->n_rx_channels) {
 				type = "-rx";
 			} else {
 				type = "-tx";
-				number -= efx->n_rx_queues;
+				number -= efx->n_rx_channels;
 			}
 		}
 		snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_for_each_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
-
-	channel->used_flags = 0;
 }
 
 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
 	pci_disable_device(efx->pci_dev);
 }
 
-/* Get number of RX queues wanted. Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
 {
 	cpumask_var_t core_mask;
 	int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
 		struct msix_entry xentries[EFX_MAX_CHANNELS];
-		int wanted_ints;
-		int rx_queues;
+		int n_channels;
 
-		/* We want one RX queue and interrupt per CPU package
-		 * (or as specified by the rss_cpus module parameter).
-		 * We will need one channel per interrupt.
-		 */
-		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
-		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
-		wanted_ints = min(wanted_ints, max_channels);
+		n_channels = efx_wanted_channels();
+		if (separate_tx_channels)
+			n_channels *= 2;
+		n_channels = min(n_channels, max_channels);
 
-		for (i = 0; i < wanted_ints; i++)
+		for (i = 0; i < n_channels; i++)
 			xentries[i].entry = i;
-		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
 		if (rc > 0) {
 			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
-				" available (%d < %d).\n", rc, wanted_ints);
+				" available (%d < %d).\n", rc, n_channels);
 			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
-			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
-			wanted_ints = rc;
+			EFX_BUG_ON_PARANOID(rc >= n_channels);
+			n_channels = rc;
 			rc = pci_enable_msix(efx->pci_dev, xentries,
-					     wanted_ints);
+					     n_channels);
 		}
 
 		if (rc == 0) {
-			efx->n_rx_queues = min(rx_queues, wanted_ints);
-			efx->n_channels = wanted_ints;
-			for (i = 0; i < wanted_ints; i++)
+			efx->n_channels = n_channels;
+			if (separate_tx_channels) {
+				efx->n_tx_channels =
+					max(efx->n_channels / 2, 1U);
+				efx->n_rx_channels =
+					max(efx->n_channels -
+					    efx->n_tx_channels, 1U);
+			} else {
+				efx->n_tx_channels = efx->n_channels;
+				efx->n_rx_channels = efx->n_channels;
+			}
+			for (i = 0; i < n_channels; i++)
 				efx->channel[i].irq = xentries[i].vector;
 		} else {
 			/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	/* Try single interrupt MSI */
 	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
-		efx->n_rx_queues = 1;
 		efx->n_channels = 1;
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
 			efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 
 	/* Assume legacy interrupts */
 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-		efx->n_rx_queues = 1;
 		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 }
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	unsigned tx_channel_offset =
+		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
-	efx_for_each_tx_queue(tx_queue, efx) {
-		if (separate_tx_channels)
-			tx_queue->channel = &efx->channel[efx->n_channels-1];
-		else
-			tx_queue->channel = &efx->channel[0];
-		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+			channel->tx_queue = &efx->tx_queue[
+				(channel->channel - tx_channel_offset) *
+				EFX_TXQ_TYPES];
+			efx_for_each_channel_tx_queue(tx_queue, channel)
+				tx_queue->channel = channel;
+		}
 	}
 
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		rx_queue->channel = &efx->channel[rx_queue->queue];
-		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
 	if (rc)
 		return rc;
 
-	/* Determine the number of channels and RX queues by trying to hook
+	/* Determine the number of channels and queues by trying to hook
 	 * in MSI-X interrupts. */
 	efx_probe_interrupts(efx);
 
 	efx_set_channels(efx);
+	efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
 
 	/* Initialise the interrupt moderation settings */
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
 	/* Mark the port as enabled so port reconfigurations can start, then
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
-	if (efx_dev_registered(efx))
-		efx_wake_queue(efx);
 
-	efx_for_each_channel(channel, efx)
+	efx_for_each_channel(channel, efx) {
+		if (efx_dev_registered(efx))
+			efx_wake_queue(channel);
 		efx_start_channel(channel);
+	}
 
 	efx_nic_enable_interrupts(efx);
 
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		efx_stop_queue(efx);
+		struct efx_channel *channel;
+		efx_for_each_channel(channel, efx)
+			efx_stop_queue(channel);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
-		" resetting channels\n",
-		atomic_read(&efx->netif_stop_count), efx->port_enabled);
+	EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+		efx->port_enabled);
 
 	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 }
@@ -1861,6 +1870,7 @@ out:
 	}
 
 	if (disabled) {
+		dev_close(efx->net_dev);
 		EFX_ERR(efx, "has been disabled\n");
 		efx->state = STATE_DISABLED;
 	} else {
@@ -1884,8 +1894,7 @@ static void efx_reset_work(struct work_struct *data)
 	}
 
 	rtnl_lock();
-	if (efx_reset(efx, efx->reset_pending))
-		dev_close(efx->net_dev);
+	(void)efx_reset(efx, efx->reset_pending);
 	rtnl_unlock();
 }
 
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 	efx->net_dev = net_dev;
 	efx->rx_checksum_enabled = true;
-	spin_lock_init(&efx->netif_stop_lock);
 	spin_lock_init(&efx->stats_lock);
 	mutex_init(&efx->mac_lock);
 	efx->mac_op = type->default_mac_ops;
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
 	INIT_WORK(&efx->mac_work, efx_mac_work);
-	atomic_set(&efx->netif_stop_count, 1);
 
 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
 		channel = &efx->channel[i];
 		channel->efx = efx;
 		channel->channel = i;
 		channel->work_pending = false;
+		spin_lock_init(&channel->tx_stop_lock);
+		atomic_set(&channel->tx_stop_count, 1);
 	}
-	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
 		tx_queue = &efx->tx_queue[i];
 		tx_queue->efx = efx;
 		tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev(sizeof(*efx));
+	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
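
Note on the MSI-X sizing above: with separate_tx_channels the vector budget is split so TX gets half (rounded down, minimum 1) and RX gets the remainder. A small standalone sketch of that arithmetic, assuming the same semantics as the patch (illustrative, not driver code):

#include <stdio.h>

/* Sketch of the channel split in efx_probe_interrupts(): with
 * separate_tx_channels, n_tx = max(n/2, 1) and n_rx = max(n - n_tx, 1);
 * otherwise every channel carries both RX and TX. */
static void split_channels(unsigned n, int separate_tx,
			   unsigned *n_rx, unsigned *n_tx)
{
	if (separate_tx) {
		*n_tx = (n / 2 > 1) ? n / 2 : 1;
		*n_rx = (n - *n_tx > 1) ? n - *n_tx : 1;
	} else {
		*n_tx = n;
		*n_rx = n;
	}
}

int main(void)
{
	unsigned rx, tx;

	split_channels(3, 1, &rx, &tx);
	printf("3 vectors, separate TX: rx=%u tx=%u\n", rx, tx); /* rx=2 tx=1 */
	split_channels(4, 0, &rx, &tx);
	printf("4 vectors, shared:      rx=%u tx=%u\n", rx, tx); /* rx=4 tx=4 */
	return 0;
}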
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb3..ffd708c5304a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_nic *efx);
-extern void efx_wake_queue(struct efx_nic *efx);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
 #define EFX_TXQ_SIZE 1024
 #define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02a928e..22026bfbc4c1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
 {
 	struct efx_tx_queue *tx_queue;
 
-	efx_for_each_tx_queue(tx_queue, efx) {
+	efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
 		efx_fill_test(test_index++, strings, data,
 			      &lb_tests->tx_sent[tx_queue->queue],
 			      EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	efx_for_each_tx_queue(tx_queue, efx) {
 		channel = tx_queue->channel;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
-			if (channel->used_flags != EFX_USED_BY_RX_TX)
+			if (channel->channel < efx->n_rx_channels)
 				coalesce->tx_coalesce_usecs_irq =
 					channel->irq_moderation;
 			else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_tx_queue(tx_queue, efx) {
-		if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+		if ((tx_queue->channel->channel < efx->n_rx_channels) &&
 		    tx_usecs) {
 			EFX_ERR(efx, "Channel is shared. "
 				"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d294d66fd600..655b697b45b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
 		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
-	/* Check to see if we have a serious error condition */
-	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
-	if (unlikely(syserr))
-		return efx_nic_fatal_interrupt(efx);
-
 	/* Determine interrupting queues, clear interrupt status
 	 * register and acknowledge the device interrupt.
 	 */
 	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
 	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+
+	/* Check to see if we have a serious error condition */
+	if (queues & (1U << efx->fatal_irq_level)) {
+		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+		if (unlikely(syserr))
+			return efx_nic_fatal_interrupt(efx);
+	}
+
 	EFX_ZERO_OWORD(*int_ker);
 	wmb(); /* Ensure the vector is cleared before interrupt ack */
 	falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
 	/* Ensure the correct MAC is selected before statistics
 	 * are re-enabled by the caller */
 	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+	/* This can run even when the GMAC is selected */
+	falcon_setup_xaui(efx);
 }
 
 void falcon_drain_tx_fifo(struct efx_nic *efx)
@@ -1320,7 +1326,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
 
 	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
 
-	falcon_probe_board(efx, board_rev);
+	rc = falcon_probe_board(efx, board_rev);
+	if (rc)
+		goto fail2;
 
 	kfree(nvconfig);
 	return 0;
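
The legacy ISR change above defers the (rare) fatal-error check until the interrupt vector actually flags it: the fatal condition appears as a bit at fatal_irq_level in the same queues bitmap. A toy sketch of that gating, with the surrounding driver state stubbed out (illustrative only):

/* Sketch: gate the fatal-error handling on the per-NIC fatal_irq_level
 * bit in the decoded interrupt vector, as the falcon.c hunk does.
 * `queues` stands in for the FSF_AZ_NET_IVEC_INT_Q field. */
static int legacy_irq_is_fatal(unsigned int queues,
			       unsigned int fatal_irq_level)
{
	return (queues & (1U << fatal_irq_level)) != 0;
}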
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fddd72f2..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
 	},
 };
 
-static const struct falcon_board_type falcon_dummy_board = {
-	.init		= efx_port_dummy_op_int,
-	.init_phy	= efx_port_dummy_op_void,
-	.fini		= efx_port_dummy_op_void,
-	.set_id_led	= efx_port_dummy_op_set_id_led,
-	.monitor	= efx_port_dummy_op_int,
-};
-
-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
 {
 	struct falcon_board *board = falcon_board(efx);
 	u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
 			(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
 			? board->type->ref_model : board->type->gen_type,
 			'A' + board->major, board->minor);
+		return 0;
 	} else {
 		EFX_ERR(efx, "unknown board type %d\n", type_id);
-		board->type = &falcon_dummy_board;
+		return -ENODEV;
 	}
 }
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c67a20..c84a2ce2ccbb 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
  *************************************************************************/
 
 /* Configure the XAUI driver that is an output from Falcon */
-static void falcon_setup_xaui(struct efx_nic *efx)
+void falcon_setup_xaui(struct efx_nic *efx)
 {
 	efx_oword_t sdctl, txdrv;
 
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
 	return -ETIMEDOUT;
 }
 
-static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
+static void falcon_ack_status_intr(struct efx_nic *efx)
 {
 	efx_oword_t reg;
 
 	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
 		return;
 
-	/* We expect xgmii faults if the wireside link is up */
+	/* We expect xgmii faults if the wireside link is down */
 	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
 		return;
 
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
 	if (efx->xmac_poll_required)
 		return;
 
-	/* Flush the ISR */
-	if (enable)
-		efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-
-	EFX_POPULATE_OWORD_2(reg,
-			     FRF_AB_XM_MSK_RMTFLT, !enable,
-			     FRF_AB_XM_MSK_LCLFLT, !enable);
-	efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
+	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
 }
 
 static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
 
 static int falcon_reconfigure_xmac(struct efx_nic *efx)
 {
-	falcon_mask_status_intr(efx, false);
-
 	falcon_reconfigure_xgxs_core(efx);
 	falcon_reconfigure_xmac_core(efx);
 
 	falcon_reconfigure_mac_wrapper(efx);
 
 	efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
-	falcon_mask_status_intr(efx, true);
+	falcon_ack_status_intr(efx);
 
 	return 0;
 }
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
 	    !efx->xmac_poll_required)
 		return;
 
-	falcon_mask_status_intr(efx, false);
 	efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
-	falcon_mask_status_intr(efx, true);
+	falcon_ack_status_intr(efx);
 }
 
 struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c77414..93cc3c1b9450 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
 	}
 
 	if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
-		rc = -EMSGSIZE;
+		rc = -EIO;
 		goto fail;
 	}
 
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
 		goto fail;
-	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+		rc = -EIO;
 		goto fail;
+	}
 
 	if (was_attached != NULL)
 		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
 		goto fail;
 
 	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
-		rc = -EMSGSIZE;
+		rc = -EIO;
 		goto fail;
 	}
 
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
 		goto fail;
-	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
+	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+		rc = -EIO;
 		goto fail;
+	}
 
 	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
 	return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
 		goto fail;
-	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
+	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+		rc = -EIO;
 		goto fail;
+	}
 
 	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
 	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
 
 	rc = efx_mcdi_nvram_types(efx, &nvram_types);
 	if (rc)
-		return rc;
+		goto fail1;
 
 	type = 0;
 	while (nvram_types != 0) {
 		if (nvram_types & 1) {
 			rc = efx_mcdi_nvram_test(efx, type);
 			if (rc)
-				return rc;
+				goto fail2;
 		}
 		type++;
 		nvram_types >>= 1;
 	}
 
 	return 0;
+
+fail2:
+	EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+fail1:
+	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
 }
 
 static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
 	if (rc)
 		return rc;
 	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
-		return -EINVAL;
+		return -EIO;
 
 	/* Print out any recorded assertion state */
 	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
 		goto fail;
 
 	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
-		rc = -EMSGSIZE;
+		rc = -EIO;
 		goto fail;
 	}
 
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
 		goto fail;
 
 	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
-		rc = -EMSGSIZE;
+		rc = -EIO;
 		goto fail;
 	}
 
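
All of the mcdi.c hunks above converge on one validation pattern: a short MCDI response is treated as an I/O protocol failure (-EIO), not a size or argument error. A condensed sketch of the pattern with hypothetical command macros (EXAMPLE_CMD and EXAMPLE_OUT_LEN are placeholders, not real MCDI names):

/* Sketch of the MCDI response validation pattern used throughout
 * this file: a truncated reply from the MC is a protocol failure,
 * so report -EIO rather than -EMSGSIZE/-EINVAL. */
static int example_mcdi_call(struct efx_nic *efx)
{
	u8 outbuf[EXAMPLE_OUT_LEN];	/* hypothetical length macro */
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, EXAMPLE_CMD, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < EXAMPLE_OUT_LEN) {
		rc = -EIO;		/* truncated response */
		goto fail;
	}
	return 0;
fail:
	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}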
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1e412a..39182631ac92 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
 	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
 	int rc;
 	efx_dword_t *cmd_ptr;
-	int period = 1000;
+	int period = enable ? 1000 : 0;
 	u32 addr_hi;
 	u32 addr_lo;
 
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
 	cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
-	if (enable)
-		EFX_POPULATE_DWORD_6(*cmd_ptr,
-				     MC_CMD_MAC_STATS_CMD_DMA, 1,
-				     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
-				     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
-				     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
-				     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
-				     MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
-	else
-		EFX_POPULATE_DWORD_5(*cmd_ptr,
-				     MC_CMD_MAC_STATS_CMD_DMA, 0,
-				     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
-				     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
-				     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
-				     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
+	EFX_POPULATE_DWORD_7(*cmd_ptr,
+			     MC_CMD_MAC_STATS_CMD_DMA, !!enable,
+			     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+			     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+			     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
+			     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+			     MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
+			     MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
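
The mcdi_mac.c rewrite folds the enable and disable arms into one populate call: period collapses to 0 on disable and the DMA/PERIODIC_ENABLE flags key off !!enable. The resulting field values, expressed as a small illustrative helper (the struct is a stand-in for the packed command dword, not driver code):

/* Illustrative only: the field values the single EFX_POPULATE_DWORD_7()
 * call produces for the two cases. PERIODIC_CHANGE is always 1 and
 * PERIODIC_NOEVENT suppresses per-period MC events. */
struct mac_stats_cmd_fields {
	int dma, clear, periodic_change, periodic_enable, noevent, period_ms;
};

static struct mac_stats_cmd_fields mac_stats_fields(int enable, int clear)
{
	struct mac_stats_cmd_fields f = {
		.dma		 = !!enable,
		.clear		 = clear,
		.periodic_change = 1,
		.periodic_enable = !!enable,
		.noevent	 = 1,
		.period_ms	 = enable ? 1000 : 0,
	};
	return f;
}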
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302695b3..90359e644006 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
  * bist output. The driver should only consume the BIST output
  * after validating OUTLEN and PHY_CFG.PHY_TYPE.
  *
- * If a driver can't succesfully parse the BIST output, it should
+ * If a driver can't successfully parse the BIST output, it should
  * still respect the pass/Fail in OUT.RESULT
  *
  * Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
 #define MC_CMD_POLL_BIST 0x26
 #define MC_CMD_POLL_BIST_IN_LEN 0
 #define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
-#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
 #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
 #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
 #define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
 /* Generic: */
 #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
 /* SFT9001-specific: */
-/* (offset 4 unused?) */
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
 #define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
 #define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
 #define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
 /* MC_CMD_PHY_STATS:
  * Get generic PHY statistics
  *
- * This call returns the statistics for a generic PHY, by direct DMA
- * into host memory, in a sparse array (indexed by the enumerate).
- * Each value is represented by a 32bit number.
+ * This call returns the statistics for a generic PHY in a sparse
+ * array (indexed by the enumerate). Each value is represented by
+ * a 32bit number.
+ *
+ * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
+ * may be read directly out of shared memory. If DMA_ADDR != 0, then
+ * the statistics are dmad to that (page-aligned location)
  *
  * Locks required: None
  * Returns: 0, ETIME
@@ -1066,7 +1069,8 @@
 #define MC_CMD_PHY_STATS_IN_LEN 8
 #define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
 #define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_PHY_STATS_OUT_LEN 0
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
 
 /* Unified MAC statistics enumeration */
 #define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
 #define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
 #define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
 #define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Fields only relevent when PERIODIC_CHANGE is set */
+/* Remaining PERIOD* fields only relevent when PERIODIC_CHANGE is set */
 #define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
 #define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
 #define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
 #define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
 #define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
 #define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
 #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
 #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
 #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
 
+/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
+ * Change bits of network port state for test purposes in ways that would never be
+ * useful in normal operation and so need a special command to change. */
+#define MC_CMD_TEST_HACK 0x2f
+#define MC_CMD_TEST_HACK_IN_LEN 8
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
+#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
+#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
+#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
+#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
+#define MC_CMD_TEST_HACK_OUT_LEN 0
+
+/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
+ * is a warranty-voiding operation.
+ *
+ * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP
+ * followed by 4 32-bit values: min(warning), max(warning), min(fatal), max(fatal). Which
+ * of these limits are meaningful and what their interpretation is is sensor-specific.
+ *
+ * OUT: nothing
+ *
+ * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
+ * out of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
 /* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
  * used for post-3.0 extensions. If you run out of space, look for gaps or
  * commands that are unused in the existing range. */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f2354696663..6032c0e1f1f8 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
 #include "mcdi.h"
 #include "mcdi_pcol.h"
 #include "mdio_10g.h"
+#include "nic.h"
+#include "selftest.h"
 
 struct efx_mcdi_phy_cfg {
 	u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
 		goto fail;
 
 	if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
-		rc = -EMSGSIZE;
+		rc = -EIO;
 		goto fail;
 	}
 
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
 		goto fail;
 
 	if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
-		rc = -EMSGSIZE;
+		rc = -EIO;
 		goto fail;
 	}
 
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
 		return rc;
 
 	if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
-		return -EMSGSIZE;
+		return -EIO;
 	if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
 		return -EINVAL;
 
 	return 0;
 }
 
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+	"cable.pairA.length",
+	"cable.pairB.length",
+	"cable.pairC.length",
+	"cable.pairD.length",
+	"cable.pairA.status",
+	"cable.pairB.status",
+	"cable.pairC.status",
+	"cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+			 int *results)
+{
+	unsigned int retry, i, count = 0;
+	size_t outlen;
+	u32 status;
+	u8 *buf, *ptr;
+	int rc;
+
+	buf = kzalloc(0x100, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+	MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
+	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
+			  NULL, 0, NULL);
+	if (rc)
+		goto out;
+
+	/* Wait up to 10s for BIST to finish */
+	for (retry = 0; retry < 100; ++retry) {
+		BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+				  buf, 0x100, &outlen);
+		if (rc)
+			goto out;
+
+		status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+		if (status != MC_CMD_POLL_BIST_RUNNING)
+			goto finished;
+
+		msleep(100);
+	}
+
+	rc = -ETIMEDOUT;
+	goto out;
+
+finished:
+	results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+	/* SFT9001 specific cable diagnostics output */
+	if (efx->phy_type == PHY_TYPE_SFT9001B &&
+	    (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+	     bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+		ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+		if (status == MC_CMD_POLL_BIST_PASSED &&
+		    outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+			for (i = 0; i < 8; i++) {
+				results[count + i] =
+					EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+							EFX_DWORD_0);
+			}
+		}
+		count += 8;
+	}
+	rc = count;
+
+out:
+	kfree(buf);
+
+	return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+				  unsigned flags)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+	u32 mode;
+	int rc;
+
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+		rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+		if (rc < 0)
+			return rc;
+
+		results += rc;
+	}
+
+	/* If we support both LONG and SHORT, then run each in response to
+	 * break or not. Otherwise, run the one we support */
+	mode = 0;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+		if ((flags & ETH_TEST_FL_OFFLINE) &&
+		    (phy_cfg->flags &
+		     (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+			mode = MC_CMD_PHY_BIST_CABLE_LONG;
+		else
+			mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+	} else if (phy_cfg->flags &
+		   (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+		mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+	if (mode != 0) {
+		rc = efx_mcdi_bist(efx, mode, results);
+		if (rc < 0)
+			return rc;
+		results += rc;
+	}
+
+	return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+	struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+		if (index == 0)
+			return "bist";
+		--index;
+	}
+
+	if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
+			      (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+		if (index == 0)
+			return "cable";
+		--index;
+
+		if (efx->phy_type == PHY_TYPE_SFT9001B) {
+			if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+				return mcdi_sft9001_cable_diag_names[index];
+			index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+		}
+	}
+
+	return NULL;
+}
+
 struct efx_phy_operations efx_mcdi_phy_ops = {
 	.probe		= efx_mcdi_phy_probe,
 	.init		= efx_port_dummy_op_int,
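
efx_mcdi_bist() returns the number of result slots it filled, which is how efx_mcdi_phy_run_tests() advances its cursor through the caller's array. A sketch of a consumer pairing run_tests with test_name (illustrative; the array size of 9 assumes one BIST result plus the eight SFT9001 cable-diagnostic values above):

static void example_report_phy_tests(struct efx_nic *efx)
{
	int results[9];		/* 1 BIST result + 8 cable-diag values */
	const char *name;
	unsigned int i;

	if (efx->phy_op->run_tests == NULL ||
	    efx->phy_op->run_tests(efx, results, ETH_TEST_FL_OFFLINE) < 0)
		return;

	/* test_name() returns NULL past the last valid index */
	for (i = 0; (name = efx->phy_op->test_name(efx, i)) != NULL; i++)
		EFX_LOG(efx, "%s = %d\n", name, results[i]);
}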
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e272097..2e6fd89f2a72 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
 #define EFX_MAX_CHANNELS 32
 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
 
-#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
-#define EFX_TX_QUEUE_NO_CSUM 1
-#define EFX_TX_QUEUE_COUNT 2
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
+#define EFX_TXQ_TYPE_OFFLOAD	1
+#define EFX_TXQ_TYPES		2
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
 struct efx_tx_queue {
 	/* Members which don't change on the fast path */
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
-	int queue;
+	unsigned queue;
 	struct efx_channel *channel;
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
 };
 
 
-/* Flags for channel->used_flags */
-#define EFX_USED_BY_RX 1
-#define EFX_USED_BY_TX 2
-#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
-
 enum efx_rx_alloc_method {
 	RX_ALLOC_METHOD_AUTO = 0,
 	RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
  * @efx: Associated Efx NIC
  * @channel: Channel instance number
  * @name: Name for channel and IRQ
- * @used_flags: Channel is used by net driver
  * @enabled: Channel enabled indicator
  * @irq: IRQ number (MSI and MSI-X only)
  * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
  * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
  */
 struct efx_channel {
 	struct efx_nic *efx;
 	int channel;
 	char name[IFNAMSIZ + 6];
-	int used_flags;
 	bool enabled;
 	int irq;
 	unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
 	struct efx_rx_buffer *rx_pkt;
 	bool rx_pkt_csummed;
 
+	struct efx_tx_queue *tx_queue;
+	atomic_t tx_stop_count;
+	spinlock_t tx_stop_lock;
 };
 
 enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
  * @rx_queue: RX DMA queues
  * @channel: Channels
  * @next_buffer_table: First available buffer table id
- * @n_rx_queues: Number of RX queues
  * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
  *	This register is written with the SMP processor ID whenever an
  *	interrupt is handled. It is used by efx_nic_test_interrupt()
  *	to verify that an interrupt has occurred.
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @fatal_irq_level: IRQ level (bit number) used for serious errors
  * @spi_flash: SPI flash device
  *	This field will be %NULL if no flash device is present (or for Siena).
  * @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @rx_checksum_enabled: RX checksumming enabled
- * @netif_stop_count: Port stop count
- * @netif_stop_lock: Port stop lock
  * @mac_stats: MAC statistics. These include all statistics the MACs
  *	can provide. Generic code converts these into a standard
  *	&struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
 	enum nic_state state;
 	enum reset_type reset_pending;
 
-	struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
+	struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
 	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
 	struct efx_channel channel[EFX_MAX_CHANNELS];
 
 	unsigned next_buffer_table;
-	int n_rx_queues;
-	int n_channels;
+	unsigned n_channels;
+	unsigned n_rx_channels;
+	unsigned n_tx_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
 
@@ -755,7 +760,8 @@ struct efx_nic {
 
 	struct efx_buffer irq_status;
 	volatile signed int last_irq_cpu;
-	unsigned long irq_zero_count;
+	unsigned irq_zero_count;
+	unsigned fatal_irq_level;
 
 	struct efx_spi_device *spi_flash;
 	struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
 	struct net_device *net_dev;
 	bool rx_checksum_enabled;
 
-	atomic_t netif_stop_count;
-	spinlock_t netif_stop_lock;
-
 	struct efx_mac_stats mac_stats;
 	struct efx_buffer stats_buffer;
 	spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
924 927
925/* Iterate over all used channels */ 928/* Iterate over all used channels */
926#define efx_for_each_channel(_channel, _efx) \ 929#define efx_for_each_channel(_channel, _efx) \
927 for (_channel = &_efx->channel[0]; \ 930 for (_channel = &((_efx)->channel[0]); \
928 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \ 931 _channel < &((_efx)->channel[(efx)->n_channels]); \
929 _channel++) \ 932 _channel++)
930 if (!_channel->used_flags) \
931 continue; \
932 else
933 933
934/* Iterate over all used TX queues */ 934/* Iterate over all used TX queues */
935#define efx_for_each_tx_queue(_tx_queue, _efx) \ 935#define efx_for_each_tx_queue(_tx_queue, _efx) \
936 for (_tx_queue = &_efx->tx_queue[0]; \ 936 for (_tx_queue = &((_efx)->tx_queue[0]); \
937 _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \ 937 _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
938 (_efx)->n_tx_channels]); \
938 _tx_queue++) 939 _tx_queue++)
939 940
940/* Iterate over all TX queues belonging to a channel */ 941/* Iterate over all TX queues belonging to a channel */
941#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 942#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
942 for (_tx_queue = &_channel->efx->tx_queue[0]; \ 943 for (_tx_queue = (_channel)->tx_queue; \
943 _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \ 944 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
944 _tx_queue++) \ 945 _tx_queue++)
945 if (_tx_queue->channel != _channel) \
946 continue; \
947 else
948 946
949/* Iterate over all used RX queues */ 947/* Iterate over all used RX queues */
950#define efx_for_each_rx_queue(_rx_queue, _efx) \ 948#define efx_for_each_rx_queue(_rx_queue, _efx) \
951 for (_rx_queue = &_efx->rx_queue[0]; \ 949 for (_rx_queue = &((_efx)->rx_queue[0]); \
952 _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \ 950 _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
953 _rx_queue++) 951 _rx_queue++)
954 952
955/* Iterate over all RX queues belonging to a channel */ 953/* Iterate over all RX queues belonging to a channel */
956#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 954#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
957 for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \ 955 for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
958 _rx_queue; \ 956 _rx_queue; \
959 _rx_queue = NULL) \ 957 _rx_queue = NULL) \
960 if (_rx_queue->channel != _channel) \ 958 if (_rx_queue->channel != (_channel)) \
961 continue; \ 959 continue; \
962 else 960 else
963 961
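[Editor's note: a minimal standalone sketch (not driver code) of why the rewritten channel macro above no longer needs a used_flags filter: after the change, entries [0, n_channels) are all valid by construction, so iteration is a plain bounded loop. Struct shapes are simplified stand-ins.]

/* Illustrative sketch only: minimal stand-ins for the driver types. */
#include <stdio.h>

#define EFX_MAX_CHANNELS 32

struct efx_channel { int channel; };
struct efx_nic {
	unsigned n_channels;
	struct efx_channel channel[EFX_MAX_CHANNELS];
};

#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = &((_efx)->channel[0]);				\
	     _channel < &((_efx)->channel[(_efx)->n_channels]);		\
	     _channel++)

int main(void)
{
	struct efx_nic efx = { .n_channels = 4 };
	struct efx_channel *channel;
	unsigned i;

	for (i = 0; i < efx.n_channels; i++)
		efx.channel[i].channel = i;

	/* No dead iterations past n_channels, no per-entry flag test */
	efx_for_each_channel(channel, &efx)
		printf("channel %d\n", channel->channel);
	return 0;
}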
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e348307..5d3aaec58556 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
418 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 418 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
419 419
420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
421 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; 421 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
424 !csum); 424 !csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
431 efx_oword_t reg; 431 efx_oword_t reg;
432 432
433 /* Only 128 bits in this register */ 433 /* Only 128 bits in this register */
434 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); 434 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
435 435
436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); 436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
437 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) 437 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
438 clear_bit_le(tx_queue->queue, (void *)&reg); 438 clear_bit_le(tx_queue->queue, (void *)&reg);
439 else 439 else
440 set_bit_le(tx_queue->queue, (void *)&reg); 440 set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
654 * The NIC batches TX completion events; the message we receive is of 654 * The NIC batches TX completion events; the message we receive is of
655 * the form "complete all TX events up to this index". 655 * the form "complete all TX events up to this index".
656 */ 656 */
657static void 657static int
658efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 658efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
659{ 659{
660 unsigned int tx_ev_desc_ptr; 660 unsigned int tx_ev_desc_ptr;
661 unsigned int tx_ev_q_label; 661 unsigned int tx_ev_q_label;
662 struct efx_tx_queue *tx_queue; 662 struct efx_tx_queue *tx_queue;
663 struct efx_nic *efx = channel->efx; 663 struct efx_nic *efx = channel->efx;
664 int tx_packets = 0;
664 665
665 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 666 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
666 /* Transmit completion */ 667 /* Transmit completion */
667 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 668 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
668 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 669 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
669 tx_queue = &efx->tx_queue[tx_ev_q_label]; 670 tx_queue = &efx->tx_queue[tx_ev_q_label];
670 channel->irq_mod_score += 671 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
671 (tx_ev_desc_ptr - tx_queue->read_count) & 672 EFX_TXQ_MASK);
672 EFX_TXQ_MASK; 673 channel->irq_mod_score += tx_packets;
673 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 674 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
674 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 675 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
675 /* Rewrite the FIFO write pointer */ 676 /* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
689 EFX_QWORD_FMT"\n", channel->channel, 690 EFX_QWORD_FMT"\n", channel->channel,
690 EFX_QWORD_VAL(*event)); 691 EFX_QWORD_VAL(*event));
691 } 692 }
693
694 return tx_packets;
692} 695}
693 696
694/* Detect errors included in the rx_evt_pkt_ok bit. */ 697/* Detect errors included in the rx_evt_pkt_ok bit. */
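[Editor's note: a sketch of the batched-completion arithmetic introduced above. The event carries only the index of the last completed descriptor; the number of completed packets is the masked distance from the queue's read point, which is wrap-safe when the ring size is a power of two so that EFX_TXQ_MASK = size - 1. Values below are illustrative.]

#include <assert.h>

#define EFX_TXQ_SIZE 1024u
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)

static unsigned tx_completions(unsigned read_count, unsigned tx_ev_desc_ptr)
{
	/* Wrap-safe even when the descriptor pointer has rolled over */
	return (tx_ev_desc_ptr - read_count) & EFX_TXQ_MASK;
}

int main(void)
{
	assert(tx_completions(10, 14) == 4);
	/* Pointer wrapped past the end of the ring */
	assert(tx_completions(1020, 2) == 6);
	return 0;
}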
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
947 } 950 }
948} 951}
949 952
950int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota) 953int efx_nic_process_eventq(struct efx_channel *channel, int budget)
951{ 954{
952 unsigned int read_ptr; 955 unsigned int read_ptr;
953 efx_qword_t event, *p_event; 956 efx_qword_t event, *p_event;
954 int ev_code; 957 int ev_code;
955 int rx_packets = 0; 958 int tx_packets = 0;
959 int spent = 0;
956 960
957 read_ptr = channel->eventq_read_ptr; 961 read_ptr = channel->eventq_read_ptr;
958 962
959 do { 963 for (;;) {
960 p_event = efx_event(channel, read_ptr); 964 p_event = efx_event(channel, read_ptr);
961 event = *p_event; 965 event = *p_event;
962 966
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
970 /* Clear this event by marking it all ones */ 974 /* Clear this event by marking it all ones */
971 EFX_SET_QWORD(*p_event); 975 EFX_SET_QWORD(*p_event);
972 976
977 /* Increment read pointer */
978 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
979
973 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 980 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
974 981
975 switch (ev_code) { 982 switch (ev_code) {
976 case FSE_AZ_EV_CODE_RX_EV: 983 case FSE_AZ_EV_CODE_RX_EV:
977 efx_handle_rx_event(channel, &event); 984 efx_handle_rx_event(channel, &event);
978 ++rx_packets; 985 if (++spent == budget)
986 goto out;
979 break; 987 break;
980 case FSE_AZ_EV_CODE_TX_EV: 988 case FSE_AZ_EV_CODE_TX_EV:
981 efx_handle_tx_event(channel, &event); 989 tx_packets += efx_handle_tx_event(channel, &event);
990 if (tx_packets >= EFX_TXQ_SIZE) {
991 spent = budget;
992 goto out;
993 }
982 break; 994 break;
983 case FSE_AZ_EV_CODE_DRV_GEN_EV: 995 case FSE_AZ_EV_CODE_DRV_GEN_EV:
984 channel->eventq_magic = EFX_QWORD_FIELD( 996 channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
1001 " (data " EFX_QWORD_FMT ")\n", channel->channel, 1013 " (data " EFX_QWORD_FMT ")\n", channel->channel,
1002 ev_code, EFX_QWORD_VAL(event)); 1014 ev_code, EFX_QWORD_VAL(event));
1003 } 1015 }
1016 }
1004 1017
1005 /* Increment read pointer */ 1018out:
1006 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1007
1008 } while (rx_packets < rx_quota);
1009
1010 channel->eventq_read_ptr = read_ptr; 1019 channel->eventq_read_ptr = read_ptr;
1011 return rx_packets; 1020 return spent;
1012} 1021}
1013 1022
1014 1023
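[Editor's note: a hedged sketch of the budget-driven loop shape adopted above: consume events until the ring drains or the budget is spent, advance the read pointer before dispatch, and report how much budget was used so the NAPI caller can decide whether to poll again. Event decoding is faked; names are stand-ins.]

enum fake_ev { FAKE_EV_NONE, FAKE_EV_RX, FAKE_EV_OTHER };

static int process_eventq_sketch(enum fake_ev *ring, unsigned size,
				 unsigned *read_ptr, int budget)
{
	int spent = 0;

	for (;;) {
		unsigned idx = *read_ptr & (size - 1);
		enum fake_ev ev = ring[idx];

		if (ev == FAKE_EV_NONE)		/* ring drained */
			break;
		ring[idx] = FAKE_EV_NONE;	/* clear consumed event */
		*read_ptr = (*read_ptr + 1) & (size - 1);

		if (ev == FAKE_EV_RX && ++spent == budget)
			break;			/* budget exhausted */
	}
	return spent;		/* spent < budget implies the ring is empty */
}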
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1123 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { 1132 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1124 ev_queue = EFX_QWORD_FIELD(*event, 1133 ev_queue = EFX_QWORD_FIELD(*event,
1125 FSF_AZ_DRIVER_EV_SUBDATA); 1134 FSF_AZ_DRIVER_EV_SUBDATA);
1126 if (ev_queue < EFX_TX_QUEUE_COUNT) { 1135 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1127 tx_queue = efx->tx_queue + ev_queue; 1136 tx_queue = efx->tx_queue + ev_queue;
1128 tx_queue->flushed = FLUSH_DONE; 1137 tx_queue->flushed = FLUSH_DONE;
1129 } 1138 }
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1133 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1142 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1134 ev_failed = EFX_QWORD_FIELD( 1143 ev_failed = EFX_QWORD_FIELD(
1135 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1144 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1136 if (ev_queue < efx->n_rx_queues) { 1145 if (ev_queue < efx->n_rx_channels) {
1137 rx_queue = efx->rx_queue + ev_queue; 1146 rx_queue = efx->rx_queue + ev_queue;
1138 rx_queue->flushed = 1147 rx_queue->flushed =
1139 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1148 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1229 bool enabled, bool force) 1238 bool enabled, bool force)
1230{ 1239{
1231 efx_oword_t int_en_reg_ker; 1240 efx_oword_t int_en_reg_ker;
1232 unsigned int level = 0;
1233
1234 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1235 /* Set the level always even if we're generating a test
1236 * interrupt, because our legacy interrupt handler is safe */
1237 level = 0x1f;
1238 1241
1239 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1242 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1240 FRF_AZ_KER_INT_LEVE_SEL, level, 1243 FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1241 FRF_AZ_KER_INT_KER, force, 1244 FRF_AZ_KER_INT_KER, force,
1242 FRF_AZ_DRV_INT_EN_KER, enabled); 1245 FRF_AZ_DRV_INT_EN_KER, enabled);
1243 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1246 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1291 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1294 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1292 EFX_OWORD_VAL(fatal_intr), 1295 EFX_OWORD_VAL(fatal_intr),
1293 error ? "disabling bus mastering" : "no recognised error"); 1296 error ? "disabling bus mastering" : "no recognised error");
1294 if (error == 0)
1295 goto out;
1296 1297
1297 /* If this is a memory parity error dump which blocks are offending */ 1298 /* If this is a memory parity error dump which blocks are offending */
1298 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER); 1299 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1300 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1299 if (mem_perr) { 1301 if (mem_perr) {
1300 efx_oword_t reg; 1302 efx_oword_t reg;
1301 efx_reado(efx, &reg, FR_AZ_MEM_STAT); 1303 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1324 "NIC will be disabled\n"); 1326 "NIC will be disabled\n");
1325 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1327 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1326 } 1328 }
1327out: 1329
1328 return IRQ_HANDLED; 1330 return IRQ_HANDLED;
1329} 1331}
1330 1332
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1346 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1348 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1347 1349
1348 /* Check to see if we have a serious error condition */ 1350 /* Check to see if we have a serious error condition */
1349 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1351 if (queues & (1U << efx->fatal_irq_level)) {
1350 if (unlikely(syserr)) 1352 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1351 return efx_nic_fatal_interrupt(efx); 1353 if (unlikely(syserr))
1354 return efx_nic_fatal_interrupt(efx);
1355 }
1352 1356
1353 if (queues != 0) { 1357 if (queues != 0) {
1354 if (EFX_WORKAROUND_15783(efx)) 1358 if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1362 } 1366 }
1363 result = IRQ_HANDLED; 1367 result = IRQ_HANDLED;
1364 1368
1365 } else if (EFX_WORKAROUND_15783(efx) && 1369 } else if (EFX_WORKAROUND_15783(efx)) {
1366 efx->irq_zero_count++ == 0) {
1367 efx_qword_t *event; 1370 efx_qword_t *event;
1368 1371
1369 /* Ensure we rearm all event queues */ 1372 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1373 * because this might be a shared interrupt. */
1374 if (efx->irq_zero_count++ == 0)
1375 result = IRQ_HANDLED;
1376
1377 /* Ensure we schedule or rearm all event queues */
1370 efx_for_each_channel(channel, efx) { 1378 efx_for_each_channel(channel, efx) {
1371 event = efx_event(channel, channel->eventq_read_ptr); 1379 event = efx_event(channel, channel->eventq_read_ptr);
1372 if (efx_event_present(event)) 1380 if (efx_event_present(event))
1373 efx_schedule_channel(channel); 1381 efx_schedule_channel(channel);
1382 else
1383 efx_nic_eventq_read_ack(channel);
1374 } 1384 }
1375
1376 result = IRQ_HANDLED;
1377 } 1385 }
1378 1386
1379 if (result == IRQ_HANDLED) { 1387 if (result == IRQ_HANDLED) {
1380 efx->last_irq_cpu = raw_smp_processor_id(); 1388 efx->last_irq_cpu = raw_smp_processor_id();
1381 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1389 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1382 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1390 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1383 } else if (EFX_WORKAROUND_15783(efx)) {
1384 /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
1385 * because this might be a shared interrupt, but we do need to
1386 * check the channel every time and preemptively rearm it if
1387 * it's idle. */
1388 efx_for_each_channel(channel, efx) {
1389 if (!channel->work_pending)
1390 efx_nic_eventq_read_ack(channel);
1391 }
1392 } 1391 }
1393 1392
1394 return result; 1393 return result;
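[Editor's note: a tiny sketch of the shared-interrupt rule in the rework above: on a zero ISR read the handler may claim the IRQ exactly once, since repeatedly returning IRQ_HANDLED for zero reads would starve other devices sharing the line. irq_zero_count stands in for the driver field of the same name.]

#include <assert.h>

/* 1 => IRQ_HANDLED, 0 => IRQ_NONE (let other handlers on the line run) */
static int legacy_isr_zero_claim(unsigned *irq_zero_count)
{
	return (*irq_zero_count)++ == 0;
}

int main(void)
{
	unsigned zero_count = 0;

	assert(legacy_isr_zero_claim(&zero_count) == 1); /* first zero read */
	assert(legacy_isr_zero_claim(&zero_count) == 0); /* later zero reads */
	return 0;
}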
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1413 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1412 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1414 1413
1415 /* Check to see if we have a serious error condition */ 1414 /* Check to see if we have a serious error condition */
1416 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1415 if (channel->channel == efx->fatal_irq_level) {
1417 if (unlikely(syserr)) 1416 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1418 return efx_nic_fatal_interrupt(efx); 1417 if (unlikely(syserr))
1418 return efx_nic_fatal_interrupt(efx);
1419 }
1419 1420
1420 /* Schedule processing of the channel */ 1421 /* Schedule processing of the channel */
1421 efx_schedule_channel(channel); 1422 efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
1440 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; 1441 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
1441 offset += 0x10) { 1442 offset += 0x10) {
1442 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1443 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1443 i % efx->n_rx_queues); 1444 i % efx->n_rx_channels);
1444 efx_writed(efx, &dword, offset); 1445 efx_writed(efx, &dword, offset);
1445 i++; 1446 i++;
1446 } 1447 }
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
1553 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1554 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1554 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1555 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1555 1556
1557 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1558 /* Use an interrupt level unused by event queues */
1559 efx->fatal_irq_level = 0x1f;
1560 else
1561 /* Use a valid MSI-X vector */
1562 efx->fatal_irq_level = 0;
1563
1556 /* Enable all the genuinely fatal interrupts. (They are still 1564 /* Enable all the genuinely fatal interrupts. (They are still
1557 * masked by the overall interrupt mask, controlled by 1565 * masked by the overall interrupt mask, controlled by
1558 * falcon_interrupts()). 1566 * falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
1563 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1571 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1564 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1572 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1565 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1573 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1574 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1575 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1566 EFX_INVERT_OWORD(temp); 1576 EFX_INVERT_OWORD(temp);
1567 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1577 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1568 1578
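[Editor's note: a sketch of the fatal-IRQ-level selection added to efx_nic_init_common() above: with legacy interrupts under workaround 17213 the fatal level is parked at 0x1f, outside the range used by event queues; with MSI-X it shares vector 0, and the interrupt handlers check the level/vector before treating an event as fatal. The helper below is a stand-in, not driver code.]

#include <stdbool.h>

static unsigned pick_fatal_irq_level(bool workaround_17213, bool use_msi)
{
	if (workaround_17213 && !use_msi)
		return 0x1f;	/* interrupt level unused by event queues */
	return 0;		/* first (always valid) MSI-X vector */
}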
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c0331a47..bbc2c0c2f843 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
135 * @fw_build: Firmware build number 135 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 136 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id 137 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
138 */ 139 */
139struct siena_nic_data { 140struct siena_nic_data {
140 u64 fw_version; 141 u64 fw_version;
141 u32 fw_build; 142 u32 fw_build;
142 struct efx_mcdi_iface mcdi; 143 struct efx_mcdi_iface mcdi;
143 int wol_filter_id; 144 int wol_filter_id;
145 u8 ipv6_rss_key[40];
144}; 146};
145 147
146extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); 148extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -156,7 +158,7 @@ extern struct efx_nic_type siena_a0_nic_type;
156 ************************************************************************** 158 **************************************************************************
157 */ 159 */
158 160
159extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); 161extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160 162
161/* TX data path */ 163/* TX data path */
162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 164extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
203extern int efx_nic_flush_queues(struct efx_nic *efx); 205extern int efx_nic_flush_queues(struct efx_nic *efx);
204extern void falcon_start_nic_stats(struct efx_nic *efx); 206extern void falcon_start_nic_stats(struct efx_nic *efx);
205extern void falcon_stop_nic_stats(struct efx_nic *efx); 207extern void falcon_stop_nic_stats(struct efx_nic *efx);
208extern void falcon_setup_xaui(struct efx_nic *efx);
206extern int falcon_reset_xaui(struct efx_nic *efx); 209extern int falcon_reset_xaui(struct efx_nic *efx);
207extern void efx_nic_init_common(struct efx_nic *efx); 210extern void efx_nic_init_common(struct efx_nic *efx);
208 211
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d9aae2..371e86cc090f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
616 goto out; 616 goto out;
617 } 617 }
618 618
619 /* Test every TX queue */ 619 /* Test both types of TX queue */
620 efx_for_each_tx_queue(tx_queue, efx) { 620 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
621 state->offload_csum = (tx_queue->queue == 621 state->offload_csum = (tx_queue->queue &
622 EFX_TX_QUEUE_OFFLOAD_CSUM); 622 EFX_TXQ_TYPE_OFFLOAD);
623 rc = efx_test_loopback(tx_queue, 623 rc = efx_test_loopback(tx_queue,
624 &tests->loopback[mode]); 624 &tests->loopback[mode]);
625 if (rc) 625 if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99d..aed495a4dad7 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20struct efx_loopback_self_tests { 20struct efx_loopback_self_tests {
21 int tx_sent[EFX_TX_QUEUE_COUNT]; 21 int tx_sent[EFX_TXQ_TYPES];
22 int tx_done[EFX_TX_QUEUE_COUNT]; 22 int tx_done[EFX_TXQ_TYPES];
23 int rx_good; 23 int rx_good;
24 int rx_bad; 24 int rx_bad;
25}; 25};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 38dcc42c4f79..727b4228e081 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/random.h>
16#include "net_driver.h" 17#include "net_driver.h"
17#include "bitfield.h" 18#include "bitfield.h"
18#include "efx.h" 19#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
274 goto fail5; 275 goto fail5;
275 } 276 }
276 277
278 get_random_bytes(&nic_data->ipv6_rss_key,
279 sizeof(nic_data->ipv6_rss_key));
280
277 return 0; 281 return 0;
278 282
279fail5: 283fail5:
@@ -293,6 +297,7 @@ fail1:
293 */ 297 */
294static int siena_init_nic(struct efx_nic *efx) 298static int siena_init_nic(struct efx_nic *efx)
295{ 299{
300 struct siena_nic_data *nic_data = efx->nic_data;
296 efx_oword_t temp; 301 efx_oword_t temp;
297 int rc; 302 int rc;
298 303
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
319 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); 324 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
320 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 325 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
321 326
327 /* Enable IPv6 RSS */
328 BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
329 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
330 FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
331 memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
332 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
333 memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
334 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
335 EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
336 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
337 memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
338 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
339 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
340
322 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) 341 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
323 /* No MCDI operation has been defined to set thresholds */ 342 /* No MCDI operation has been defined to set thresholds */
324 EFX_ERR(efx, "ignoring RX flow control thresholds\n"); 343 EFX_ERR(efx, "ignoring RX flow control thresholds\n");
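[Editor's note: a sketch of how the 40-byte Toeplitz key is split across the three IPv6 RSS registers above, matching the BUILD_BUG_ON: two full 16-byte (128-bit) writes plus TKEY_HI_WIDTH / 8 = 8 bytes merged into the third register alongside the enable bits. Register writes are stubbed for illustration.]

#include <string.h>
#include <stdint.h>

#define OWORD_BYTES 16		/* sizeof(efx_oword_t) */
#define TKEY_HI_BYTES 8		/* FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 */
#define RSS_KEY_BYTES (2 * OWORD_BYTES + TKEY_HI_BYTES)	/* = 40 */

static void write_reg(int reg, const uint8_t *val) { (void)reg; (void)val; }

static void load_ipv6_rss_key(const uint8_t key[RSS_KEY_BYTES])
{
	uint8_t temp[OWORD_BYTES];

	memcpy(temp, key, OWORD_BYTES);
	write_reg(1, temp);		/* FR_CZ_RX_RSS_IPV6_REG1 */
	memcpy(temp, key + OWORD_BYTES, OWORD_BYTES);
	write_reg(2, temp);		/* FR_CZ_RX_RSS_IPV6_REG2 */
	memset(temp, 0, OWORD_BYTES);	/* THASH enable bits would go here */
	memcpy(temp, key + 2 * OWORD_BYTES, TKEY_HI_BYTES);
	write_reg(3, temp);		/* FR_CZ_RX_RSS_IPV6_REG3 */
}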
@@ -456,8 +475,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
456 475
457static void siena_update_nic_stats(struct efx_nic *efx) 476static void siena_update_nic_stats(struct efx_nic *efx)
458{ 477{
459 while (siena_try_update_nic_stats(efx) == -EAGAIN) 478 int retry;
460 cpu_relax(); 479
480 /* If we're unlucky enough to read statistics during the DMA, wait
481 * up to 10ms for it to finish (typically takes <500us) */
482 for (retry = 0; retry < 100; ++retry) {
483 if (siena_try_update_nic_stats(efx) == 0)
484 return;
485 udelay(100);
486 }
487
488 /* Use the old values instead */
461} 489}
462 490
463static void siena_start_nic_stats(struct efx_nic *efx) 491static void siena_start_nic_stats(struct efx_nic *efx)
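[Editor's note: a sketch of the bounded-poll pattern adopted above: instead of spinning indefinitely on -EAGAIN with cpu_relax(), retry at a fixed interval up to a deadline (100 tries x 100us = 10ms) and fall back to the stale values on timeout. try_update() stands in for siena_try_update_nic_stats(); usleep() stands in for udelay().]

#include <unistd.h>

static int bounded_poll(int (*try_update)(void *), void *ctx)
{
	int retry;

	for (retry = 0; retry < 100; ++retry) {
		if (try_update(ctx) == 0)
			return 0;	/* fresh statistics in place */
		usleep(100);		/* udelay(100) in kernel context */
	}
	return -1;			/* caller keeps the old values */
}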
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
30 */ 30 */
31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u) 31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
32 32
33/* We want to be able to nest calls to netif_stop_queue(), since each 33/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34 * channel can have an individual stop on the queue. 34 * because of the 2 hardware queues associated with each core queue,
35 */ 35 * but also so that we can inhibit TX for reasons other than a full
36void efx_stop_queue(struct efx_nic *efx) 36 * hardware queue. */
37void efx_stop_queue(struct efx_channel *channel)
37{ 38{
38 spin_lock_bh(&efx->netif_stop_lock); 39 struct efx_nic *efx = channel->efx;
40
41 if (!channel->tx_queue)
42 return;
43
44 spin_lock_bh(&channel->tx_stop_lock);
39 EFX_TRACE(efx, "stop TX queue\n"); 45 EFX_TRACE(efx, "stop TX queue\n");
40 46
41 atomic_inc(&efx->netif_stop_count); 47 atomic_inc(&channel->tx_stop_count);
42 netif_stop_queue(efx->net_dev); 48 netif_tx_stop_queue(
49 netdev_get_tx_queue(
50 efx->net_dev,
51 channel->tx_queue->queue / EFX_TXQ_TYPES));
43 52
44 spin_unlock_bh(&efx->netif_stop_lock); 53 spin_unlock_bh(&channel->tx_stop_lock);
45} 54}
46 55
47/* Wake netif's TX queue 56/* Decrement core TX queue stop count and wake it if the count is 0 */
48 * We want to be able to nest calls to netif_stop_queue(), since each 57void efx_wake_queue(struct efx_channel *channel)
49 * channel can have an individual stop on the queue.
50 */
51void efx_wake_queue(struct efx_nic *efx)
52{ 58{
59 struct efx_nic *efx = channel->efx;
60
61 if (!channel->tx_queue)
62 return;
63
53 local_bh_disable(); 64 local_bh_disable();
54 if (atomic_dec_and_lock(&efx->netif_stop_count, 65 if (atomic_dec_and_lock(&channel->tx_stop_count,
55 &efx->netif_stop_lock)) { 66 &channel->tx_stop_lock)) {
56 EFX_TRACE(efx, "waking TX queue\n"); 67 EFX_TRACE(efx, "waking TX queue\n");
57 netif_wake_queue(efx->net_dev); 68 netif_tx_wake_queue(
58 spin_unlock(&efx->netif_stop_lock); 69 netdev_get_tx_queue(
70 efx->net_dev,
71 channel->tx_queue->queue / EFX_TXQ_TYPES));
72 spin_unlock(&channel->tx_stop_lock);
59 } 73 }
60 local_bh_enable(); 74 local_bh_enable();
61} 75}
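[Editor's note: a sketch of the nested stop/wake counting above, using C11 atomics in place of the kernel's atomic_t plus spinlock pair. The queue wakes only when the last outstanding stop is released; the driver uses atomic_dec_and_lock() so a wake cannot race a concurrent stop. stop_queue()/wake_queue() stand in for the netif_tx_* calls.]

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tx_stop_count;

static void stop_queue(void) { puts("netif_tx_stop_queue"); }
static void wake_queue(void) { puts("netif_tx_wake_queue"); }

static void channel_stop(void)
{
	atomic_fetch_add(&tx_stop_count, 1);
	stop_queue();	/* idempotent, like netif_tx_stop_queue() */
}

static void channel_wake(void)
{
	/* Wake only when the count drops back to zero */
	if (atomic_fetch_sub(&tx_stop_count, 1) == 1)
		wake_queue();
}

int main(void)
{
	channel_stop();
	channel_stop();
	channel_wake();	/* still stopped: one stop outstanding */
	channel_wake();	/* count hits zero -> queue woken */
	return 0;
}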
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
298 rc = NETDEV_TX_BUSY; 312 rc = NETDEV_TX_BUSY;
299 313
300 if (tx_queue->stopped == 1) 314 if (tx_queue->stopped == 1)
301 efx_stop_queue(efx); 315 efx_stop_queue(tx_queue->channel);
302 316
303 unwind: 317 unwind:
304 /* Work backwards until we hit the original insert pointer value */ 318 /* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
374 if (unlikely(efx->port_inhibited)) 388 if (unlikely(efx->port_inhibited))
375 return NETDEV_TX_BUSY; 389 return NETDEV_TX_BUSY;
376 390
391 tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
377 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) 392 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
378 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM]; 393 tx_queue += EFX_TXQ_TYPE_OFFLOAD;
379 else
380 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
381 394
382 return efx_enqueue_skb(tx_queue, skb); 395 return efx_enqueue_skb(tx_queue, skb);
383} 396}
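[Editor's note: a sketch of the queue-index arithmetic above: each core (OS-visible) queue owns a block of EFX_TXQ_TYPES hardware queues, and packets needing checksum offload select the offload entry within the block. The constants mirror the EFX_TXQ_TYPE_OFFLOAD layout introduced by this change.]

#include <assert.h>
#include <stdbool.h>

#define EFX_TXQ_TYPES        2
#define EFX_TXQ_TYPE_OFFLOAD 1

static unsigned hw_tx_queue(unsigned core_queue, bool needs_csum)
{
	unsigned queue = EFX_TXQ_TYPES * core_queue;

	if (needs_csum)
		queue += EFX_TXQ_TYPE_OFFLOAD;
	return queue;
}

int main(void)
{
	assert(hw_tx_queue(0, false) == 0);
	assert(hw_tx_queue(0, true)  == 1);
	assert(hw_tx_queue(3, true)  == 7);	/* block 3, offload entry */
	return 0;
}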
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
405 netif_tx_lock(efx->net_dev); 418 netif_tx_lock(efx->net_dev);
406 if (tx_queue->stopped) { 419 if (tx_queue->stopped) {
407 tx_queue->stopped = 0; 420 tx_queue->stopped = 0;
408 efx_wake_queue(efx); 421 efx_wake_queue(tx_queue->channel);
409 } 422 }
410 netif_tx_unlock(efx->net_dev); 423 netif_tx_unlock(efx->net_dev);
411 } 424 }
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
488 /* Release queue's stop on port, if any */ 501 /* Release queue's stop on port, if any */
489 if (tx_queue->stopped) { 502 if (tx_queue->stopped) {
490 tx_queue->stopped = 0; 503 tx_queue->stopped = 0;
491 efx_wake_queue(tx_queue->efx); 504 efx_wake_queue(tx_queue->channel);
492 } 505 }
493} 506}
494 507
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1120 1133
1121 /* Stop the queue if it wasn't stopped before. */ 1134 /* Stop the queue if it wasn't stopped before. */
1122 if (tx_queue->stopped == 1) 1135 if (tx_queue->stopped == 1)
1123 efx_stop_queue(efx); 1136 efx_stop_queue(tx_queue->channel);
1124 1137
1125 unwind: 1138 unwind:
1126 /* Free the DMA mapping we were in the process of writing out */ 1139 /* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c734e483..518f7fc91473 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
37/* Truncated IPv4 packets can confuse the TX packet parser */ 37/* Truncated IPv4 packets can confuse the TX packet parser */
38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB 38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
39/* Legacy ISR read can return zero once */ 39/* Legacy ISR read can return zero once */
40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
41/* Legacy interrupt storm when interrupt fifo fills */ 41/* Legacy interrupt storm when interrupt fifo fills */
42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
43 43