Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/Makefile          |    7
-rw-r--r--  drivers/net/sfc/efx.c             |  539
-rw-r--r--  drivers/net/sfc/efx.h             |   68
-rw-r--r--  drivers/net/sfc/ethtool.c         |  386
-rw-r--r--  drivers/net/sfc/falcon.c          |  374
-rw-r--r--  drivers/net/sfc/falcon_boards.c   |  325
-rw-r--r--  drivers/net/sfc/falcon_gmac.c     |  230
-rw-r--r--  drivers/net/sfc/falcon_xmac.c     |   20
-rw-r--r--  drivers/net/sfc/filter.c          |  720
-rw-r--r--  drivers/net/sfc/filter.h          |  112
-rw-r--r--  drivers/net/sfc/io.h              |  158
-rw-r--r--  drivers/net/sfc/mac.h             |    6
-rw-r--r--  drivers/net/sfc/mcdi.c            |   78
-rw-r--r--  drivers/net/sfc/mcdi.h            |    6
-rw-r--r--  drivers/net/sfc/mcdi_mac.c        |    4
-rw-r--r--  drivers/net/sfc/mcdi_pcol.h       |    8
-rw-r--r--  drivers/net/sfc/mcdi_phy.c        |   16
-rw-r--r--  drivers/net/sfc/mdio_10g.c        |   71
-rw-r--r--  drivers/net/sfc/mdio_10g.h        |    7
-rw-r--r--  drivers/net/sfc/mtd.c             |  106
-rw-r--r--  drivers/net/sfc/net_driver.h      |  299
-rw-r--r--  drivers/net/sfc/nic.c             |  383
-rw-r--r--  drivers/net/sfc/nic.h             |   30
-rw-r--r--  drivers/net/sfc/phy.h             |   26
-rw-r--r--  drivers/net/sfc/qt202x_phy.c      |   10
-rw-r--r--  drivers/net/sfc/regs.h            |   22
-rw-r--r--  drivers/net/sfc/rx.c              |  249
-rw-r--r--  drivers/net/sfc/selftest.c        |   55
-rw-r--r--  drivers/net/sfc/selftest.h        |    2
-rw-r--r--  drivers/net/sfc/siena.c           |   67
-rw-r--r--  drivers/net/sfc/spi.h             |    7
-rw-r--r--  drivers/net/sfc/tenxpress.c       |  434
-rw-r--r--  drivers/net/sfc/tx.c              |  276
-rw-r--r--  drivers/net/sfc/txc43128_phy.c    |  560
-rw-r--r--  drivers/net/sfc/workarounds.h     |   13
35 files changed, 3439 insertions, 2235 deletions
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 1047b19c60a5..ab31c7124db1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,7 +1,8 @@
-sfc-y			+= efx.o nic.o falcon.o siena.o tx.o rx.o \
-			   falcon_gmac.o falcon_xmac.o mcdi_mac.o \
+sfc-y			+= efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
+			   falcon_xmac.o mcdi_mac.o \
 			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
-			   tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o
+			   tenxpress.o txc43128_phy.o falcon_boards.o \
+			   mcdi.o mcdi_phy.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ba674c5ca29e..c914729f9554 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -21,9 +21,9 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "efx.h"
-#include "mdio_10g.h"
 #include "nic.h"
 
 #include "mcdi.h"
@@ -68,14 +68,6 @@ const char *efx_loopback_mode_names[] = {
 	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
 };
 
-/* Interrupt mode names (see INT_MODE())) */
-const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
-const char *efx_interrupt_mode_names[] = {
-	[EFX_INT_MODE_MSIX]   = "MSI-X",
-	[EFX_INT_MODE_MSI]    = "MSI",
-	[EFX_INT_MODE_LEGACY] = "legacy",
-};
-
 const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
 const char *efx_reset_type_names[] = {
 	[RESET_TYPE_INVISIBLE]     = "INVISIBLE",
@@ -114,7 +106,7 @@ static struct workqueue_struct *reset_workqueue;
  * This is only used in MSI-X interrupt mode
  */
 static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0644);
+module_param(separate_tx_channels, uint, 0444);
 MODULE_PARM_DESC(separate_tx_channels,
 		 "Use separate channels for TX and RX");
 
@@ -124,10 +116,11 @@ MODULE_PARM_DESC(separate_tx_channels,
 static int napi_weight = 64;
 
 /* This is the time (in jiffies) between invocations of the hardware
- * monitor, which checks for known hardware bugs and resets the
- * hardware and driver as necessary.
+ * monitor. On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
  */
-unsigned int efx_monitor_interval = 1 * HZ;
+static unsigned int efx_monitor_interval = 1 * HZ;
 
 /* This controls whether or not the driver will initialise devices
  * with invalid MAC addresses stored in the EEPROM or flash. If true,
@@ -201,10 +194,15 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
  * Utility functions and prototypes
  *
  *************************************************************************/
-static void efx_remove_channel(struct efx_channel *channel);
+
+static void efx_remove_channels(struct efx_nic *efx);
 static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
 static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_channels(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)	\
 	do {					\
@@ -248,7 +246,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 
 	efx_rx_strategy(channel);
 
-	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
 
 	return spent;
 }
@@ -310,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
 		channel->irq_mod_score = 0;
 	}
 
+	efx_filter_rfs_expire(channel);
+
 	/* There is no race here; although napi_disable() will
 	 * only wait for napi_complete(), this isn't a problem
 	 * since efx_channel_processed() will have no effect if
@@ -328,18 +328,23 @@ static int efx_poll(struct napi_struct *napi, int budget)
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
 
+	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
-	if (efx->legacy_irq)
+	if (efx->legacy_irq) {
 		synchronize_irq(efx->legacy_irq);
+		efx->legacy_irq_enabled = false;
+	}
 	if (channel->irq)
 		synchronize_irq(channel->irq);
 
@@ -347,13 +352,15 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	efx_process_channel(channel, EFX_EVQ_SIZE);
+	efx_process_channel(channel, channel->eventq_mask + 1);
 
 	/* Ack the eventq.  This may cause an interrupt to be generated
 	 * when they are reenabled */
 	efx_channel_processed(channel);
 
 	napi_enable(&channel->napi_str);
+	if (efx->legacy_irq)
+		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
 }
 
@@ -364,9 +371,18 @@ void efx_process_channel_now(struct efx_channel *channel)
  */
 static int efx_probe_eventq(struct efx_channel *channel)
 {
+	struct efx_nic *efx = channel->efx;
+	unsigned long entries;
+
 	netif_dbg(channel->efx, probe, channel->efx->net_dev,
 		  "chan %d create event queue\n", channel->channel);
 
+	/* Build an event queue with room for one event per tx and rx buffer,
+	 * plus some extra for link state events and MCDI completions. */
+	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
 	return efx_nic_probe_eventq(channel);
 }
 
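A worked instance of the sizing above, using the EFX_DEFAULT_DMAQ_SIZE (1024) and EFX_MIN_EVQ_SIZE (512) values introduced in efx.h further down; this is a stand-alone sketch, not driver code:

	/* With the default 1024-entry RX and TX rings:
	 * 1024 + 1024 + 128 = 2176, rounded up to the next power of two
	 * gives 4096 event-queue entries, so eventq_mask = 4095.
	 */
	unsigned long entries = roundup_pow_of_two(1024 + 1024 + 128);
	unsigned long eventq_mask = max(entries, 512UL) - 1;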
@@ -403,6 +419,61 @@ static void efx_remove_eventq(struct efx_channel *channel)
  *
  *************************************************************************/
 
+/* Allocate and initialise a channel structure, optionally copying
+ * parameters (but not resources) from an old channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+	struct efx_channel *channel;
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int j;
+
+	if (old_channel) {
+		channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+		if (!channel)
+			return NULL;
+
+		*channel = *old_channel;
+
+		channel->napi_dev = NULL;
+		memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+		rx_queue = &channel->rx_queue;
+		rx_queue->buffer = NULL;
+		memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+
+		for (j = 0; j < EFX_TXQ_TYPES; j++) {
+			tx_queue = &channel->tx_queue[j];
+			if (tx_queue->channel)
+				tx_queue->channel = channel;
+			tx_queue->buffer = NULL;
+			memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+		}
+	} else {
+		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+		if (!channel)
+			return NULL;
+
+		channel->efx = efx;
+		channel->channel = i;
+
+		for (j = 0; j < EFX_TXQ_TYPES; j++) {
+			tx_queue = &channel->tx_queue[j];
+			tx_queue->efx = efx;
+			tx_queue->queue = i * EFX_TXQ_TYPES + j;
+			tx_queue->channel = channel;
+		}
+	}
+
+	rx_queue = &channel->rx_queue;
+	rx_queue->efx = efx;
+	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+		    (unsigned long)rx_queue);
+
+	return channel;
+}
+
 static int efx_probe_channel(struct efx_channel *channel)
 {
 	struct efx_tx_queue *tx_queue;
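The copy branch of efx_alloc_channel() above relies on C struct assignment ("*channel = *old_channel") performing a shallow copy, after which every field pointing at a per-channel resource is reset so the clone allocates its own. A stripped-down sketch of the idiom, with hypothetical types:

	struct chan {
		int id;
		void *buffer;		/* owned resource, must not be shared */
	};

	static struct chan *clone_chan(const struct chan *old)
	{
		struct chan *ch = kmalloc(sizeof(*ch), GFP_KERNEL);

		if (!ch)
			return NULL;
		*ch = *old;		/* shallow copy of every field */
		ch->buffer = NULL;	/* clone must allocate its own buffer */
		return ch;
	}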
@@ -459,11 +530,38 @@ static void efx_set_channel_names(struct efx_nic *efx)
 				number -= efx->n_rx_channels;
 			}
 		}
-		snprintf(channel->name, sizeof(channel->name),
+		snprintf(efx->channel_name[channel->channel],
+			 sizeof(efx->channel_name[0]),
 			 "%s%s-%d", efx->name, type, number);
 	}
 }
 
+static int efx_probe_channels(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	int rc;
+
+	/* Restart special buffer allocation */
+	efx->next_buffer_table = 0;
+
+	efx_for_each_channel(channel, efx) {
+		rc = efx_probe_channel(channel);
+		if (rc) {
+			netif_err(efx, probe, efx->net_dev,
+				  "failed to create channel %d\n",
+				  channel->channel);
+			goto fail;
+		}
+	}
+	efx_set_channel_names(efx);
+
+	return 0;
+
+fail:
+	efx_remove_channels(efx);
+	return rc;
+}
+
 /* Channels are shutdown and reinitialised whilst the NIC is running
  * to propagate configuration changes (mtu, checksum offload), or
  * to clear hardware error conditions
@@ -580,7 +678,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 		efx_fini_eventq(channel);
 	}
@@ -596,11 +694,84 @@ static void efx_remove_channel(struct efx_channel *channel)
 
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }
 
+static void efx_remove_channels(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+	u32 old_rxq_entries, old_txq_entries;
+	unsigned i;
+	int rc;
+
+	efx_stop_all(efx);
+	efx_fini_channels(efx);
+
+	/* Clone channels */
+	memset(other_channel, 0, sizeof(other_channel));
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = efx_alloc_channel(efx, i, efx->channel[i]);
+		if (!channel) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		other_channel[i] = channel;
+	}
+
+	/* Swap entry counts and channel pointers */
+	old_rxq_entries = efx->rxq_entries;
+	old_txq_entries = efx->txq_entries;
+	efx->rxq_entries = rxq_entries;
+	efx->txq_entries = txq_entries;
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = efx->channel[i];
+		efx->channel[i] = other_channel[i];
+		other_channel[i] = channel;
+	}
+
+	rc = efx_probe_channels(efx);
+	if (rc)
+		goto rollback;
+
+	efx_init_napi(efx);
+
+	/* Destroy old channels */
+	for (i = 0; i < efx->n_channels; i++) {
+		efx_fini_napi_channel(other_channel[i]);
+		efx_remove_channel(other_channel[i]);
+	}
+out:
+	/* Free unused channel structures */
+	for (i = 0; i < efx->n_channels; i++)
+		kfree(other_channel[i]);
+
+	efx_init_channels(efx);
+	efx_start_all(efx);
+	return rc;
+
+rollback:
+	/* Swap back */
+	efx->rxq_entries = old_rxq_entries;
+	efx->txq_entries = old_txq_entries;
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = efx->channel[i];
+		efx->channel[i] = other_channel[i];
+		other_channel[i] = channel;
+	}
+	goto out;
+}
+
 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
 	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
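efx_realloc_channels() above is a clone/swap/rollback sequence: clone the channels, swap the pointer array, probe the new set, and on failure swap back, so the fall-through to out: frees whichever set ended up unused in other_channel[]. A minimal sketch of the swap step (a generic, hypothetical helper, not driver code):

	static void swap_ptr_arrays(void *a[], void *b[], unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			void *tmp = a[i];
			a[i] = b[i];
			b[i] = tmp;
		}
	}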
@@ -627,11 +798,6 @@ void efx_link_status_changed(struct efx_nic *efx)
 	if (!netif_running(efx->net_dev))
 		return;
 
-	if (efx->port_inhibited) {
-		netif_carrier_off(efx->net_dev);
-		return;
-	}
-
 	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 		efx->n_link_state_changes++;
 
@@ -667,7 +833,7 @@ void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
 	}
 }
 
-void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc)
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
 {
 	efx->wanted_fc = wanted_fc;
 	if (efx->link_advertising) {
@@ -751,6 +917,7 @@ static void efx_mac_work(struct work_struct *data)
 
 static int efx_probe_port(struct efx_nic *efx)
 {
+	unsigned char *perm_addr;
 	int rc;
 
 	netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -761,14 +928,15 @@ static int efx_probe_port(struct efx_nic *efx)
 	/* Connect up MAC/PHY operations table */
 	rc = efx->type->probe_port(efx);
 	if (rc)
-		goto err;
+		return rc;
 
 	/* Sanity check MAC address */
-	if (is_valid_ether_addr(efx->mac_address)) {
-		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
+	perm_addr = efx->net_dev->perm_addr;
+	if (is_valid_ether_addr(perm_addr)) {
+		memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
 	} else {
 		netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
-			  efx->mac_address);
+			  perm_addr);
 		if (!allow_bad_hwaddr) {
 			rc = -EINVAL;
 			goto err;
@@ -782,7 +950,7 @@ static int efx_probe_port(struct efx_nic *efx)
 	return 0;
 
  err:
-	efx_remove_port(efx);
+	efx->type->remove_port(efx);
 	return rc;
 }
 
@@ -883,6 +1051,7 @@ static int efx_init_io(struct efx_nic *efx)
 {
 	struct pci_dev *pci_dev = efx->pci_dev;
 	dma_addr_t dma_mask = efx->type->max_dma_mask;
+	bool use_wc;
 	int rc;
 
 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -933,8 +1102,21 @@ static int efx_init_io(struct efx_nic *efx)
 		rc = -EIO;
 		goto fail3;
 	}
-	efx->membase = ioremap_nocache(efx->membase_phys,
-				       efx->type->mem_map_size);
+
+	/* bug22643: If SR-IOV is enabled then tx push over a write combined
+	 * mapping is unsafe. We need to disable write combining in this case.
+	 * MSI is unsupported when SR-IOV is enabled, and the firmware will
+	 * have removed the MSI capability. So write combining is safe if
+	 * there is an MSI capability.
+	 */
+	use_wc = (!EFX_WORKAROUND_22643(efx) ||
+		  pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
+	if (use_wc)
+		efx->membase = ioremap_wc(efx->membase_phys,
+					  efx->type->mem_map_size);
+	else
+		efx->membase = ioremap_nocache(efx->membase_phys,
+					       efx->type->mem_map_size);
 	if (!efx->membase) {
 		netif_err(efx, probe, efx->net_dev,
 			  "could not map memory BAR at %llx+%x\n",
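The use_wc test above leans on pci_find_capability() returning the capability's offset in PCI config space, or 0 when the capability is absent. A minimal sketch of the same decision in isolation (hypothetical helper name):

	static bool can_use_write_combining(struct pci_dev *pci_dev,
					    bool workaround_22643)
	{
		/* A non-zero offset means an MSI capability is present,
		 * which implies SR-IOV is disabled, so a write-combined
		 * BAR mapping is safe for TX push. */
		return !workaround_22643 ||
		       pci_find_capability(pci_dev, PCI_CAP_ID_MSI) != 0;
	}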
@@ -985,6 +1167,9 @@ static int efx_wanted_channels(void)
 	int count;
 	int cpu;
 
+	if (rss_cpus)
+		return rss_cpus;
+
 	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
@@ -1004,10 +1189,32 @@ static int efx_wanted_channels(void)
 	return count;
 }
 
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc;
+
+	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+	if (!efx->net_dev->rx_cpu_rmap)
+		return -ENOMEM;
+	for (i = 0; i < efx->n_rx_channels; i++) {
+		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+				      xentries[i].vector);
+		if (rc) {
+			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+			efx->net_dev->rx_cpu_rmap = NULL;
+			return rc;
+		}
+	}
+#endif
+	return 0;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
 {
 	int max_channels =
 		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1049,8 +1256,14 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 			efx->n_tx_channels = efx->n_channels;
 			efx->n_rx_channels = efx->n_channels;
 		}
+		rc = efx_init_rx_cpu_rmap(efx, xentries);
+		if (rc) {
+			pci_disable_msix(efx->pci_dev);
+			return rc;
+		}
 		for (i = 0; i < n_channels; i++)
-			efx->channel[i].irq = xentries[i].vector;
+			efx_get_channel(efx, i)->irq =
+				xentries[i].vector;
 	} else {
 		/* Fall back to single channel MSI */
 		efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1279,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 		efx->n_tx_channels = 1;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
-			efx->channel[0].irq = efx->pci_dev->irq;
+			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
 		} else {
 			netif_err(efx, drv, efx->net_dev,
 				  "could not enable MSI\n");
@@ -1081,6 +1294,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
+
+	return 0;
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1101,22 +1316,18 @@ static void efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
-	unsigned tx_channel_offset =
+
+	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
+	/* We need to adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
 	efx_for_each_channel(channel, efx) {
-		if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
-			channel->tx_queue = &efx->tx_queue[
-				(channel->channel - tx_channel_offset) *
-				EFX_TXQ_TYPES];
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				tx_queue->channel = channel;
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			tx_queue->queue -= (efx->tx_channel_offset *
+					    EFX_TXQ_TYPES);
 	}
-
-	efx_for_each_rx_queue(rx_queue, efx)
-		rx_queue->channel = &efx->channel[rx_queue->queue];
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1133,7 +1344,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 
 	/* Determine the number of channels and queues by trying to hook
 	 * in MSI-X interrupts. */
-	efx_probe_interrupts(efx);
+	rc = efx_probe_interrupts(efx);
+	if (rc)
+		goto fail;
 
 	if (efx->n_channels > 1)
 		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1141,12 +1354,17 @@ static int efx_probe_nic(struct efx_nic *efx)
 		efx->rx_indir_table[i] = i % efx->n_rx_channels;
 
 	efx_set_channels(efx);
-	efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
+	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
 
 	/* Initialise the interrupt moderation settings */
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
 	return 0;
+
+fail:
+	efx->type->remove(efx);
+	return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
@@ -1165,40 +1383,37 @@ static void efx_remove_nic(struct efx_nic *efx)
 
 static int efx_probe_all(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
 	int rc;
 
-	/* Create NIC */
 	rc = efx_probe_nic(efx);
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
 		goto fail1;
 	}
 
-	/* Create port */
 	rc = efx_probe_port(efx);
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
 		goto fail2;
 	}
 
-	/* Create channels */
-	efx_for_each_channel(channel, efx) {
-		rc = efx_probe_channel(channel);
-		if (rc) {
-			netif_err(efx, probe, efx->net_dev,
-				  "failed to create channel %d\n",
-				  channel->channel);
-			goto fail3;
-		}
+	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+	rc = efx_probe_channels(efx);
+	if (rc)
+		goto fail3;
+
+	rc = efx_probe_filters(efx);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to create filter tables\n");
+		goto fail4;
 	}
-	efx_set_channel_names(efx);
 
 	return 0;
 
+ fail4:
+	efx_remove_channels(efx);
 fail3:
-	efx_for_each_channel(channel, efx)
-		efx_remove_channel(channel);
 	efx_remove_port(efx);
 fail2:
 	efx_remove_nic(efx);
@@ -1230,12 +1445,14 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	efx_for_each_channel(channel, efx) {
-		if (efx_dev_registered(efx))
-			efx_wake_queue(channel);
+	if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
+		netif_tx_wake_all_queues(efx->net_dev);
+
+	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
-	}
 
+	if (efx->legacy_irq)
+		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
 
 	/* Switch to event based MCDI completions after enabling interrupts.
@@ -1296,8 +1513,10 @@ static void efx_stop_all(struct efx_nic *efx)
 
 	/* Disable interrupts and wait for ISR to complete */
 	efx_nic_disable_interrupts(efx);
-	if (efx->legacy_irq)
+	if (efx->legacy_irq) {
 		synchronize_irq(efx->legacy_irq);
+		efx->legacy_irq_enabled = false;
+	}
 	efx_for_each_channel(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
@@ -1318,9 +1537,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		struct efx_channel *channel;
-		efx_for_each_channel(channel, efx)
-			efx_stop_queue(channel);
+		netif_tx_stop_all_queues(efx->net_dev);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1328,10 +1545,8 @@ static void efx_stop_all(struct efx_nic *efx)
 
 static void efx_remove_all(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-
-	efx_for_each_channel(channel, efx)
-		efx_remove_channel(channel);
+	efx_remove_filters(efx);
+	efx_remove_channels(efx);
 	efx_remove_port(efx);
 	efx_remove_nic(efx);
 }
@@ -1355,20 +1570,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 			     bool rx_adaptive)
 {
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
+	struct efx_channel *channel;
 	unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
 	unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	efx_for_each_tx_queue(tx_queue, efx)
-		tx_queue->channel->irq_moderation = tx_ticks;
-
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
-	efx_for_each_rx_queue(rx_queue, efx)
-		rx_queue->channel->irq_moderation = rx_ticks;
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_has_rx_queue(channel))
+			channel->irq_moderation = rx_ticks;
+		else if (efx_channel_has_tx_queues(channel))
+			channel->irq_moderation = tx_ticks;
+	}
 }
 
 /**************************************************************************
@@ -1377,8 +1592,7 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
  *
  **************************************************************************/
 
-/* Run periodically off the general workqueue. Serialised against
- * efx_reconfigure_port via the mac_lock */
+/* Run periodically off the general workqueue */
 static void efx_monitor(struct work_struct *data)
 {
 	struct efx_nic *efx = container_of(data, struct efx_nic,
@@ -1391,16 +1605,13 @@ static void efx_monitor(struct work_struct *data)
 
 	/* If the mac_lock is already held then it is likely a port
 	 * reconfiguration is already in place, which will likely do
-	 * most of the work of check_hw() anyway. */
-	if (!mutex_trylock(&efx->mac_lock))
-		goto out_requeue;
-	if (!efx->port_enabled)
-		goto out_unlock;
-	efx->type->monitor(efx);
+	 * most of the work of monitor() anyway. */
+	if (mutex_trylock(&efx->mac_lock)) {
+		if (efx->port_enabled)
+			efx->type->monitor(efx);
+		mutex_unlock(&efx->mac_lock);
+	}
 
-out_unlock:
-	mutex_unlock(&efx->mac_lock);
-out_requeue:
 	queue_delayed_work(efx->workqueue, &efx->monitor_work,
 			   efx_monitor_interval);
 }
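The rewritten monitor depends on mutex_trylock() returning nonzero only when the lock was actually taken: a contended poll is skipped rather than blocked on, and the delayed work is requeued either way. A minimal sketch of the pattern (hypothetical names):

	static void periodic_poll(struct mutex *lock, void (*poll)(void))
	{
		if (mutex_trylock(lock)) {	/* nonzero => lock held */
			poll();
			mutex_unlock(lock);
		}
		/* else: skip this tick; the work is requeued regardless */
	}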
@@ -1435,7 +1646,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
  *
  **************************************************************************/
 
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 
@@ -1444,18 +1655,21 @@ static int efx_init_napi(struct efx_nic *efx)
 		netif_napi_add(channel->napi_dev, &channel->napi_str,
 			       efx_poll, napi_weight);
 	}
-	return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+	if (channel->napi_dev)
+		netif_napi_del(&channel->napi_str);
+	channel->napi_dev = NULL;
 }
 
 static void efx_fini_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 
-	efx_for_each_channel(channel, efx) {
-		if (channel->napi_dev)
-			netif_napi_del(&channel->napi_str);
-		channel->napi_dev = NULL;
-	}
+	efx_for_each_channel(channel, efx)
+		efx_fini_napi_channel(channel);
 }
 
 /**************************************************************************
@@ -1546,11 +1760,11 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
 	stats->tx_packets = mac_stats->tx_packets;
 	stats->rx_bytes = mac_stats->rx_bytes;
 	stats->tx_bytes = mac_stats->tx_bytes;
+	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
 	stats->multicast = mac_stats->rx_multicast;
 	stats->collisions = mac_stats->tx_collision;
 	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
 				   mac_stats->rx_length_error);
-	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
 	stats->rx_crc_errors = mac_stats->rx_bad;
 	stats->rx_frame_errors = mac_stats->rx_align_error;
 	stats->rx_fifo_errors = mac_stats->rx_overflow;
@@ -1669,6 +1883,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 	/* Otherwise efx_start_port() will do this */
 }
 
+static int efx_set_features(struct net_device *net_dev, u32 data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	/* If disabling RX n-tuple filtering, clear existing filters */
+	if (net_dev->features & ~data & NETIF_F_NTUPLE)
+		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+	return 0;
+}
+
 static const struct net_device_ops efx_netdev_ops = {
 	.ndo_open		= efx_net_open,
 	.ndo_stop		= efx_net_stop,
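The mask expression in efx_set_features() above ("features & ~data & NETIF_F_NTUPLE") is nonzero exactly when the flag is currently set and the request clears it. The same test in isolation, as a sketch:

	static bool feature_being_disabled(u32 old_features, u32 new_features,
					   u32 flag)
	{
		/* set in the old mask, clear in the requested one */
		return (old_features & ~new_features & flag) != 0;
	}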
@@ -1680,9 +1905,14 @@ static const struct net_device_ops efx_netdev_ops = {
 	.ndo_change_mtu		= efx_change_mtu,
 	.ndo_set_mac_address	= efx_set_mac_address,
 	.ndo_set_multicast_list = efx_set_multicast_list,
+	.ndo_set_features	= efx_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= efx_netpoll,
 #endif
+	.ndo_setup_tc		= efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= efx_filter_rfs,
+#endif
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -1719,6 +1949,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_channel *channel;
 	int rc;
 
 	net_dev->watchdog_timeo = 5 * HZ;
@@ -1741,6 +1972,12 @@ static int efx_register_netdev(struct efx_nic *efx)
 	if (rc)
 		goto fail_locked;
 
+	efx_for_each_channel(channel, efx) {
+		struct efx_tx_queue *tx_queue;
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
+	}
+
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(efx->net_dev);
 
@@ -1767,6 +2004,7 @@ fail_registered:
 
 static void efx_unregister_netdev(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 
 	if (!efx->net_dev)
@@ -1777,8 +2015,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 	/* Free up any skbs still remaining. This has to happen before
 	 * we try to unregister the netdev as running their destructors
 	 * may be needed to get the device ref. count to 0. */
-	efx_for_each_tx_queue(tx_queue, efx)
-		efx_release_tx_buffers(tx_queue);
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_release_tx_buffers(tx_queue);
+	}
 
 	if (efx_dev_registered(efx)) {
 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -1801,7 +2041,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 
 	efx_stop_all(efx);
 	mutex_lock(&efx->mac_lock);
-	mutex_lock(&efx->spi_lock);
 
 	efx_fini_channels(efx);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -1841,8 +2080,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 		efx->mac_op->reconfigure(efx);
 
 	efx_init_channels(efx);
+	efx_restore_filters(efx);
 
-	mutex_unlock(&efx->spi_lock);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
@@ -1852,7 +2091,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 fail:
 	efx->port_initialized = false;
 
-	mutex_unlock(&efx->spi_lock);
 	mutex_unlock(&efx->mac_lock);
 
 	return rc;
@@ -1871,6 +2109,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
 	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
 		   RESET_TYPE(method));
 
+	netif_device_detach(efx->net_dev);
 	efx_reset_down(efx, method);
 
 	rc = efx->type->reset(efx, method);
@@ -1904,6 +2143,7 @@ out:
 		efx->state = STATE_DISABLED;
 	} else {
 		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+		netif_device_attach(efx->net_dev);
 	}
 	return rc;
 }
@@ -2010,15 +2250,13 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
 	return 0;
 }
 void efx_port_dummy_op_void(struct efx_nic *efx) {}
-void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
-{
-}
-bool efx_port_dummy_op_poll(struct efx_nic *efx)
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
 {
 	return false;
 }
 
-static struct efx_phy_operations efx_dummy_phy_operations = {
+static const struct efx_phy_operations efx_dummy_phy_operations = {
 	.init		 = efx_port_dummy_op_int,
 	.reconfigure	 = efx_port_dummy_op_int,
 	.poll		 = efx_port_dummy_op_poll,
@@ -2034,19 +2272,14 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 			   struct pci_dev *pci_dev, struct net_device *net_dev)
 {
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
 	int i;
 
 	/* Initialise common structures */
 	memset(efx, 0, sizeof(*efx));
 	spin_lock_init(&efx->biu_lock);
-	mutex_init(&efx->mdio_lock);
-	mutex_init(&efx->spi_lock);
 #ifdef CONFIG_SFC_MTD
 	INIT_LIST_HEAD(&efx->mtd_list);
 #endif
@@ -2059,7 +2292,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
 	efx->net_dev = net_dev;
-	efx->rx_checksum_enabled = true;
 	spin_lock_init(&efx->stats_lock);
 	mutex_init(&efx->mac_lock);
 	efx->mac_op = type->default_mac_ops;
@@ -2068,36 +2300,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 	INIT_WORK(&efx->mac_work, efx_mac_work);
 
 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
-		channel = &efx->channel[i];
-		channel->efx = efx;
-		channel->channel = i;
-		channel->work_pending = false;
-		spin_lock_init(&channel->tx_stop_lock);
-		atomic_set(&channel->tx_stop_count, 1);
-	}
-	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
-		tx_queue = &efx->tx_queue[i];
-		tx_queue->efx = efx;
-		tx_queue->queue = i;
-		tx_queue->buffer = NULL;
-		tx_queue->channel = &efx->channel[0]; /* for safety */
-		tx_queue->tso_headers_free = NULL;
-	}
-	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
-		rx_queue = &efx->rx_queue[i];
-		rx_queue->efx = efx;
-		rx_queue->queue = i;
-		rx_queue->channel = &efx->channel[0]; /* for safety */
-		rx_queue->buffer = NULL;
-		setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
-			    (unsigned long)rx_queue);
+		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+		if (!efx->channel[i])
+			goto fail;
 	}
 
 	efx->type = type;
 
-	/* As close as we can get to guaranteeing that we don't overflow */
-	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
-
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
@@ -2109,13 +2318,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 		 pci_name(pci_dev));
 	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
 	if (!efx->workqueue)
-		return -ENOMEM;
+		goto fail;
 
 	return 0;
+
+fail:
+	efx_fini_struct(efx);
+	return -ENOMEM;
 }
 
 static void efx_fini_struct(struct efx_nic *efx)
 {
+	int i;
+
+	for (i = 0; i < EFX_MAX_CHANNELS; i++)
+		kfree(efx->channel[i]);
+
 	if (efx->workqueue) {
 		destroy_workqueue(efx->workqueue);
 		efx->workqueue = NULL;
@@ -2133,6 +2351,10 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
 	efx_nic_fini_interrupt(efx);
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
@@ -2192,9 +2414,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 	if (rc)
 		goto fail1;
 
-	rc = efx_init_napi(efx);
-	if (rc)
-		goto fail2;
+	efx_init_napi(efx);
 
 	rc = efx->type->init(efx);
 	if (rc) {
@@ -2225,7 +2445,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 	efx->type->fini(efx);
  fail3:
 	efx_fini_napi(efx);
- fail2:
 	efx_remove_all(efx);
  fail1:
 	return rc;
@@ -2243,23 +2462,27 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				   const struct pci_device_id *entry)
 {
-	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
+	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
 	struct net_device *net_dev;
 	struct efx_nic *efx;
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO |
-			      NETIF_F_GRO);
+			      NETIF_F_RXCSUM);
 	if (type->offload_features & NETIF_F_V6_CSUM)
 		net_dev->features |= NETIF_F_TSO6;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
-				   NETIF_F_HIGHDMA | NETIF_F_TSO);
+				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+				   NETIF_F_RXCSUM);
+	/* All offloads can be toggled */
+	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
 	efx = netdev_priv(net_dev);
 	pci_set_drvdata(pci_dev, efx);
 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 060dc952a0fd..b0d1209ea18d 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -12,6 +12,7 @@
 #define EFX_EFX_H
 
 #include "net_driver.h"
+#include "filter.h"
 
 /* PCI IDs */
 #define EFX_VENDID_SFC	0x1924
@@ -28,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -35,10 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
-#define EFX_TXQ_SIZE 1024
-#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -53,23 +52,56 @@ extern void __efx_rx_packet(struct efx_channel *channel,
53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 52extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
54 unsigned int len, bool checksummed, bool discard); 53 unsigned int len, bool checksummed, bool discard);
55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 54extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
56#define EFX_RXQ_SIZE 1024 55
57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) 56#define EFX_MAX_DMAQ_SIZE 4096UL
57#define EFX_DEFAULT_DMAQ_SIZE 1024UL
58#define EFX_MIN_DMAQ_SIZE 512UL
59
60#define EFX_MAX_EVQ_SIZE 16384UL
61#define EFX_MIN_EVQ_SIZE 512UL
62
63/* The smallest [rt]xq_entries that the driver supports. Callers of
64 * efx_wake_queue() assume that they can subsequently send at least one
65 * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
66#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
67
68/* Filters */
69extern int efx_probe_filters(struct efx_nic *efx);
70extern void efx_restore_filters(struct efx_nic *efx);
71extern void efx_remove_filters(struct efx_nic *efx);
72extern int efx_filter_insert_filter(struct efx_nic *efx,
73 struct efx_filter_spec *spec,
74 bool replace);
75extern int efx_filter_remove_filter(struct efx_nic *efx,
76 struct efx_filter_spec *spec);
77extern void efx_filter_clear_rx(struct efx_nic *efx,
78 enum efx_filter_priority priority);
79#ifdef CONFIG_RFS_ACCEL
80extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
81 u16 rxq_index, u32 flow_id);
82extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
83static inline void efx_filter_rfs_expire(struct efx_channel *channel)
84{
85 if (channel->rfs_filters_added >= 60 &&
86 __efx_filter_rfs_expire(channel->efx, 100))
87 channel->rfs_filters_added -= 60;
88}
89#define efx_filter_rfs_enabled() 1
90#else
91static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
92#define efx_filter_rfs_enabled() 0
93#endif
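[annotation] The expiry helper above is cheap enough for the hot path. A plausible call site is the per-channel NAPI poll; efx_process_channel() and the napi_str member exist elsewhere in this driver, but the wiring shown here is a sketch, not part of this hunk:

	static int efx_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct efx_channel *channel =
			container_of(napi, struct efx_channel, napi_str);
		int spent = efx_process_channel(channel, budget);

		if (spent < budget) {
			/* Quota not exhausted: age out accelerated-RFS
			 * filters before re-enabling the interrupt. */
			efx_filter_rfs_expire(channel);
			napi_complete(napi);
		}
		return spent;
	}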
58 94
59/* Channels */ 95/* Channels */
60extern void efx_process_channel_now(struct efx_channel *channel); 96extern void efx_process_channel_now(struct efx_channel *channel);
61#define EFX_EVQ_SIZE 4096 97extern int
62#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1) 98efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
63 99
64/* Ports */ 100/* Ports */
65extern int efx_reconfigure_port(struct efx_nic *efx); 101extern int efx_reconfigure_port(struct efx_nic *efx);
66extern int __efx_reconfigure_port(struct efx_nic *efx); 102extern int __efx_reconfigure_port(struct efx_nic *efx);
67 103
68/* Ethtool support */ 104/* Ethtool support */
69extern int efx_ethtool_get_settings(struct net_device *net_dev,
70 struct ethtool_cmd *ecmd);
71extern int efx_ethtool_set_settings(struct net_device *net_dev,
72 struct ethtool_cmd *ecmd);
73extern const struct ethtool_ops efx_ethtool_ops; 105extern const struct ethtool_ops efx_ethtool_ops;
74 106
75/* Reset handling */ 107/* Reset handling */
@@ -81,15 +113,11 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 113extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
82extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, 114extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
83 int rx_usecs, bool rx_adaptive); 115 int rx_usecs, bool rx_adaptive);
84extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
85extern void efx_hex_dump(const u8 *, unsigned int, const char *);
86 116
87/* Dummy PHY ops for PHY drivers */ 117/* Dummy PHY ops for PHY drivers */
88extern int efx_port_dummy_op_int(struct efx_nic *efx); 118extern int efx_port_dummy_op_int(struct efx_nic *efx);
89extern void efx_port_dummy_op_void(struct efx_nic *efx); 119extern void efx_port_dummy_op_void(struct efx_nic *efx);
90extern void 120
91efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
92extern bool efx_port_dummy_op_poll(struct efx_nic *efx);
93 121
94/* MTD */ 122/* MTD */
95#ifdef CONFIG_SFC_MTD 123#ifdef CONFIG_SFC_MTD
@@ -102,8 +130,6 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
102static inline void efx_mtd_remove(struct efx_nic *efx) {} 130static inline void efx_mtd_remove(struct efx_nic *efx) {}
103#endif 131#endif
104 132
105extern unsigned int efx_monitor_interval;
106
107static inline void efx_schedule_channel(struct efx_channel *channel) 133static inline void efx_schedule_channel(struct efx_channel *channel)
108{ 134{
109 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 135 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
@@ -116,6 +142,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
116 142
117extern void efx_link_status_changed(struct efx_nic *efx); 143extern void efx_link_status_changed(struct efx_nic *efx);
118extern void efx_link_set_advertising(struct efx_nic *efx, u32); 144extern void efx_link_set_advertising(struct efx_nic *efx, u32);
119extern void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type); 145extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
120 146
121#endif /* EFX_EFX_H */ 147#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fd19d6ab97a2..d229027dc363 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -11,13 +11,13 @@
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
14#include <linux/in.h>
14#include "net_driver.h" 15#include "net_driver.h"
15#include "workarounds.h" 16#include "workarounds.h"
16#include "selftest.h" 17#include "selftest.h"
17#include "efx.h" 18#include "efx.h"
19#include "filter.h"
18#include "nic.h" 20#include "nic.h"
19#include "spi.h"
20#include "mdio_10g.h"
21 21
22struct ethtool_string { 22struct ethtool_string {
23 char name[ETH_GSTRING_LEN]; 23 char name[ETH_GSTRING_LEN];
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
28 enum { 28 enum {
29 EFX_ETHTOOL_STAT_SOURCE_mac_stats, 29 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
30 EFX_ETHTOOL_STAT_SOURCE_nic, 30 EFX_ETHTOOL_STAT_SOURCE_nic,
31 EFX_ETHTOOL_STAT_SOURCE_channel 31 EFX_ETHTOOL_STAT_SOURCE_channel,
32 EFX_ETHTOOL_STAT_SOURCE_tx_queue
32 } source; 33 } source;
33 unsigned offset; 34 unsigned offset;
34 u64(*get_stat) (void *field); /* Reader function */ 35 u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
86 EFX_ETHTOOL_STAT(field, channel, n_##field, \ 87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
87 unsigned int, efx_get_uint_stat) 88 unsigned int, efx_get_uint_stat)
88 89
90#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
91 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
92 unsigned int, efx_get_uint_stat)
93
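[annotation] For reference, EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts) expands via EFX_ETHTOOL_STAT to a table entry roughly like the following (field layout paraphrased from the macro, not copied from the tree); it is summed over every TX queue by efx_ethtool_get_stats() below:

	{
		.name     = "tx_tso_bursts",
		.source   = EFX_ETHTOOL_STAT_SOURCE_tx_queue,
		.offset   = offsetof(struct efx_tx_queue, tso_bursts),
		.get_stat = efx_get_uint_stat,	/* reads an unsigned int */
	}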
89static struct efx_ethtool_stat efx_ethtool_stats[] = { 94static struct efx_ethtool_stat efx_ethtool_stats[] = {
90 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 95 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
91 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), 96 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), 121 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), 122 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), 123 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
124 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
125 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
126 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
127 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), 128 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), 129 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), 130 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -169,25 +178,33 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
169 */ 178 */
170 179
171/* Identify device by flashing LEDs */ 180/* Identify device by flashing LEDs */
172static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count) 181static int efx_ethtool_phys_id(struct net_device *net_dev,
182 enum ethtool_phys_id_state state)
173{ 183{
174 struct efx_nic *efx = netdev_priv(net_dev); 184 struct efx_nic *efx = netdev_priv(net_dev);
185 enum efx_led_mode mode = EFX_LED_DEFAULT;
175 186
176 do { 187 switch (state) {
177 efx->type->set_id_led(efx, EFX_LED_ON); 188 case ETHTOOL_ID_ON:
178 schedule_timeout_interruptible(HZ / 2); 189 mode = EFX_LED_ON;
179 190 break;
180 efx->type->set_id_led(efx, EFX_LED_OFF); 191 case ETHTOOL_ID_OFF:
181 schedule_timeout_interruptible(HZ / 2); 192 mode = EFX_LED_OFF;
182 } while (!signal_pending(current) && --count != 0); 193 break;
194 case ETHTOOL_ID_INACTIVE:
195 mode = EFX_LED_DEFAULT;
196 break;
197 case ETHTOOL_ID_ACTIVE:
198 return 1; /* cycle on/off once per second */
199 }
183 200
184 efx->type->set_id_led(efx, EFX_LED_DEFAULT); 201 efx->type->set_id_led(efx, mode);
185 return 0; 202 return 0;
186} 203}
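[annotation] The blinking loop that used to live here now belongs to the ethtool core. Roughly (a simplified model of the core's behaviour, not the exact kernel code), returning 1 from ETHTOOL_ID_ACTIVE asks the core to generate one on/off cycle per second:

	rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);	/* 1 here */
	while (!signal_pending(current) && --count != 0) {
		ops->set_phys_id(dev, ETHTOOL_ID_ON);
		schedule_timeout_interruptible(HZ / (2 * rc));
		ops->set_phys_id(dev, ETHTOOL_ID_OFF);
		schedule_timeout_interruptible(HZ / (2 * rc));
	}
	ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);	/* restore default */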
187 204
188/* This must be called with rtnl_lock held. */ 205/* This must be called with rtnl_lock held. */
189int efx_ethtool_get_settings(struct net_device *net_dev, 206static int efx_ethtool_get_settings(struct net_device *net_dev,
190 struct ethtool_cmd *ecmd) 207 struct ethtool_cmd *ecmd)
191{ 208{
192 struct efx_nic *efx = netdev_priv(net_dev); 209 struct efx_nic *efx = netdev_priv(net_dev);
193 struct efx_link_state *link_state = &efx->link_state; 210 struct efx_link_state *link_state = &efx->link_state;
@@ -202,7 +219,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
202 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 219 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
203 220
204 if (LOOPBACK_INTERNAL(efx)) { 221 if (LOOPBACK_INTERNAL(efx)) {
205 ecmd->speed = link_state->speed; 222 ethtool_cmd_speed_set(ecmd, link_state->speed);
206 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; 223 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
207 } 224 }
208 225
@@ -210,14 +227,15 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
210} 227}
211 228
212/* This must be called with rtnl_lock held. */ 229/* This must be called with rtnl_lock held. */
213int efx_ethtool_set_settings(struct net_device *net_dev, 230static int efx_ethtool_set_settings(struct net_device *net_dev,
214 struct ethtool_cmd *ecmd) 231 struct ethtool_cmd *ecmd)
215{ 232{
216 struct efx_nic *efx = netdev_priv(net_dev); 233 struct efx_nic *efx = netdev_priv(net_dev);
217 int rc; 234 int rc;
218 235
219 /* GMAC does not support 1000Mbps HD */ 236 /* GMAC does not support 1000Mbps HD */
220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { 237 if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
238 (ecmd->duplex != DUPLEX_FULL)) {
221 netif_dbg(efx, drv, efx->net_dev, 239 netif_dbg(efx, drv, efx->net_dev,
222 "rejecting unsupported 1000Mbps HD setting\n"); 240 "rejecting unsupported 1000Mbps HD setting\n");
223 return -EINVAL; 241 return -EINVAL;
@@ -237,8 +255,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
237 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 255 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 256 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 257 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
240 siena_print_fwver(efx, info->fw_version, 258 efx_mcdi_print_fwver(efx, info->fw_version,
241 sizeof(info->fw_version)); 259 sizeof(info->fw_version));
242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 260 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
243} 261}
244 262
@@ -328,9 +346,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
328 unsigned int test_index, 346 unsigned int test_index,
329 struct ethtool_string *strings, u64 *data) 347 struct ethtool_string *strings, u64 *data)
330{ 348{
349 struct efx_channel *channel = efx_get_channel(efx, 0);
331 struct efx_tx_queue *tx_queue; 350 struct efx_tx_queue *tx_queue;
332 351
333 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) { 352 efx_for_each_channel_tx_queue(tx_queue, channel) {
334 efx_fill_test(test_index++, strings, data, 353 efx_fill_test(test_index++, strings, data,
335 &lb_tests->tx_sent[tx_queue->queue], 354 &lb_tests->tx_sent[tx_queue->queue],
336 EFX_TX_QUEUE_NAME(tx_queue), 355 EFX_TX_QUEUE_NAME(tx_queue),
@@ -469,6 +488,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
469 struct efx_mac_stats *mac_stats = &efx->mac_stats; 488 struct efx_mac_stats *mac_stats = &efx->mac_stats;
470 struct efx_ethtool_stat *stat; 489 struct efx_ethtool_stat *stat;
471 struct efx_channel *channel; 490 struct efx_channel *channel;
491 struct efx_tx_queue *tx_queue;
472 struct rtnl_link_stats64 temp; 492 struct rtnl_link_stats64 temp;
473 int i; 493 int i;
474 494
@@ -494,74 +514,31 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
494 data[i] += stat->get_stat((void *)channel + 514 data[i] += stat->get_stat((void *)channel +
495 stat->offset); 515 stat->offset);
496 break; 516 break;
517 case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
518 data[i] = 0;
519 efx_for_each_channel(channel, efx) {
520 efx_for_each_channel_tx_queue(tx_queue, channel)
521 data[i] +=
522 stat->get_stat((void *)tx_queue
523 + stat->offset);
524 }
525 break;
497 } 526 }
498 } 527 }
499} 528}
500 529
501static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
502{
503 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
504 unsigned long features;
505
506 features = NETIF_F_TSO;
507 if (efx->type->offload_features & NETIF_F_V6_CSUM)
508 features |= NETIF_F_TSO6;
509
510 if (enable)
511 net_dev->features |= features;
512 else
513 net_dev->features &= ~features;
514
515 return 0;
516}
517
518static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
519{
520 struct efx_nic *efx = netdev_priv(net_dev);
521 unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
522
523 if (enable)
524 net_dev->features |= features;
525 else
526 net_dev->features &= ~features;
527
528 return 0;
529}
530
531static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
532{
533 struct efx_nic *efx = netdev_priv(net_dev);
534
535 /* No way to stop the hardware doing the checks; we just
536 * ignore the result.
537 */
538 efx->rx_checksum_enabled = !!enable;
539
540 return 0;
541}
542
543static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
544{
545 struct efx_nic *efx = netdev_priv(net_dev);
546
547 return efx->rx_checksum_enabled;
548}
549
550static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
551{
552 struct efx_nic *efx = netdev_priv(net_dev);
553 u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
554
555 return ethtool_op_set_flags(net_dev, data, supported);
556}
557
558static void efx_ethtool_self_test(struct net_device *net_dev, 530static void efx_ethtool_self_test(struct net_device *net_dev,
559 struct ethtool_test *test, u64 *data) 531 struct ethtool_test *test, u64 *data)
560{ 532{
561 struct efx_nic *efx = netdev_priv(net_dev); 533 struct efx_nic *efx = netdev_priv(net_dev);
562 struct efx_self_tests efx_tests; 534 struct efx_self_tests *efx_tests;
563 int already_up; 535 int already_up;
564 int rc; 536 int rc = -ENOMEM;
537
538 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
539 if (!efx_tests)
540 goto fail;
541
565 542
566 ASSERT_RTNL(); 543 ASSERT_RTNL();
567 if (efx->state != STATE_RUNNING) { 544 if (efx->state != STATE_RUNNING) {
@@ -569,6 +546,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
569 goto fail1; 546 goto fail1;
570 } 547 }
571 548
549 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
550 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
551
572 /* We need rx buffers and interrupts. */ 552 /* We need rx buffers and interrupts. */
573 already_up = (efx->net_dev->flags & IFF_UP); 553 already_up = (efx->net_dev->flags & IFF_UP);
574 if (!already_up) { 554 if (!already_up) {
@@ -576,25 +556,24 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
576 if (rc) { 556 if (rc) {
577 netif_err(efx, drv, efx->net_dev, 557 netif_err(efx, drv, efx->net_dev,
578 "failed opening device.\n"); 558 "failed opening device.\n");
579 goto fail2; 559 goto fail1;
580 } 560 }
581 } 561 }
582 562
583 memset(&efx_tests, 0, sizeof(efx_tests)); 563 rc = efx_selftest(efx, efx_tests, test->flags);
584
585 rc = efx_selftest(efx, &efx_tests, test->flags);
586 564
587 if (!already_up) 565 if (!already_up)
588 dev_close(efx->net_dev); 566 dev_close(efx->net_dev);
589 567
590 netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", 568 netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
591 rc == 0 ? "passed" : "failed", 569 rc == 0 ? "passed" : "failed",
592 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 570 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
593 571
594 fail2: 572fail1:
595 fail1:
596 /* Fill ethtool results structures */ 573 /* Fill ethtool results structures */
597 efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); 574 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
575 kfree(efx_tests);
576fail:
598 if (rc) 577 if (rc)
599 test->flags |= ETH_TEST_FL_FAILED; 578 test->flags |= ETH_TEST_FL_FAILED;
600} 579}
@@ -607,81 +586,19 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
607 return mdio45_nway_restart(&efx->mdio); 586 return mdio45_nway_restart(&efx->mdio);
608} 587}
609 588
610static u32 efx_ethtool_get_link(struct net_device *net_dev)
611{
612 struct efx_nic *efx = netdev_priv(net_dev);
613
614 return efx->link_state.up;
615}
616
617static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
618{
619 struct efx_nic *efx = netdev_priv(net_dev);
620 struct efx_spi_device *spi = efx->spi_eeprom;
621
622 if (!spi)
623 return 0;
624 return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
625 min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
626}
627
628static int efx_ethtool_get_eeprom(struct net_device *net_dev,
629 struct ethtool_eeprom *eeprom, u8 *buf)
630{
631 struct efx_nic *efx = netdev_priv(net_dev);
632 struct efx_spi_device *spi = efx->spi_eeprom;
633 size_t len;
634 int rc;
635
636 rc = mutex_lock_interruptible(&efx->spi_lock);
637 if (rc)
638 return rc;
639 rc = falcon_spi_read(efx, spi,
640 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
641 eeprom->len, &len, buf);
642 mutex_unlock(&efx->spi_lock);
643
644 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
645 eeprom->len = len;
646 return rc;
647}
648
649static int efx_ethtool_set_eeprom(struct net_device *net_dev,
650 struct ethtool_eeprom *eeprom, u8 *buf)
651{
652 struct efx_nic *efx = netdev_priv(net_dev);
653 struct efx_spi_device *spi = efx->spi_eeprom;
654 size_t len;
655 int rc;
656
657 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
658 return -EINVAL;
659
660 rc = mutex_lock_interruptible(&efx->spi_lock);
661 if (rc)
662 return rc;
663 rc = falcon_spi_write(efx, spi,
664 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
665 eeprom->len, &len, buf);
666 mutex_unlock(&efx->spi_lock);
667
668 eeprom->len = len;
669 return rc;
670}
671
672static int efx_ethtool_get_coalesce(struct net_device *net_dev, 589static int efx_ethtool_get_coalesce(struct net_device *net_dev,
673 struct ethtool_coalesce *coalesce) 590 struct ethtool_coalesce *coalesce)
674{ 591{
675 struct efx_nic *efx = netdev_priv(net_dev); 592 struct efx_nic *efx = netdev_priv(net_dev);
676 struct efx_tx_queue *tx_queue;
677 struct efx_channel *channel; 593 struct efx_channel *channel;
678 594
679 memset(coalesce, 0, sizeof(*coalesce)); 595 memset(coalesce, 0, sizeof(*coalesce));
680 596
681 /* Find lowest IRQ moderation across all used TX queues */ 597 /* Find lowest IRQ moderation across all used TX queues */
682 coalesce->tx_coalesce_usecs_irq = ~((u32) 0); 598 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
683 efx_for_each_tx_queue(tx_queue, efx) { 599 efx_for_each_channel(channel, efx) {
684 channel = tx_queue->channel; 600 if (!efx_channel_has_tx_queues(channel))
601 continue;
685 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 602 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
686 if (channel->channel < efx->n_rx_channels) 603 if (channel->channel < efx->n_rx_channels)
687 coalesce->tx_coalesce_usecs_irq = 604 coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +625,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
708{ 625{
709 struct efx_nic *efx = netdev_priv(net_dev); 626 struct efx_nic *efx = netdev_priv(net_dev);
710 struct efx_channel *channel; 627 struct efx_channel *channel;
711 struct efx_tx_queue *tx_queue;
712 unsigned tx_usecs, rx_usecs, adaptive; 628 unsigned tx_usecs, rx_usecs, adaptive;
713 629
714 if (coalesce->use_adaptive_tx_coalesce) 630 if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +641,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
725 adaptive = coalesce->use_adaptive_rx_coalesce; 641 adaptive = coalesce->use_adaptive_rx_coalesce;
726 642
727 /* If the channel is shared only allow RX parameters to be set */ 643 /* If the channel is shared only allow RX parameters to be set */
728 efx_for_each_tx_queue(tx_queue, efx) { 644 efx_for_each_channel(channel, efx) {
729 if ((tx_queue->channel->channel < efx->n_rx_channels) && 645 if (efx_channel_has_rx_queue(channel) &&
646 efx_channel_has_tx_queues(channel) &&
730 tx_usecs) { 647 tx_usecs) {
731 netif_err(efx, drv, efx->net_dev, "Channel is shared. " 648 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
732 "Only RX coalescing may be set\n"); 649 "Only RX coalescing may be set\n");
@@ -741,11 +658,47 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
741 return 0; 658 return 0;
742} 659}
743 660
661static void efx_ethtool_get_ringparam(struct net_device *net_dev,
662 struct ethtool_ringparam *ring)
663{
664 struct efx_nic *efx = netdev_priv(net_dev);
665
666 ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
667 ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
668 ring->rx_mini_max_pending = 0;
669 ring->rx_jumbo_max_pending = 0;
670 ring->rx_pending = efx->rxq_entries;
671 ring->tx_pending = efx->txq_entries;
672 ring->rx_mini_pending = 0;
673 ring->rx_jumbo_pending = 0;
674}
675
676static int efx_ethtool_set_ringparam(struct net_device *net_dev,
677 struct ethtool_ringparam *ring)
678{
679 struct efx_nic *efx = netdev_priv(net_dev);
680
681 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
682 ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
683 ring->tx_pending > EFX_MAX_DMAQ_SIZE)
684 return -EINVAL;
685
686 if (ring->rx_pending < EFX_MIN_RING_SIZE ||
687 ring->tx_pending < EFX_MIN_RING_SIZE) {
688 netif_err(efx, drv, efx->net_dev,
689 "TX and RX queues cannot be smaller than %ld\n",
690 EFX_MIN_RING_SIZE);
691 return -EINVAL;
692 }
693
694 return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
695}
696
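[annotation] From userspace these handlers are reached through the SIOCETHTOOL ioctl (or simply "ethtool -G eth0 rx 2048 tx 2048"). A minimal sketch, assuming an AF_INET datagram socket and with error handling reduced to the return code:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int resize_rings(const char *ifname, __u32 entries)
	{
		struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ring;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {  /* get_ringparam */
			ring.cmd = ETHTOOL_SRINGPARAM;	  /* set_ringparam */
			ring.rx_pending = entries;
			ring.tx_pending = entries;
			if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
				close(fd);
				return 0;
			}
		}
		close(fd);
		return -1;
	}

Values outside [EFX_MIN_RING_SIZE, EFX_MAX_DMAQ_SIZE] are rejected with -EINVAL by efx_ethtool_set_ringparam() above.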
744static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 697static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
745 struct ethtool_pauseparam *pause) 698 struct ethtool_pauseparam *pause)
746{ 699{
747 struct efx_nic *efx = netdev_priv(net_dev); 700 struct efx_nic *efx = netdev_priv(net_dev);
748 enum efx_fc_type wanted_fc, old_fc; 701 u8 wanted_fc, old_fc;
749 u32 old_adv; 702 u32 old_adv;
750 bool reset; 703 bool reset;
751 int rc = 0; 704 int rc = 0;
@@ -840,7 +793,7 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
840 return efx->type->set_wol(efx, wol->wolopts); 793 return efx->type->set_wol(efx, wol->wolopts);
841} 794}
842 795
843extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) 796static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
844{ 797{
845 struct efx_nic *efx = netdev_priv(net_dev); 798 struct efx_nic *efx = netdev_priv(net_dev);
846 enum reset_type method; 799 enum reset_type method;
@@ -918,6 +871,95 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
918 } 871 }
919} 872}
920 873
874static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
875 struct ethtool_rx_ntuple *ntuple)
876{
877 struct efx_nic *efx = netdev_priv(net_dev);
878 struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
879 struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
880 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
881 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
882 struct efx_filter_spec filter;
883 int rc;
884
885 /* Range-check action */
886 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
887 ntuple->fs.action >= (s32)efx->n_rx_channels)
888 return -EINVAL;
889
890 if (~ntuple->fs.data_mask)
891 return -EINVAL;
892
893 efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
894 (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
895 0xfff : ntuple->fs.action);
896
897 switch (ntuple->fs.flow_type) {
898 case TCP_V4_FLOW:
899 case UDP_V4_FLOW: {
900 u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
901 IPPROTO_TCP : IPPROTO_UDP);
902
903 /* Must match all of destination, */
904 if (ip_mask->ip4dst | ip_mask->pdst)
905 return -EINVAL;
906 /* all or none of source, */
907 if ((ip_mask->ip4src | ip_mask->psrc) &&
908 ((__force u32)~ip_mask->ip4src |
909 (__force u16)~ip_mask->psrc))
910 return -EINVAL;
911 /* and nothing else */
912 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
913 return -EINVAL;
914
915 if (!ip_mask->ip4src)
916 rc = efx_filter_set_ipv4_full(&filter, proto,
917 ip_entry->ip4dst,
918 ip_entry->pdst,
919 ip_entry->ip4src,
920 ip_entry->psrc);
921 else
922 rc = efx_filter_set_ipv4_local(&filter, proto,
923 ip_entry->ip4dst,
924 ip_entry->pdst);
925 if (rc)
926 return rc;
927 break;
928 }
929
930 case ETHER_FLOW:
931 /* Must match all of destination, */
932 if (!is_zero_ether_addr(mac_mask->h_dest))
933 return -EINVAL;
934 /* all or none of VID, */
935 if (ntuple->fs.vlan_tag_mask != 0xf000 &&
936 ntuple->fs.vlan_tag_mask != 0xffff)
937 return -EINVAL;
938 /* and nothing else */
939 if (!is_broadcast_ether_addr(mac_mask->h_source) ||
940 mac_mask->h_proto != htons(0xffff))
941 return -EINVAL;
942
943 rc = efx_filter_set_eth_local(
944 &filter,
945 (ntuple->fs.vlan_tag_mask == 0xf000) ?
946 ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
947 mac_entry->h_dest);
948 if (rc)
949 return rc;
950 break;
951
952 default:
953 return -EINVAL;
954 }
955
956 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
957 return efx_filter_remove_filter(efx, &filter);
958
959 rc = efx_filter_insert_filter(efx, &filter, true);
960 return rc < 0 ? rc : 0;
961}
962
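[annotation] The same filter API is usable directly from driver code. A sketch built only from the helpers this patch exports (the address, port, and queue index are illustrative): steer TCP traffic for local 192.168.0.1:80 to RX queue 3, replacing any identical filter:

	static int example_steer_http(struct efx_nic *efx)
	{
		struct efx_filter_spec spec;
		int rc;

		efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 3);
		rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
					       htonl(0xc0a80001), /* 192.168.0.1 */
					       htons(80));
		if (rc)
			return rc;
		rc = efx_filter_insert_filter(efx, &spec, true /* replace */);
		return rc < 0 ? rc : 0;
	}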
921static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, 963static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
922 struct ethtool_rxfh_indir *indir) 964 struct ethtool_rxfh_indir *indir)
923{ 965{
@@ -965,35 +1007,23 @@ const struct ethtool_ops efx_ethtool_ops = {
965 .get_msglevel = efx_ethtool_get_msglevel, 1007 .get_msglevel = efx_ethtool_get_msglevel,
966 .set_msglevel = efx_ethtool_set_msglevel, 1008 .set_msglevel = efx_ethtool_set_msglevel,
967 .nway_reset = efx_ethtool_nway_reset, 1009 .nway_reset = efx_ethtool_nway_reset,
968 .get_link = efx_ethtool_get_link, 1010 .get_link = ethtool_op_get_link,
969 .get_eeprom_len = efx_ethtool_get_eeprom_len,
970 .get_eeprom = efx_ethtool_get_eeprom,
971 .set_eeprom = efx_ethtool_set_eeprom,
972 .get_coalesce = efx_ethtool_get_coalesce, 1011 .get_coalesce = efx_ethtool_get_coalesce,
973 .set_coalesce = efx_ethtool_set_coalesce, 1012 .set_coalesce = efx_ethtool_set_coalesce,
1013 .get_ringparam = efx_ethtool_get_ringparam,
1014 .set_ringparam = efx_ethtool_set_ringparam,
974 .get_pauseparam = efx_ethtool_get_pauseparam, 1015 .get_pauseparam = efx_ethtool_get_pauseparam,
975 .set_pauseparam = efx_ethtool_set_pauseparam, 1016 .set_pauseparam = efx_ethtool_set_pauseparam,
976 .get_rx_csum = efx_ethtool_get_rx_csum,
977 .set_rx_csum = efx_ethtool_set_rx_csum,
978 .get_tx_csum = ethtool_op_get_tx_csum,
979 /* Need to enable/disable IPv6 too */
980 .set_tx_csum = efx_ethtool_set_tx_csum,
981 .get_sg = ethtool_op_get_sg,
982 .set_sg = ethtool_op_set_sg,
983 .get_tso = ethtool_op_get_tso,
984 /* Need to enable/disable TSO-IPv6 too */
985 .set_tso = efx_ethtool_set_tso,
986 .get_flags = ethtool_op_get_flags,
987 .set_flags = efx_ethtool_set_flags,
988 .get_sset_count = efx_ethtool_get_sset_count, 1017 .get_sset_count = efx_ethtool_get_sset_count,
989 .self_test = efx_ethtool_self_test, 1018 .self_test = efx_ethtool_self_test,
990 .get_strings = efx_ethtool_get_strings, 1019 .get_strings = efx_ethtool_get_strings,
991 .phys_id = efx_ethtool_phys_id, 1020 .set_phys_id = efx_ethtool_phys_id,
992 .get_ethtool_stats = efx_ethtool_get_stats, 1021 .get_ethtool_stats = efx_ethtool_get_stats,
993 .get_wol = efx_ethtool_get_wol, 1022 .get_wol = efx_ethtool_get_wol,
994 .set_wol = efx_ethtool_set_wol, 1023 .set_wol = efx_ethtool_set_wol,
995 .reset = efx_ethtool_reset, 1024 .reset = efx_ethtool_reset,
996 .get_rxnfc = efx_ethtool_get_rxnfc, 1025 .get_rxnfc = efx_ethtool_get_rxnfc,
1026 .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
997 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1027 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
998 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1028 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
999}; 1029};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 4f9d33f3cca1..60176e873d62 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -24,7 +24,6 @@
24#include "nic.h" 24#include "nic.h"
25#include "regs.h" 25#include "regs.h"
26#include "io.h" 26#include "io.h"
27#include "mdio_10g.h"
28#include "phy.h" 27#include "phy.h"
29#include "workarounds.h" 28#include "workarounds.h"
30 29
@@ -159,7 +158,6 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
159{ 158{
160 struct efx_nic *efx = dev_id; 159 struct efx_nic *efx = dev_id;
161 efx_oword_t *int_ker = efx->irq_status.addr; 160 efx_oword_t *int_ker = efx->irq_status.addr;
162 struct efx_channel *channel;
163 int syserr; 161 int syserr;
164 int queues; 162 int queues;
165 163
@@ -194,15 +192,10 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
194 wmb(); /* Ensure the vector is cleared before interrupt ack */ 192 wmb(); /* Ensure the vector is cleared before interrupt ack */
195 falcon_irq_ack_a1(efx); 193 falcon_irq_ack_a1(efx);
196 194
197 /* Schedule processing of any interrupting queues */ 195 if (queues & 1)
198 channel = &efx->channel[0]; 196 efx_schedule_channel(efx_get_channel(efx, 0));
199 while (queues) { 197 if (queues & 2)
200 if (queues & 0x01) 198 efx_schedule_channel(efx_get_channel(efx, 1));
201 efx_schedule_channel(channel);
202 channel++;
203 queues >>= 1;
204 }
205
206 return IRQ_HANDLED; 199 return IRQ_HANDLED;
207} 200}
208/************************************************************************** 201/**************************************************************************
@@ -261,7 +254,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
261 /* Input validation */ 254 /* Input validation */
262 if (len > FALCON_SPI_MAX_LEN) 255 if (len > FALCON_SPI_MAX_LEN)
263 return -EINVAL; 256 return -EINVAL;
264 BUG_ON(!mutex_is_locked(&efx->spi_lock));
265 257
266 /* Check that previous command is not still running */ 258 /* Check that previous command is not still running */
267 rc = falcon_spi_poll(efx); 259 rc = falcon_spi_poll(efx);
@@ -452,30 +444,19 @@ static void falcon_reset_macs(struct efx_nic *efx)
452 /* It's not safe to use GLB_CTL_REG to reset the 444 /* It's not safe to use GLB_CTL_REG to reset the
453 * macs, so instead use the internal MAC resets 445 * macs, so instead use the internal MAC resets
454 */ 446 */
455 if (!EFX_IS10G(efx)) { 447 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
456 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1); 448 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
457 efx_writeo(efx, &reg, FR_AB_GM_CFG1); 449
458 udelay(1000); 450 for (count = 0; count < 10000; count++) {
459 451 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
460 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0); 452 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
461 efx_writeo(efx, &reg, FR_AB_GM_CFG1); 453 0)
462 udelay(1000); 454 return;
463 return; 455 udelay(10);
464 } else {
465 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
466 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
467
468 for (count = 0; count < 10000; count++) {
469 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
470 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
471 0)
472 return;
473 udelay(10);
474 }
475
476 netif_err(efx, hw, efx->net_dev,
477 "timed out waiting for XMAC core reset\n");
478 } 456 }
457
458 netif_err(efx, hw, efx->net_dev,
459 "timed out waiting for XMAC core reset\n");
479 } 460 }
480 461
 481 /* Mac stats will fail whilst the TX fifo is draining */ 462
@@ -514,7 +495,6 @@ static void falcon_reset_macs(struct efx_nic *efx)
514 * are re-enabled by the caller */ 495 * are re-enabled by the caller */
515 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); 496 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
516 497
517 /* This can run even when the GMAC is selected */
518 falcon_setup_xaui(efx); 498 falcon_setup_xaui(efx);
519} 499}
520 500
@@ -652,8 +632,6 @@ static void falcon_stats_timer_func(unsigned long context)
652 spin_unlock(&efx->stats_lock); 632 spin_unlock(&efx->stats_lock);
653} 633}
654 634
655static void falcon_switch_mac(struct efx_nic *efx);
656
657static bool falcon_loopback_link_poll(struct efx_nic *efx) 635static bool falcon_loopback_link_poll(struct efx_nic *efx)
658{ 636{
659 struct efx_link_state old_state = efx->link_state; 637 struct efx_link_state old_state = efx->link_state;
@@ -664,11 +642,7 @@ static bool falcon_loopback_link_poll(struct efx_nic *efx)
664 efx->link_state.fd = true; 642 efx->link_state.fd = true;
665 efx->link_state.fc = efx->wanted_fc; 643 efx->link_state.fc = efx->wanted_fc;
666 efx->link_state.up = true; 644 efx->link_state.up = true;
667 645 efx->link_state.speed = 10000;
668 if (efx->loopback_mode == LOOPBACK_GMAC)
669 efx->link_state.speed = 1000;
670 else
671 efx->link_state.speed = 10000;
672 646
673 return !efx_link_state_equal(&efx->link_state, &old_state); 647 return !efx_link_state_equal(&efx->link_state, &old_state);
674} 648}
@@ -691,7 +665,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
691 falcon_stop_nic_stats(efx); 665 falcon_stop_nic_stats(efx);
692 falcon_deconfigure_mac_wrapper(efx); 666 falcon_deconfigure_mac_wrapper(efx);
693 667
694 falcon_switch_mac(efx); 668 falcon_reset_macs(efx);
695 669
696 efx->phy_op->reconfigure(efx); 670 efx->phy_op->reconfigure(efx);
697 rc = efx->mac_op->reconfigure(efx); 671 rc = efx->mac_op->reconfigure(efx);
@@ -718,7 +692,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
718 efx_oword_t md_stat; 692 efx_oword_t md_stat;
719 int count; 693 int count;
720 694
721 /* wait upto 50ms - taken max from datasheet */ 695 /* wait up to 50ms - taken max from datasheet */
722 for (count = 0; count < 5000; count++) { 696 for (count = 0; count < 5000; count++) {
723 efx_reado(efx, &md_stat, FR_AB_MD_STAT); 697 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
724 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { 698 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
@@ -743,6 +717,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
743 int prtad, int devad, u16 addr, u16 value) 717 int prtad, int devad, u16 addr, u16 value)
744{ 718{
745 struct efx_nic *efx = netdev_priv(net_dev); 719 struct efx_nic *efx = netdev_priv(net_dev);
720 struct falcon_nic_data *nic_data = efx->nic_data;
746 efx_oword_t reg; 721 efx_oword_t reg;
747 int rc; 722 int rc;
748 723
@@ -750,7 +725,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
750 "writing MDIO %d register %d.%d with 0x%04x\n", 725 "writing MDIO %d register %d.%d with 0x%04x\n",
751 prtad, devad, addr, value); 726 prtad, devad, addr, value);
752 727
753 mutex_lock(&efx->mdio_lock); 728 mutex_lock(&nic_data->mdio_lock);
754 729
755 /* Check MDIO not currently being accessed */ 730 /* Check MDIO not currently being accessed */
756 rc = falcon_gmii_wait(efx); 731 rc = falcon_gmii_wait(efx);
@@ -786,7 +761,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
786 } 761 }
787 762
788out: 763out:
789 mutex_unlock(&efx->mdio_lock); 764 mutex_unlock(&nic_data->mdio_lock);
790 return rc; 765 return rc;
791} 766}
792 767
@@ -795,10 +770,11 @@ static int falcon_mdio_read(struct net_device *net_dev,
795 int prtad, int devad, u16 addr) 770 int prtad, int devad, u16 addr)
796{ 771{
797 struct efx_nic *efx = netdev_priv(net_dev); 772 struct efx_nic *efx = netdev_priv(net_dev);
773 struct falcon_nic_data *nic_data = efx->nic_data;
798 efx_oword_t reg; 774 efx_oword_t reg;
799 int rc; 775 int rc;
800 776
801 mutex_lock(&efx->mdio_lock); 777 mutex_lock(&nic_data->mdio_lock);
802 778
803 /* Check MDIO not currently being accessed */ 779 /* Check MDIO not currently being accessed */
804 rc = falcon_gmii_wait(efx); 780 rc = falcon_gmii_wait(efx);
@@ -837,77 +813,27 @@ static int falcon_mdio_read(struct net_device *net_dev,
837 } 813 }
838 814
839out: 815out:
840 mutex_unlock(&efx->mdio_lock); 816 mutex_unlock(&nic_data->mdio_lock);
841 return rc; 817 return rc;
842} 818}
843 819
844static void falcon_clock_mac(struct efx_nic *efx)
845{
846 unsigned strap_val;
847 efx_oword_t nic_stat;
848
849 /* Configure the NIC generated MAC clock correctly */
850 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
851 strap_val = EFX_IS10G(efx) ? 5 : 3;
852 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
853 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
854 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
855 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
856 } else {
857 /* Falcon A1 does not support 1G/10G speed switching
858 * and must not be used with a PHY that does. */
859 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
860 strap_val);
861 }
862}
863
864static void falcon_switch_mac(struct efx_nic *efx)
865{
866 struct efx_mac_operations *old_mac_op = efx->mac_op;
867 struct falcon_nic_data *nic_data = efx->nic_data;
868 unsigned int stats_done_offset;
869
870 WARN_ON(!mutex_is_locked(&efx->mac_lock));
871 WARN_ON(nic_data->stats_disable_count == 0);
872
873 efx->mac_op = (EFX_IS10G(efx) ?
874 &falcon_xmac_operations : &falcon_gmac_operations);
875
876 if (EFX_IS10G(efx))
877 stats_done_offset = XgDmaDone_offset;
878 else
879 stats_done_offset = GDmaDone_offset;
880 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
881
882 if (old_mac_op == efx->mac_op)
883 return;
884
885 falcon_clock_mac(efx);
886
887 netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
888 EFX_IS10G(efx) ? 'X' : 'G');
889 /* Not all macs support a mac-level link state */
890 efx->xmac_poll_required = false;
891 falcon_reset_macs(efx);
892}
893
894/* This call is responsible for hooking in the MAC and PHY operations */ 820/* This call is responsible for hooking in the MAC and PHY operations */
895static int falcon_probe_port(struct efx_nic *efx) 821static int falcon_probe_port(struct efx_nic *efx)
896{ 822{
823 struct falcon_nic_data *nic_data = efx->nic_data;
897 int rc; 824 int rc;
898 825
899 switch (efx->phy_type) { 826 switch (efx->phy_type) {
900 case PHY_TYPE_SFX7101: 827 case PHY_TYPE_SFX7101:
901 efx->phy_op = &falcon_sfx7101_phy_ops; 828 efx->phy_op = &falcon_sfx7101_phy_ops;
902 break; 829 break;
903 case PHY_TYPE_SFT9001A:
904 case PHY_TYPE_SFT9001B:
905 efx->phy_op = &falcon_sft9001_phy_ops;
906 break;
907 case PHY_TYPE_QT2022C2: 830 case PHY_TYPE_QT2022C2:
908 case PHY_TYPE_QT2025C: 831 case PHY_TYPE_QT2025C:
909 efx->phy_op = &falcon_qt202x_phy_ops; 832 efx->phy_op = &falcon_qt202x_phy_ops;
910 break; 833 break;
834 case PHY_TYPE_TXC43128:
835 efx->phy_op = &falcon_txc_phy_ops;
836 break;
911 default: 837 default:
912 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", 838 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
913 efx->phy_type); 839 efx->phy_type);
@@ -915,6 +841,7 @@ static int falcon_probe_port(struct efx_nic *efx)
915 } 841 }
916 842
917 /* Fill out MDIO structure and loopback modes */ 843 /* Fill out MDIO structure and loopback modes */
844 mutex_init(&nic_data->mdio_lock);
918 efx->mdio.mdio_read = falcon_mdio_read; 845 efx->mdio.mdio_read = falcon_mdio_read;
919 efx->mdio.mdio_write = falcon_mdio_write; 846 efx->mdio.mdio_write = falcon_mdio_write;
920 rc = efx->phy_op->probe(efx); 847 rc = efx->phy_op->probe(efx);
@@ -943,6 +870,7 @@ static int falcon_probe_port(struct efx_nic *efx)
943 (u64)efx->stats_buffer.dma_addr, 870 (u64)efx->stats_buffer.dma_addr,
944 efx->stats_buffer.addr, 871 efx->stats_buffer.addr,
945 (u64)virt_to_phys(efx->stats_buffer.addr)); 872 (u64)virt_to_phys(efx->stats_buffer.addr));
873 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
946 874
947 return 0; 875 return 0;
948} 876}
@@ -953,6 +881,41 @@ static void falcon_remove_port(struct efx_nic *efx)
953 efx_nic_free_buffer(efx, &efx->stats_buffer); 881 efx_nic_free_buffer(efx, &efx->stats_buffer);
954} 882}
955 883
884/* Global events are basically PHY events */
885static bool
886falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
887{
888 struct efx_nic *efx = channel->efx;
889 struct falcon_nic_data *nic_data = efx->nic_data;
890
891 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
892 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
893 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
894 /* Ignored */
895 return true;
896
897 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
898 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
899 nic_data->xmac_poll_required = true;
900 return true;
901 }
902
903 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
904 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
905 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
906 netif_err(efx, rx_err, efx->net_dev,
907 "channel %d seen global RX_RESET event. Resetting.\n",
908 channel->channel);
909
910 atomic_inc(&efx->rx_reset);
911 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
912 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
913 return true;
914 }
915
916 return false;
917}
918
956/************************************************************************** 919/**************************************************************************
957 * 920 *
958 * Falcon test code 921 * Falcon test code
@@ -962,6 +925,7 @@ static void falcon_remove_port(struct efx_nic *efx)
962static int 925static int
963falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) 926falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
964{ 927{
928 struct falcon_nic_data *nic_data = efx->nic_data;
965 struct falcon_nvconfig *nvconfig; 929 struct falcon_nvconfig *nvconfig;
966 struct efx_spi_device *spi; 930 struct efx_spi_device *spi;
967 void *region; 931 void *region;
@@ -969,8 +933,11 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
969 __le16 *word, *limit; 933 __le16 *word, *limit;
970 u32 csum; 934 u32 csum;
971 935
972 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom; 936 if (efx_spi_present(&nic_data->spi_flash))
973 if (!spi) 937 spi = &nic_data->spi_flash;
938 else if (efx_spi_present(&nic_data->spi_eeprom))
939 spi = &nic_data->spi_eeprom;
940 else
974 return -EINVAL; 941 return -EINVAL;
975 942
976 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 943 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -978,12 +945,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
978 return -ENOMEM; 945 return -ENOMEM;
979 nvconfig = region + FALCON_NVCONFIG_OFFSET; 946 nvconfig = region + FALCON_NVCONFIG_OFFSET;
980 947
981 mutex_lock(&efx->spi_lock); 948 mutex_lock(&nic_data->spi_lock);
982 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); 949 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
983 mutex_unlock(&efx->spi_lock); 950 mutex_unlock(&nic_data->spi_lock);
984 if (rc) { 951 if (rc) {
985 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", 952 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
986 efx->spi_flash ? "flash" : "EEPROM"); 953 efx_spi_present(&nic_data->spi_flash) ?
954 "flash" : "EEPROM");
987 rc = -EIO; 955 rc = -EIO;
988 goto out; 956 goto out;
989 } 957 }
@@ -1085,7 +1053,7 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
1085 1053
1086/* Resets NIC to known state. This routine must be called in process 1054/* Resets NIC to known state. This routine must be called in process
1087 * context and is allowed to sleep. */ 1055 * context and is allowed to sleep. */
1088static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) 1056static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1089{ 1057{
1090 struct falcon_nic_data *nic_data = efx->nic_data; 1058 struct falcon_nic_data *nic_data = efx->nic_data;
1091 efx_oword_t glb_ctl_reg_ker; 1059 efx_oword_t glb_ctl_reg_ker;
@@ -1139,22 +1107,9 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1139 1107
1140 /* Restore PCI configuration if needed */ 1108 /* Restore PCI configuration if needed */
1141 if (method == RESET_TYPE_WORLD) { 1109 if (method == RESET_TYPE_WORLD) {
1142 if (efx_nic_is_dual_func(efx)) { 1110 if (efx_nic_is_dual_func(efx))
1143 rc = pci_restore_state(nic_data->pci_dev2); 1111 pci_restore_state(nic_data->pci_dev2);
1144 if (rc) { 1112 pci_restore_state(efx->pci_dev);
1145 netif_err(efx, drv, efx->net_dev,
1146 "failed to restore PCI config for "
1147 "the secondary function\n");
1148 goto fail3;
1149 }
1150 }
1151 rc = pci_restore_state(efx->pci_dev);
1152 if (rc) {
1153 netif_err(efx, drv, efx->net_dev,
1154 "failed to restore PCI config for the "
1155 "primary function\n");
1156 goto fail4;
1157 }
1158 netif_dbg(efx, drv, efx->net_dev, 1113 netif_dbg(efx, drv, efx->net_dev,
1159 "successfully restored PCI config\n"); 1114 "successfully restored PCI config\n");
1160 } 1115 }
@@ -1165,7 +1120,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1165 rc = -ETIMEDOUT; 1120 rc = -ETIMEDOUT;
1166 netif_err(efx, hw, efx->net_dev, 1121 netif_err(efx, hw, efx->net_dev,
1167 "timed out waiting for hardware reset\n"); 1122 "timed out waiting for hardware reset\n");
1168 goto fail5; 1123 goto fail3;
1169 } 1124 }
1170 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); 1125 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1171 1126
@@ -1173,11 +1128,21 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1173 1128
1174 /* pci_save_state() and pci_restore_state() MUST be called in pairs */ 1129 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1175fail2: 1130fail2:
1176fail3:
1177 pci_restore_state(efx->pci_dev); 1131 pci_restore_state(efx->pci_dev);
1178fail1: 1132fail1:
1179fail4: 1133fail3:
1180fail5: 1134 return rc;
1135}
1136
1137static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1138{
1139 struct falcon_nic_data *nic_data = efx->nic_data;
1140 int rc;
1141
1142 mutex_lock(&nic_data->spi_lock);
1143 rc = __falcon_reset_hw(efx, method);
1144 mutex_unlock(&nic_data->spi_lock);
1145
1181 return rc; 1146 return rc;
1182} 1147}
1183 1148
@@ -1207,7 +1172,7 @@ static void falcon_monitor(struct efx_nic *efx)
1207 falcon_stop_nic_stats(efx); 1172 falcon_stop_nic_stats(efx);
1208 falcon_deconfigure_mac_wrapper(efx); 1173 falcon_deconfigure_mac_wrapper(efx);
1209 1174
1210 falcon_switch_mac(efx); 1175 falcon_reset_macs(efx);
1211 rc = efx->mac_op->reconfigure(efx); 1176 rc = efx->mac_op->reconfigure(efx);
1212 BUG_ON(rc); 1177 BUG_ON(rc);
1213 1178
@@ -1216,8 +1181,7 @@ static void falcon_monitor(struct efx_nic *efx)
1216 efx_link_status_changed(efx); 1181 efx_link_status_changed(efx);
1217 } 1182 }
1218 1183
1219 if (EFX_IS10G(efx)) 1184 falcon_poll_xmac(efx);
1220 falcon_poll_xmac(efx);
1221} 1185}
1222 1186
1223/* Zeroes out the SRAM contents. This routine must be called in 1187/* Zeroes out the SRAM contents. This routine must be called in
@@ -1257,22 +1221,17 @@ static int falcon_reset_sram(struct efx_nic *efx)
1257 1221
1258 return 0; 1222 return 0;
1259 } 1223 }
1260 } while (++count < 20); /* wait upto 0.4 sec */ 1224 } while (++count < 20); /* wait up to 0.4 sec */
1261 1225
1262 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); 1226 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
1263 return -ETIMEDOUT; 1227 return -ETIMEDOUT;
1264} 1228}
1265 1229
1266static int falcon_spi_device_init(struct efx_nic *efx, 1230static void falcon_spi_device_init(struct efx_nic *efx,
1267 struct efx_spi_device **spi_device_ret, 1231 struct efx_spi_device *spi_device,
1268 unsigned int device_id, u32 device_type) 1232 unsigned int device_id, u32 device_type)
1269{ 1233{
1270 struct efx_spi_device *spi_device;
1271
1272 if (device_type != 0) { 1234 if (device_type != 0) {
1273 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1274 if (!spi_device)
1275 return -ENOMEM;
1276 spi_device->device_id = device_id; 1235 spi_device->device_id = device_id;
1277 spi_device->size = 1236 spi_device->size =
1278 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); 1237 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1289,27 +1248,15 @@ static int falcon_spi_device_init(struct efx_nic *efx,
1289 1 << SPI_DEV_TYPE_FIELD(device_type, 1248 1 << SPI_DEV_TYPE_FIELD(device_type,
1290 SPI_DEV_TYPE_BLOCK_SIZE); 1249 SPI_DEV_TYPE_BLOCK_SIZE);
1291 } else { 1250 } else {
1292 spi_device = NULL; 1251 spi_device->size = 0;
1293 } 1252 }
1294
1295 kfree(*spi_device_ret);
1296 *spi_device_ret = spi_device;
1297 return 0;
1298}
1299
1300static void falcon_remove_spi_devices(struct efx_nic *efx)
1301{
1302 kfree(efx->spi_eeprom);
1303 efx->spi_eeprom = NULL;
1304 kfree(efx->spi_flash);
1305 efx->spi_flash = NULL;
1306} 1253}
1307 1254
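[annotation] With the SPI device structures now embedded in falcon_nic_data rather than kmalloc'd, "not fitted" is encoded as size == 0. efx_spi_present(), used in falcon_read_nvram() above, is presumably the matching one-liner in spi.h (a sketch, not copied from the tree):

	static inline bool efx_spi_present(const struct efx_spi_device *spi)
	{
		return spi->size != 0;
	}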
1308/* Extract non-volatile configuration */ 1255/* Extract non-volatile configuration */
1309static int falcon_probe_nvconfig(struct efx_nic *efx) 1256static int falcon_probe_nvconfig(struct efx_nic *efx)
1310{ 1257{
1258 struct falcon_nic_data *nic_data = efx->nic_data;
1311 struct falcon_nvconfig *nvconfig; 1259 struct falcon_nvconfig *nvconfig;
1312 int board_rev;
1313 int rc; 1260 int rc;
1314 1261
1315 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 1262 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1317,55 +1264,32 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1317 return -ENOMEM; 1264 return -ENOMEM;
1318 1265
1319 rc = falcon_read_nvram(efx, nvconfig); 1266 rc = falcon_read_nvram(efx, nvconfig);
1320 if (rc == -EINVAL) { 1267 if (rc)
1321 netif_err(efx, probe, efx->net_dev, 1268 goto out;
1322 "NVRAM is invalid therefore using defaults\n"); 1269
1323 efx->phy_type = PHY_TYPE_NONE; 1270 efx->phy_type = nvconfig->board_v2.port0_phy_type;
1324 efx->mdio.prtad = MDIO_PRTAD_NONE; 1271 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
1325 board_rev = 0; 1272
1326 rc = 0; 1273 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1327 } else if (rc) { 1274 falcon_spi_device_init(
1328 goto fail1; 1275 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1329 } else { 1276 le32_to_cpu(nvconfig->board_v3
1330 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; 1277 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
1331 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3; 1278 falcon_spi_device_init(
1332 1279 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1333 efx->phy_type = v2->port0_phy_type; 1280 le32_to_cpu(nvconfig->board_v3
1334 efx->mdio.prtad = v2->port0_phy_addr; 1281 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
1335 board_rev = le16_to_cpu(v2->board_revision);
1336
1337 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1338 rc = falcon_spi_device_init(
1339 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1340 le32_to_cpu(v3->spi_device_type
1341 [FFE_AB_SPI_DEVICE_FLASH]));
1342 if (rc)
1343 goto fail2;
1344 rc = falcon_spi_device_init(
1345 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1346 le32_to_cpu(v3->spi_device_type
1347 [FFE_AB_SPI_DEVICE_EEPROM]));
1348 if (rc)
1349 goto fail2;
1350 }
1351 } 1282 }
1352 1283
1353 /* Read the MAC addresses */ 1284 /* Read the MAC addresses */
1354 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); 1285 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
1355 1286
1356 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", 1287 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1357 efx->phy_type, efx->mdio.prtad); 1288 efx->phy_type, efx->mdio.prtad);
1358 1289
1359 rc = falcon_probe_board(efx, board_rev); 1290 rc = falcon_probe_board(efx,
1360 if (rc) 1291 le16_to_cpu(nvconfig->board_v2.board_revision));
1361 goto fail2; 1292out:
1362
1363 kfree(nvconfig);
1364 return 0;
1365
1366 fail2:
1367 falcon_remove_spi_devices(efx);
1368 fail1:
1369 kfree(nvconfig); 1293 kfree(nvconfig);
1370 return rc; 1294 return rc;
1371} 1295}
@@ -1373,6 +1297,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1373/* Probe all SPI devices on the NIC */ 1297/* Probe all SPI devices on the NIC */
1374static void falcon_probe_spi_devices(struct efx_nic *efx) 1298static void falcon_probe_spi_devices(struct efx_nic *efx)
1375{ 1299{
1300 struct falcon_nic_data *nic_data = efx->nic_data;
1376 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 1301 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1377 int boot_dev; 1302 int boot_dev;
1378 1303
@@ -1401,12 +1326,14 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
1401 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); 1326 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1402 } 1327 }
1403 1328
1329 mutex_init(&nic_data->spi_lock);
1330
1404 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) 1331 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1405 falcon_spi_device_init(efx, &efx->spi_flash, 1332 falcon_spi_device_init(efx, &nic_data->spi_flash,
1406 FFE_AB_SPI_DEVICE_FLASH, 1333 FFE_AB_SPI_DEVICE_FLASH,
1407 default_flash_type); 1334 default_flash_type);
1408 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) 1335 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1409 falcon_spi_device_init(efx, &efx->spi_eeprom, 1336 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
1410 FFE_AB_SPI_DEVICE_EEPROM, 1337 FFE_AB_SPI_DEVICE_EEPROM,
1411 large_eeprom_type); 1338 large_eeprom_type);
1412} 1339}
@@ -1471,7 +1398,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1471 } 1398 }
1472 1399
1473 /* Now we can reset the NIC */ 1400 /* Now we can reset the NIC */
1474 rc = falcon_reset_hw(efx, RESET_TYPE_ALL); 1401 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
1475 if (rc) { 1402 if (rc) {
1476 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 1403 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1477 goto fail3; 1404 goto fail3;
@@ -1493,8 +1420,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
1493 1420
1494 /* Read in the non-volatile configuration */ 1421 /* Read in the non-volatile configuration */
1495 rc = falcon_probe_nvconfig(efx); 1422 rc = falcon_probe_nvconfig(efx);
1496 if (rc) 1423 if (rc) {
1424 if (rc == -EINVAL)
1425 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
1497 goto fail5; 1426 goto fail5;
1427 }
1498 1428
1499 /* Initialise I2C adapter */ 1429 /* Initialise I2C adapter */
1500 board = falcon_board(efx); 1430 board = falcon_board(efx);
@@ -1526,7 +1456,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
1526 BUG_ON(i2c_del_adapter(&board->i2c_adap)); 1456 BUG_ON(i2c_del_adapter(&board->i2c_adap));
1527 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1457 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1528 fail5: 1458 fail5:
1529 falcon_remove_spi_devices(efx);
1530 efx_nic_free_buffer(efx, &efx->irq_status); 1459 efx_nic_free_buffer(efx, &efx->irq_status);
1531 fail4: 1460 fail4:
1532 fail3: 1461 fail3:
@@ -1549,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1549 /* RX control FIFO thresholds (32 entries) */ 1478 /* RX control FIFO thresholds (32 entries) */
1550 const unsigned ctrl_xon_thr = 20; 1479 const unsigned ctrl_xon_thr = 20;
1551 const unsigned ctrl_xoff_thr = 25; 1480 const unsigned ctrl_xoff_thr = 25;
1552 /* RX data FIFO thresholds (256-byte units; size varies) */
1553 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1554 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1555 efx_oword_t reg; 1481 efx_oword_t reg;
1556 1482
1557 efx_reado(efx, &reg, FR_AZ_RX_CFG); 1483 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1558 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { 1484 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1559 /* Data FIFO size is 5.5K */ 1485 /* Data FIFO size is 5.5K */
1560 if (data_xon_thr < 0)
1561 data_xon_thr = 512 >> 8;
1562 if (data_xoff_thr < 0)
1563 data_xoff_thr = 2048 >> 8;
1564 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); 1486 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1565 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, 1487 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1566 huge_buf_size); 1488 huge_buf_size);
1567 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); 1489 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
1568 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); 1490 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
1569 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); 1491 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1570 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); 1492 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1571 } else { 1493 } else {
1572 /* Data FIFO size is 80K; register fields moved */ 1494 /* Data FIFO size is 80K; register fields moved */
1573 if (data_xon_thr < 0)
1574 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1575 if (data_xoff_thr < 0)
1576 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1577 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); 1495 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1578 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, 1496 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1579 huge_buf_size); 1497 huge_buf_size);
1580 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); 1498 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
1581 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); 1499 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
1500 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
1582 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); 1501 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1583 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); 1502 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1584 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 1503 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
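/* Editor's note (worked arithmetic, not part of the patch): 27648 = 3 * 9216,
 * i.e. three maximum-sized frames above empty, and 54272 = 81920 - 27648,
 * i.e. three maximum-sized frames below the 80 KiB FIFO's capacity; the
 * ">> 8" converts both to the register's 256-byte units. */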
@@ -1610,16 +1529,6 @@ static int falcon_init_nic(struct efx_nic *efx)
1610 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); 1529 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
1611 efx_writeo(efx, &temp, FR_AB_NIC_STAT); 1530 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
1612 1531
1613 /* Set the source of the GMAC clock */
1614 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
1615 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
1616 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
1617 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
1618 }
1619
1620 /* Select the correct MAC */
1621 falcon_clock_mac(efx);
1622
1623 rc = falcon_reset_sram(efx); 1532 rc = falcon_reset_sram(efx);
1624 if (rc) 1533 if (rc)
1625 return rc; 1534 return rc;
@@ -1690,10 +1599,9 @@ static void falcon_remove_nic(struct efx_nic *efx)
1690 BUG_ON(rc); 1599 BUG_ON(rc);
1691 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1600 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1692 1601
1693 falcon_remove_spi_devices(efx);
1694 efx_nic_free_buffer(efx, &efx->irq_status); 1602 efx_nic_free_buffer(efx, &efx->irq_status);
1695 1603
1696 falcon_reset_hw(efx, RESET_TYPE_ALL); 1604 __falcon_reset_hw(efx, RESET_TYPE_ALL);
1697 1605
1698 /* Release the second function after the reset */ 1606 /* Release the second function after the reset */
1699 if (nic_data->pci_dev2) { 1607 if (nic_data->pci_dev2) {
@@ -1795,7 +1703,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
1795 ************************************************************************** 1703 **************************************************************************
1796 */ 1704 */
1797 1705
1798struct efx_nic_type falcon_a1_nic_type = { 1706const struct efx_nic_type falcon_a1_nic_type = {
1799 .probe = falcon_probe_nic, 1707 .probe = falcon_probe_nic,
1800 .remove = falcon_remove_nic, 1708 .remove = falcon_remove_nic,
1801 .init = falcon_init_nic, 1709 .init = falcon_init_nic,
@@ -1804,6 +1712,7 @@ struct efx_nic_type falcon_a1_nic_type = {
1804 .reset = falcon_reset_hw, 1712 .reset = falcon_reset_hw,
1805 .probe_port = falcon_probe_port, 1713 .probe_port = falcon_probe_port,
1806 .remove_port = falcon_remove_port, 1714 .remove_port = falcon_remove_port,
1715 .handle_global_event = falcon_handle_global_event,
1807 .prepare_flush = falcon_prepare_flush, 1716 .prepare_flush = falcon_prepare_flush,
1808 .update_stats = falcon_update_nic_stats, 1717 .update_stats = falcon_update_nic_stats,
1809 .start_stats = falcon_start_nic_stats, 1718 .start_stats = falcon_start_nic_stats,
@@ -1835,7 +1744,7 @@ struct efx_nic_type falcon_a1_nic_type = {
1835 .reset_world_flags = ETH_RESET_IRQ, 1744 .reset_world_flags = ETH_RESET_IRQ,
1836}; 1745};
1837 1746
1838struct efx_nic_type falcon_b0_nic_type = { 1747const struct efx_nic_type falcon_b0_nic_type = {
1839 .probe = falcon_probe_nic, 1748 .probe = falcon_probe_nic,
1840 .remove = falcon_remove_nic, 1749 .remove = falcon_remove_nic,
1841 .init = falcon_init_nic, 1750 .init = falcon_init_nic,
@@ -1844,6 +1753,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1844 .reset = falcon_reset_hw, 1753 .reset = falcon_reset_hw,
1845 .probe_port = falcon_probe_port, 1754 .probe_port = falcon_probe_port,
1846 .remove_port = falcon_remove_port, 1755 .remove_port = falcon_remove_port,
1756 .handle_global_event = falcon_handle_global_event,
1847 .prepare_flush = falcon_prepare_flush, 1757 .prepare_flush = falcon_prepare_flush,
1848 .update_stats = falcon_update_nic_stats, 1758 .update_stats = falcon_update_nic_stats,
1849 .start_stats = falcon_start_nic_stats, 1759 .start_stats = falcon_start_nic_stats,
@@ -1880,7 +1790,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1880 * channels */ 1790 * channels */
1881 .tx_dc_base = 0x130000, 1791 .tx_dc_base = 0x130000,
1882 .rx_dc_base = 0x100000, 1792 .rx_dc_base = 0x100000,
1883 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH, 1793 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
1884 .reset_world_flags = ETH_RESET_IRQ, 1794 .reset_world_flags = ETH_RESET_IRQ,
1885}; 1795};
1886 1796
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 3d950c2cf205..b9cc846811d6 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -13,8 +13,6 @@
13#include "phy.h" 13#include "phy.h"
14#include "efx.h" 14#include "efx.h"
15#include "nic.h" 15#include "nic.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h" 16#include "workarounds.h"
19 17
20/* Macros for unpacking the board revision */ 18/* Macros for unpacking the board revision */
@@ -26,21 +24,32 @@
26/* Board types */ 24/* Board types */
27#define FALCON_BOARD_SFE4001 0x01 25#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02 26#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51 27#define FALCON_BOARD_SFE4003 0x03
30#define FALCON_BOARD_SFN4112F 0x52 28#define FALCON_BOARD_SFN4112F 0x52
31 29
32/* Board temperature is about 15°C above ambient when air flow is 30/* Board temperature is about 15°C above ambient when air flow is
33 * limited. */ 31 * limited. The maximum acceptable ambient temperature varies
32 * depending on the PHY specifications but the critical temperature
33 * above which we should shut down to avoid damage is 80°C. */
34#define FALCON_BOARD_TEMP_BIAS 15 34#define FALCON_BOARD_TEMP_BIAS 15
35#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
35 36
36/* SFC4000 datasheet says: 'The maximum permitted junction temperature 37/* SFC4000 datasheet says: 'The maximum permitted junction temperature
37 * is 125°C; the thermal design of the environment for the SFC4000 38 * is 125°C; the thermal design of the environment for the SFC4000
38 * should aim to keep this well below 100°C.' */ 39 * should aim to keep this well below 100°C.' */
40#define FALCON_JUNC_TEMP_MIN 0
39#define FALCON_JUNC_TEMP_MAX 90 41#define FALCON_JUNC_TEMP_MAX 90
42#define FALCON_JUNC_TEMP_CRIT 125
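/* Editor's note: with FALCON_BOARD_TEMP_BIAS = 15, FALCON_BOARD_TEMP_CRIT
 * works out to 80 + 15 = 95°C at the board sensor, while the junction
 * sensor is limited to 0..90°C in normal operation with the 125°C
 * critical cut-off taken from the datasheet quoted above. */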
40 43
41/***************************************************************************** 44/*****************************************************************************
42 * Support for LM87 sensor chip used on several boards 45 * Support for LM87 sensor chip used on several boards
43 */ 46 */
47#define LM87_REG_TEMP_HW_INT_LOCK 0x13
48#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
49#define LM87_REG_TEMP_HW_INT 0x17
50#define LM87_REG_TEMP_HW_EXT 0x18
51#define LM87_REG_TEMP_EXT1 0x26
52#define LM87_REG_TEMP_INT 0x27
44#define LM87_REG_ALARMS1 0x41 53#define LM87_REG_ALARMS1 0x41
45#define LM87_REG_ALARMS2 0x42 54#define LM87_REG_ALARMS2 0x42
46#define LM87_IN_LIMITS(nr, _min, _max) \ 55#define LM87_IN_LIMITS(nr, _min, _max) \
@@ -57,6 +66,27 @@
57 66
58#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) 67#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
59 68
69static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
70{
71 while (*reg_values) {
72 u8 reg = *reg_values++;
73 u8 value = *reg_values++;
74 int rc = i2c_smbus_write_byte_data(client, reg, value);
75 if (rc)
76 return rc;
77 }
78 return 0;
79}
80
81static const u8 falcon_lm87_common_regs[] = {
82 LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
83 LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
84 LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
85 LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
86 LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
87 0
88};
89
60static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 90static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
61 const u8 *reg_values) 91 const u8 *reg_values)
62{ 92{
@@ -67,13 +97,16 @@ static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
67 if (!client) 97 if (!client)
68 return -EIO; 98 return -EIO;
69 99
70 while (*reg_values) { 100 /* Read-to-clear alarm/interrupt status */
71 u8 reg = *reg_values++; 101 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
72 u8 value = *reg_values++; 102 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
73 rc = i2c_smbus_write_byte_data(client, reg, value); 103
74 if (rc) 104 rc = efx_poke_lm87(client, reg_values);
75 goto err; 105 if (rc)
76 } 106 goto err;
107 rc = efx_poke_lm87(client, falcon_lm87_common_regs);
108 if (rc)
109 goto err;
77 110
78 board->hwmon_client = client; 111 board->hwmon_client = client;
79 return 0; 112 return 0;
@@ -91,36 +124,56 @@ static void efx_fini_lm87(struct efx_nic *efx)
91static int efx_check_lm87(struct efx_nic *efx, unsigned mask) 124static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
92{ 125{
93 struct i2c_client *client = falcon_board(efx)->hwmon_client; 126 struct i2c_client *client = falcon_board(efx)->hwmon_client;
94 s32 alarms1, alarms2; 127 bool temp_crit, elec_fault, is_failure;
128 u16 alarms;
129 s32 reg;
95 130
96 /* If link is up then do not monitor temperature */ 131 /* If link is up then do not monitor temperature */
97 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) 132 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
98 return 0; 133 return 0;
99 134
100 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); 135 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
101 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); 136 if (reg < 0)
102 if (alarms1 < 0) 137 return reg;
103 return alarms1; 138 alarms = reg;
104 if (alarms2 < 0) 139 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
105 return alarms2; 140 if (reg < 0)
106 alarms1 &= mask; 141 return reg;
107 alarms2 &= mask >> 8; 142 alarms |= reg << 8;
108 if (alarms1 || alarms2) { 143 alarms &= mask;
144
145 temp_crit = false;
146 if (alarms & LM87_ALARM_TEMP_INT) {
147 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
148 if (reg < 0)
149 return reg;
150 if (reg > FALCON_BOARD_TEMP_CRIT)
151 temp_crit = true;
152 }
153 if (alarms & LM87_ALARM_TEMP_EXT1) {
154 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
155 if (reg < 0)
156 return reg;
157 if (reg > FALCON_JUNC_TEMP_CRIT)
158 temp_crit = true;
159 }
160 elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
161 is_failure = temp_crit || elec_fault;
162
163 if (alarms)
109 netif_err(efx, hw, efx->net_dev, 164 netif_err(efx, hw, efx->net_dev,
110 "LM87 detected a hardware failure (status %02x:%02x)" 165 "LM87 detected a hardware %s (status %02x:%02x)"
111 "%s%s%s\n", 166 "%s%s%s%s\n",
112 alarms1, alarms2, 167 is_failure ? "failure" : "problem",
113 (alarms1 & LM87_ALARM_TEMP_INT) ? 168 alarms & 0xff, alarms >> 8,
169 (alarms & LM87_ALARM_TEMP_INT) ?
114 "; board is overheating" : "", 170 "; board is overheating" : "",
115 (alarms1 & LM87_ALARM_TEMP_EXT1) ? 171 (alarms & LM87_ALARM_TEMP_EXT1) ?
116 "; controller is overheating" : "", 172 "; controller is overheating" : "",
117 (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1) 173 temp_crit ? "; reached critical temperature" : "",
118 || alarms2) ? 174 elec_fault ? "; electrical fault" : "");
119 "; electrical fault" : "");
120 return -ERANGE;
121 }
122 175
123 return 0; 176 return is_failure ? -ERANGE : 0;
124} 177}
125 178
126#else /* !CONFIG_SENSORS_LM87 */ 179#else /* !CONFIG_SENSORS_LM87 */
@@ -142,17 +195,17 @@ static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
142#endif /* CONFIG_SENSORS_LM87 */ 195#endif /* CONFIG_SENSORS_LM87 */
143 196
144/***************************************************************************** 197/*****************************************************************************
145 * Support for the SFE4001 and SFN4111T NICs. 198 * Support for the SFE4001 NIC.
146 * 199 *
147 * The SFE4001 does not power-up fully at reset due to its high power 200 * The SFE4001 does not power-up fully at reset due to its high power
148 * consumption. We control its power via a PCA9539 I/O expander. 201 * consumption. We control its power via a PCA9539 I/O expander.
149 * Both boards have a MAX6647 temperature monitor which we expose to 202 * It also has a MAX6647 temperature monitor which we expose to
150 * the lm90 driver. 203 * the lm90 driver.
151 * 204 *
152 * This also provides minimal support for reflashing the PHY, which is 205 * This also provides minimal support for reflashing the PHY, which is
153 * initiated by resetting it with the FLASH_CFG_1 pin pulled down. 206 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
154 * On SFE4001 rev A2 and later this is connected to the 3V3X output of 207 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
155 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3. 208 * the IO-expander.
156 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually 209 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
157 * exclusive with the network device being open. 210 * exclusive with the network device being open.
158 */ 211 */
@@ -304,34 +357,6 @@ fail_on:
304 return rc; 357 return rc;
305} 358}
306 359
307static int sfn4111t_reset(struct efx_nic *efx)
308{
309 struct falcon_board *board = falcon_board(efx);
310 efx_oword_t reg;
311
312 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
313 i2c_lock_adapter(&board->i2c_adap);
314
315 /* Pull RST_N (GPIO 2) low then let it up again, setting the
316 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
317 * output enables; the output levels should always be 0 (low)
318 * and we rely on external pull-ups. */
319 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
320 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
321 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
322 msleep(1000);
323 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
324 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
325 !!(efx->phy_mode & PHY_MODE_SPECIAL));
326 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
327 msleep(1);
328
329 i2c_unlock_adapter(&board->i2c_adap);
330
331 ssleep(1);
332 return 0;
333}
334
335static ssize_t show_phy_flash_cfg(struct device *dev, 360static ssize_t show_phy_flash_cfg(struct device *dev,
336 struct device_attribute *attr, char *buf) 361 struct device_attribute *attr, char *buf)
337{ 362{
@@ -353,7 +378,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
353 new_mode = old_mode & ~PHY_MODE_SPECIAL; 378 new_mode = old_mode & ~PHY_MODE_SPECIAL;
354 else 379 else
355 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
356 if (old_mode == new_mode) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
357 err = 0; 382 err = 0;
358 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
359 err = -EBUSY; 384 err = -EBUSY;
@@ -363,10 +388,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
363 efx->phy_mode = new_mode; 388 efx->phy_mode = new_mode;
364 if (new_mode & PHY_MODE_SPECIAL) 389 if (new_mode & PHY_MODE_SPECIAL)
365 falcon_stop_nic_stats(efx); 390 falcon_stop_nic_stats(efx);
366 if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001) 391 err = sfe4001_poweron(efx);
367 err = sfe4001_poweron(efx);
368 else
369 err = sfn4111t_reset(efx);
370 if (!err) 392 if (!err)
371 err = efx_reconfigure_port(efx); 393 err = efx_reconfigure_port(efx);
372 if (!(new_mode & PHY_MODE_SPECIAL)) 394 if (!(new_mode & PHY_MODE_SPECIAL))
@@ -393,10 +415,11 @@ static void sfe4001_fini(struct efx_nic *efx)
393 415
394static int sfe4001_check_hw(struct efx_nic *efx) 416static int sfe4001_check_hw(struct efx_nic *efx)
395{ 417{
418 struct falcon_nic_data *nic_data = efx->nic_data;
396 s32 status; 419 s32 status;
397 420
398 /* If XAUI link is up then do not monitor */ 421 /* If XAUI link is up then do not monitor */
399 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required) 422 if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
400 return 0; 423 return 0;
401 424
402 /* Check the powered status of the PHY. Lack of power implies that 425 /* Check the powered status of the PHY. Lack of power implies that
@@ -479,83 +502,6 @@ fail_hwmon:
479 return rc; 502 return rc;
480} 503}
481 504
482static int sfn4111t_check_hw(struct efx_nic *efx)
483{
484 s32 status;
485
486 /* If XAUI link is up then do not monitor */
487 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
488 return 0;
489
490 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
491 status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
492 MAX664X_REG_RSL);
493 if (status < 0)
494 return -EIO;
495 if (status & 0x57)
496 return -ERANGE;
497 return 0;
498}
499
500static void sfn4111t_fini(struct efx_nic *efx)
501{
502 netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
503
504 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
505 i2c_unregister_device(falcon_board(efx)->hwmon_client);
506}
507
508static struct i2c_board_info sfn4111t_a0_hwmon_info = {
509 I2C_BOARD_INFO("max6647", 0x4e),
510};
511
512static struct i2c_board_info sfn4111t_r5_hwmon_info = {
513 I2C_BOARD_INFO("max6646", 0x4d),
514};
515
516static void sfn4111t_init_phy(struct efx_nic *efx)
517{
518 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
519 if (sft9001_wait_boot(efx) != -EINVAL)
520 return;
521
522 efx->phy_mode = PHY_MODE_SPECIAL;
523 falcon_stop_nic_stats(efx);
524 }
525
526 sfn4111t_reset(efx);
527 sft9001_wait_boot(efx);
528}
529
530static int sfn4111t_init(struct efx_nic *efx)
531{
532 struct falcon_board *board = falcon_board(efx);
533 int rc;
534
535 board->hwmon_client =
536 i2c_new_device(&board->i2c_adap,
537 (board->minor < 5) ?
538 &sfn4111t_a0_hwmon_info :
539 &sfn4111t_r5_hwmon_info);
540 if (!board->hwmon_client)
541 return -EIO;
542
543 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
544 if (rc)
545 goto fail_hwmon;
546
547 if (efx->phy_mode & PHY_MODE_SPECIAL)
548 /* PHY may not generate a 156.25 MHz clock and MAC
549 * stats fetch will fail. */
550 falcon_stop_nic_stats(efx);
551
552 return 0;
553
554fail_hwmon:
555 i2c_unregister_device(board->hwmon_client);
556 return rc;
557}
558
559/***************************************************************************** 505/*****************************************************************************
560 * Support for the SFE4002 506 * Support for the SFE4002
561 * 507 *
@@ -691,6 +637,75 @@ static int sfn4112f_init(struct efx_nic *efx)
691 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); 637 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
692} 638}
693 639
640/*****************************************************************************
641 * Support for the SFE4003
642 *
643 */
644static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
645
646static const u8 sfe4003_lm87_regs[] = {
647 LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
648 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
649 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
650 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
651 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
652 LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
653 0
654};
655
656static struct i2c_board_info sfe4003_hwmon_info = {
657 I2C_BOARD_INFO("lm87", 0x2e),
658 .platform_data = &sfe4003_lm87_channel,
659};
660
661/* Board-specific LED info. */
662#define SFE4003_RED_LED_GPIO 11
663#define SFE4003_LED_ON 1
664#define SFE4003_LED_OFF 0
665
666static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
667{
668 struct falcon_board *board = falcon_board(efx);
669
670 /* The LEDs were not wired to GPIOs before A3 */
671 if (board->minor < 3 && board->major == 0)
672 return;
673
674 falcon_txc_set_gpio_val(
675 efx, SFE4003_RED_LED_GPIO,
676 (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
677}
678
679static void sfe4003_init_phy(struct efx_nic *efx)
680{
681 struct falcon_board *board = falcon_board(efx);
682
683 /* The LEDs were not wired to GPIOs before A3 */
684 if (board->minor < 3 && board->major == 0)
685 return;
686
687 falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
688 falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
689}
690
691static int sfe4003_check_hw(struct efx_nic *efx)
692{
693 struct falcon_board *board = falcon_board(efx);
694
695 /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
696 * (bad sensor) so we mask it out. */
697 unsigned alarm_mask =
698 (board->major == 0 && board->minor <= 2) ?
699 ~LM87_ALARM_TEMP_EXT1 : ~0;
700
701 return efx_check_lm87(efx, alarm_mask);
702}
703
704static int sfe4003_init(struct efx_nic *efx)
705{
706 return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
707}
708
694static const struct falcon_board_type board_types[] = { 709static const struct falcon_board_type board_types[] = {
695 { 710 {
696 .id = FALCON_BOARD_SFE4001, 711 .id = FALCON_BOARD_SFE4001,
@@ -713,14 +728,14 @@ static const struct falcon_board_type board_types[] = {
713 .monitor = sfe4002_check_hw, 728 .monitor = sfe4002_check_hw,
714 }, 729 },
715 { 730 {
716 .id = FALCON_BOARD_SFN4111T, 731 .id = FALCON_BOARD_SFE4003,
717 .ref_model = "SFN4111T", 732 .ref_model = "SFE4003",
718 .gen_type = "100/1000/10GBASE-T adapter", 733 .gen_type = "10GBASE-CX4 adapter",
719 .init = sfn4111t_init, 734 .init = sfe4003_init,
720 .init_phy = sfn4111t_init_phy, 735 .init_phy = sfe4003_init_phy,
721 .fini = sfn4111t_fini, 736 .fini = efx_fini_lm87,
722 .set_id_led = tenxpress_set_id_led, 737 .set_id_led = sfe4003_set_id_led,
723 .monitor = sfn4111t_check_hw, 738 .monitor = sfe4003_check_hw,
724 }, 739 },
725 { 740 {
726 .id = FALCON_BOARD_SFN4112F, 741 .id = FALCON_BOARD_SFN4112F,
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
deleted file mode 100644
index 7dadfcbd6ce7..000000000000
--- a/drivers/net/sfc/falcon_gmac.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "nic.h"
15#include "mac.h"
16#include "regs.h"
17#include "io.h"
18
19/**************************************************************************
20 *
21 * MAC operations
22 *
23 *************************************************************************/
24
25static int falcon_reconfigure_gmac(struct efx_nic *efx)
26{
27 struct efx_link_state *link_state = &efx->link_state;
28 bool loopback, tx_fc, rx_fc, bytemode;
29 int if_mode;
30 unsigned int max_frame_len;
31 efx_oword_t reg;
32
33 /* Configuration register 1 */
34 tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
35 rx_fc = !!(link_state->fc & EFX_FC_RX);
36 loopback = (efx->loopback_mode == LOOPBACK_GMAC);
37 bytemode = (link_state->speed == 1000);
38
39 EFX_POPULATE_OWORD_5(reg,
40 FRF_AB_GM_LOOP, loopback,
41 FRF_AB_GM_TX_EN, 1,
42 FRF_AB_GM_TX_FC_EN, tx_fc,
43 FRF_AB_GM_RX_EN, 1,
44 FRF_AB_GM_RX_FC_EN, rx_fc);
45 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10);
47
48 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg,
51 FRF_AB_GM_IF_MODE, if_mode,
52 FRF_AB_GM_PAD_CRC_EN, 1,
53 FRF_AB_GM_LEN_CHK, 1,
54 FRF_AB_GM_FD, link_state->fd,
55 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56
57 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10);
59
60 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10);
65
66 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg,
68 FRF_AB_GMF_FTFENREQ, 1,
69 FRF_AB_GMF_STFENREQ, 1,
70 FRF_AB_GMF_FRFENREQ, 1,
71 FRF_AB_GMF_SRFENREQ, 1,
72 FRF_AB_GMF_WTMENREQ, 1);
73 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10);
75
76 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg,
78 FRF_AB_GMF_CFGFRTH, 0x12,
79 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10);
82
83 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg,
85 FRF_AB_GMF_CFGHWM, 0x3f,
86 FRF_AB_GMF_CFGLWM, 0xa);
87 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10);
89
90 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg,
92 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 FRF_AB_GMF_CFGFTTH, 0x08);
94 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10);
96
97 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10);
101
102 /* FIFO configuration register 5 */
103 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
107 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10);
110
111 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg,
113 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10);
119 EFX_POPULATE_OWORD_2(reg,
120 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10);
124
125 falcon_reconfigure_mac_wrapper(efx);
126
127 return 0;
128}
129
130static void falcon_update_stats_gmac(struct efx_nic *efx)
131{
132 struct efx_mac_stats *mac_stats = &efx->mac_stats;
133 unsigned long old_rx_pause, old_tx_pause;
134 unsigned long new_rx_pause, new_tx_pause;
135
136 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
137 old_rx_pause = mac_stats->rx_pause;
138 old_tx_pause = mac_stats->tx_pause;
139
140 /* Update MAC stats from DMAed values */
141 FALCON_STAT(efx, GRxGoodOct, rx_good_bytes);
142 FALCON_STAT(efx, GRxBadOct, rx_bad_bytes);
143 FALCON_STAT(efx, GRxMissPkt, rx_missed);
144 FALCON_STAT(efx, GRxFalseCRS, rx_false_carrier);
145 FALCON_STAT(efx, GRxPausePkt, rx_pause);
146 FALCON_STAT(efx, GRxBadPkt, rx_bad);
147 FALCON_STAT(efx, GRxUcastPkt, rx_unicast);
148 FALCON_STAT(efx, GRxMcastPkt, rx_multicast);
149 FALCON_STAT(efx, GRxBcastPkt, rx_broadcast);
150 FALCON_STAT(efx, GRxGoodLt64Pkt, rx_good_lt64);
151 FALCON_STAT(efx, GRxBadLt64Pkt, rx_bad_lt64);
152 FALCON_STAT(efx, GRx64Pkt, rx_64);
153 FALCON_STAT(efx, GRx65to127Pkt, rx_65_to_127);
154 FALCON_STAT(efx, GRx128to255Pkt, rx_128_to_255);
155 FALCON_STAT(efx, GRx256to511Pkt, rx_256_to_511);
156 FALCON_STAT(efx, GRx512to1023Pkt, rx_512_to_1023);
157 FALCON_STAT(efx, GRx1024to15xxPkt, rx_1024_to_15xx);
158 FALCON_STAT(efx, GRx15xxtoJumboPkt, rx_15xx_to_jumbo);
159 FALCON_STAT(efx, GRxGtJumboPkt, rx_gtjumbo);
160 FALCON_STAT(efx, GRxFcsErr64to15xxPkt, rx_bad_64_to_15xx);
161 FALCON_STAT(efx, GRxFcsErr15xxtoJumboPkt, rx_bad_15xx_to_jumbo);
162 FALCON_STAT(efx, GRxFcsErrGtJumboPkt, rx_bad_gtjumbo);
163 FALCON_STAT(efx, GTxGoodBadOct, tx_bytes);
164 FALCON_STAT(efx, GTxGoodOct, tx_good_bytes);
165 FALCON_STAT(efx, GTxSglColPkt, tx_single_collision);
166 FALCON_STAT(efx, GTxMultColPkt, tx_multiple_collision);
167 FALCON_STAT(efx, GTxExColPkt, tx_excessive_collision);
168 FALCON_STAT(efx, GTxDefPkt, tx_deferred);
169 FALCON_STAT(efx, GTxLateCol, tx_late_collision);
170 FALCON_STAT(efx, GTxExDefPkt, tx_excessive_deferred);
171 FALCON_STAT(efx, GTxPausePkt, tx_pause);
172 FALCON_STAT(efx, GTxBadPkt, tx_bad);
173 FALCON_STAT(efx, GTxUcastPkt, tx_unicast);
174 FALCON_STAT(efx, GTxMcastPkt, tx_multicast);
175 FALCON_STAT(efx, GTxBcastPkt, tx_broadcast);
176 FALCON_STAT(efx, GTxLt64Pkt, tx_lt64);
177 FALCON_STAT(efx, GTx64Pkt, tx_64);
178 FALCON_STAT(efx, GTx65to127Pkt, tx_65_to_127);
179 FALCON_STAT(efx, GTx128to255Pkt, tx_128_to_255);
180 FALCON_STAT(efx, GTx256to511Pkt, tx_256_to_511);
181 FALCON_STAT(efx, GTx512to1023Pkt, tx_512_to_1023);
182 FALCON_STAT(efx, GTx1024to15xxPkt, tx_1024_to_15xx);
183 FALCON_STAT(efx, GTx15xxtoJumboPkt, tx_15xx_to_jumbo);
184 FALCON_STAT(efx, GTxGtJumboPkt, tx_gtjumbo);
185 FALCON_STAT(efx, GTxNonTcpUdpPkt, tx_non_tcpudp);
186 FALCON_STAT(efx, GTxMacSrcErrPkt, tx_mac_src_error);
187 FALCON_STAT(efx, GTxIpSrcErrPkt, tx_ip_src_error);
188
189 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
190 new_rx_pause = mac_stats->rx_pause;
191 new_tx_pause = mac_stats->tx_pause;
192 mac_stats->rx_bad -= (new_rx_pause - old_rx_pause);
193 mac_stats->tx_bad -= (new_tx_pause - old_tx_pause);
194
195 /* Derive stats that the MAC doesn't provide directly */
196 mac_stats->tx_bad_bytes =
197 mac_stats->tx_bytes - mac_stats->tx_good_bytes;
198 mac_stats->tx_packets =
199 mac_stats->tx_lt64 + mac_stats->tx_64 +
200 mac_stats->tx_65_to_127 + mac_stats->tx_128_to_255 +
201 mac_stats->tx_256_to_511 + mac_stats->tx_512_to_1023 +
202 mac_stats->tx_1024_to_15xx + mac_stats->tx_15xx_to_jumbo +
203 mac_stats->tx_gtjumbo;
204 mac_stats->tx_collision =
205 mac_stats->tx_single_collision +
206 mac_stats->tx_multiple_collision +
207 mac_stats->tx_excessive_collision +
208 mac_stats->tx_late_collision;
209 mac_stats->rx_bytes =
210 mac_stats->rx_good_bytes + mac_stats->rx_bad_bytes;
211 mac_stats->rx_packets =
212 mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64 +
213 mac_stats->rx_64 + mac_stats->rx_65_to_127 +
214 mac_stats->rx_128_to_255 + mac_stats->rx_256_to_511 +
215 mac_stats->rx_512_to_1023 + mac_stats->rx_1024_to_15xx +
216 mac_stats->rx_15xx_to_jumbo + mac_stats->rx_gtjumbo;
217 mac_stats->rx_good = mac_stats->rx_packets - mac_stats->rx_bad;
218 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
219}
220
221static bool falcon_gmac_check_fault(struct efx_nic *efx)
222{
223 return false;
224}
225
226struct efx_mac_operations falcon_gmac_operations = {
227 .reconfigure = falcon_reconfigure_gmac,
228 .update_stats = falcon_update_stats_gmac,
229 .check_fault = falcon_gmac_check_fault,
230};
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bae656dd2c4e..9516452c079c 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,6 @@
16#include "io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h"
20#include "workarounds.h" 19#include "workarounds.h"
21 20
22/************************************************************************** 21/**************************************************************************
@@ -88,6 +87,7 @@ int falcon_reset_xaui(struct efx_nic *efx)
88 87
89static void falcon_ack_status_intr(struct efx_nic *efx) 88static void falcon_ack_status_intr(struct efx_nic *efx)
90{ 89{
90 struct falcon_nic_data *nic_data = efx->nic_data;
91 efx_oword_t reg; 91 efx_oword_t reg;
92 92
93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) 93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
99 99
100 /* We can only use this interrupt to signal the negative edge of 100 /* We can only use this interrupt to signal the negative edge of
101 * xaui_align [we have to poll the positive edge]. */ 101 * xaui_align [we have to poll the positive edge]. */
102 if (efx->xmac_poll_required) 102 if (nic_data->xmac_poll_required)
103 return; 103 return;
104 104
105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); 105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -143,7 +143,7 @@ static bool falcon_xmac_link_ok(struct efx_nic *efx)
143 efx_mdio_phyxgxs_lane_sync(efx)); 143 efx_mdio_phyxgxs_lane_sync(efx));
144} 144}
145 145
146void falcon_reconfigure_xmac_core(struct efx_nic *efx) 146static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
147{ 147{
148 unsigned int max_frame_len; 148 unsigned int max_frame_len;
149 efx_oword_t reg; 149 efx_oword_t reg;
@@ -277,12 +277,14 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
277 277
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 278static int falcon_reconfigure_xmac(struct efx_nic *efx)
279{ 279{
280 struct falcon_nic_data *nic_data = efx->nic_data;
281
280 falcon_reconfigure_xgxs_core(efx); 282 falcon_reconfigure_xgxs_core(efx);
281 falcon_reconfigure_xmac_core(efx); 283 falcon_reconfigure_xmac_core(efx);
282 284
283 falcon_reconfigure_mac_wrapper(efx); 285 falcon_reconfigure_mac_wrapper(efx);
284 286
285 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 287 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
286 falcon_ack_status_intr(efx); 288 falcon_ack_status_intr(efx);
287 289
288 return 0; 290 return 0;
@@ -350,15 +352,17 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
350 352
351void falcon_poll_xmac(struct efx_nic *efx) 353void falcon_poll_xmac(struct efx_nic *efx)
352{ 354{
355 struct falcon_nic_data *nic_data = efx->nic_data;
356
353 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || 357 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
354 !efx->xmac_poll_required) 358 !nic_data->xmac_poll_required)
355 return; 359 return;
356 360
357 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 361 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
358 falcon_ack_status_intr(efx); 362 falcon_ack_status_intr(efx);
359} 363}
360 364
361struct efx_mac_operations falcon_xmac_operations = { 365const struct efx_mac_operations falcon_xmac_operations = {
362 .reconfigure = falcon_reconfigure_xmac, 366 .reconfigure = falcon_reconfigure_xmac,
363 .update_stats = falcon_update_stats_xmac, 367 .update_stats = falcon_update_stats_xmac,
364 .check_fault = falcon_xmac_check_fault, 368 .check_fault = falcon_xmac_check_fault,
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
new file mode 100644
index 000000000000..95a980fd63d5
--- /dev/null
+++ b/drivers/net/sfc/filter.c
@@ -0,0 +1,720 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/in.h>
11#include <net/ip.h>
12#include "efx.h"
13#include "filter.h"
14#include "io.h"
15#include "nic.h"
16#include "regs.h"
17
18/* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to pipelined implementation we need to program H/W with a value that
20 * is larger than the hop limit we want.
21 */
22#define FILTER_CTL_SRCH_FUDGE_WILD 3
23#define FILTER_CTL_SRCH_FUDGE_FULL 1
24
25/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
26 * We also need to avoid infinite loops in efx_filter_search() when the
27 * table is full.
28 */
29#define FILTER_CTL_SRCH_MAX 200
30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
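/* Editor's note: efx_filter_push_rx_limits() below adds these fudge factors
 * when programming the hardware, so a recorded search_depth of N becomes a
 * limit of N + 1 for full-match types and N + 3 for wildcard types. */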
34
35enum efx_filter_table_id {
36 EFX_FILTER_TABLE_RX_IP = 0,
37 EFX_FILTER_TABLE_RX_MAC,
38 EFX_FILTER_TABLE_COUNT,
39};
40
41struct efx_filter_table {
42 enum efx_filter_table_id id;
43 u32 offset; /* address of table relative to BAR */
44 unsigned size; /* number of entries */
45 unsigned step; /* step between entries */
46 unsigned used; /* number currently used */
47 unsigned long *used_bitmap;
48 struct efx_filter_spec *spec;
49 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
50};
51
52struct efx_filter_state {
53 spinlock_t lock;
54 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
55#ifdef CONFIG_RFS_ACCEL
56 u32 *rps_flow_id;
57 unsigned rps_expire_index;
58#endif
59};
60
61/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
62 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
63static u16 efx_filter_hash(u32 key)
64{
65 u16 tmp;
66
67 /* First 16 rounds */
68 tmp = 0x1fff ^ key >> 16;
69 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
70 tmp = tmp ^ tmp >> 9;
71 /* Last 16 rounds */
72 tmp = tmp ^ tmp << 13 ^ key;
73 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
74 return tmp ^ tmp >> 9;
75}
76
77/* To allow for hash collisions, filter search continues at these
78 * increments from the first possible entry selected by the hash. */
79static u16 efx_filter_increment(u32 key)
80{
81 return key * 2 - 1;
82}
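/* Editor's sketch (illustrative, not part of the patch): together the two
 * helpers above implement double hashing over a power-of-two table. The
 * increment (key * 2 - 1) is odd, hence coprime with the table size, so the
 * probe sequence visits every slot before repeating. The slot examined at a
 * given depth (depth >= 1, as counted in efx_filter_search() below) is:
 */
static unsigned efx_filter_probe_slot(u32 key, unsigned depth,
				      unsigned table_size)
{
	unsigned filter_idx = efx_filter_hash(key) & (table_size - 1);

	while (--depth)
		filter_idx = (filter_idx + efx_filter_increment(key)) &
			     (table_size - 1);
	return filter_idx;
}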
83
84static enum efx_filter_table_id
85efx_filter_spec_table_id(const struct efx_filter_spec *spec)
86{
87 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
88 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
89 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
90 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
91 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
92 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
93 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
94 return spec->type >> 2;
95}
96
97static struct efx_filter_table *
98efx_filter_spec_table(struct efx_filter_state *state,
99 const struct efx_filter_spec *spec)
100{
101 if (spec->type == EFX_FILTER_UNSPEC)
102 return NULL;
103 else
104 return &state->table[efx_filter_spec_table_id(spec)];
105}
106
107static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
108{
109 memset(table->search_depth, 0, sizeof(table->search_depth));
110}
111
112static void efx_filter_push_rx_limits(struct efx_nic *efx)
113{
114 struct efx_filter_state *state = efx->filter_state;
115 struct efx_filter_table *table;
116 efx_oword_t filter_ctl;
117
118 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
119
120 table = &state->table[EFX_FILTER_TABLE_RX_IP];
121 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
122 table->search_depth[EFX_FILTER_TCP_FULL] +
123 FILTER_CTL_SRCH_FUDGE_FULL);
124 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
125 table->search_depth[EFX_FILTER_TCP_WILD] +
126 FILTER_CTL_SRCH_FUDGE_WILD);
127 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
128 table->search_depth[EFX_FILTER_UDP_FULL] +
129 FILTER_CTL_SRCH_FUDGE_FULL);
130 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
131 table->search_depth[EFX_FILTER_UDP_WILD] +
132 FILTER_CTL_SRCH_FUDGE_WILD);
133
134 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
135 if (table->size) {
136 EFX_SET_OWORD_FIELD(
137 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
138 table->search_depth[EFX_FILTER_MAC_FULL] +
139 FILTER_CTL_SRCH_FUDGE_FULL);
140 EFX_SET_OWORD_FIELD(
141 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
142 table->search_depth[EFX_FILTER_MAC_WILD] +
143 FILTER_CTL_SRCH_FUDGE_WILD);
144 }
145
146 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
147}
148
149static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
150 __be32 host1, __be16 port1,
151 __be32 host2, __be16 port2)
152{
153 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
154 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
155 spec->data[2] = ntohl(host2);
156}
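/* Editor's worked example (hypothetical values): host1 = 192.168.0.1
 * (0xc0a80001), port1 = 80 (0x0050), host2 = 10.0.0.1 (0x0a000001),
 * port2 = 8080 (0x1f90) pack as
 *	data[0] = 0x00010050  (host1[15:0] : port1)
 *	data[1] = 0x1f90c0a8  (port2 : host1[31:16])
 *	data[2] = 0x0a000001  (host2)
 * matching the EFX_DWORD_0..2 layout consumed by efx_filter_build() below. */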
157
158/**
159 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
160 * @spec: Specification to initialise
161 * @proto: Transport layer protocol number
162 * @host: Local host address (network byte order)
163 * @port: Local port (network byte order)
164 */
165int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
166 __be32 host, __be16 port)
167{
168 __be32 host1;
169 __be16 port1;
170
171 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
172
173 /* This cannot currently be combined with other filtering */
174 if (spec->type != EFX_FILTER_UNSPEC)
175 return -EPROTONOSUPPORT;
176
177 if (port == 0)
178 return -EINVAL;
179
180 switch (proto) {
181 case IPPROTO_TCP:
182 spec->type = EFX_FILTER_TCP_WILD;
183 break;
184 case IPPROTO_UDP:
185 spec->type = EFX_FILTER_UDP_WILD;
186 break;
187 default:
188 return -EPROTONOSUPPORT;
189 }
190
191 /* Filter is constructed in terms of source and destination,
192 * with the odd wrinkle that the ports are swapped in a UDP
193 * wildcard filter. We need to convert from local and remote
194 * (= zero for wildcard) addresses.
195 */
196 host1 = 0;
197 if (proto != IPPROTO_UDP) {
198 port1 = 0;
199 } else {
200 port1 = port;
201 port = 0;
202 }
203
204 __efx_filter_set_ipv4(spec, host1, port1, host, port);
205 return 0;
206}
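/* Editor's usage sketch, under stated assumptions: efx_filter_init_rx() and
 * EFX_FILTER_PRI_MANUAL are taken from filter.h (not shown in this hunk) and
 * 192.168.0.1:80 is a made-up local address/port. Steering all local TCP
 * port 80 traffic to RX queue 0 would look roughly like this:
 */
static int example_steer_local_http(struct efx_nic *efx)
{
	struct efx_filter_spec spec;
	int rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				       htonl(0xc0a80001), htons(80));
	if (rc)
		return rc;
	rc = efx_filter_insert_filter(efx, &spec, false);
	return rc < 0 ? rc : 0;
}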
207
208/**
209 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
210 * @spec: Specification to initialise
211 * @proto: Transport layer protocol number
212 * @host: Local host address (network byte order)
213 * @port: Local port (network byte order)
214 * @rhost: Remote host address (network byte order)
215 * @rport: Remote port (network byte order)
216 */
217int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
218 __be32 host, __be16 port,
219 __be32 rhost, __be16 rport)
220{
221 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
222
223 /* This cannot currently be combined with other filtering */
224 if (spec->type != EFX_FILTER_UNSPEC)
225 return -EPROTONOSUPPORT;
226
227 if (port == 0 || rport == 0)
228 return -EINVAL;
229
230 switch (proto) {
231 case IPPROTO_TCP:
232 spec->type = EFX_FILTER_TCP_FULL;
233 break;
234 case IPPROTO_UDP:
235 spec->type = EFX_FILTER_UDP_FULL;
236 break;
237 default:
238 return -EPROTONOSUPPORT;
239 }
240
241 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
242 return 0;
243}
244
245/**
246 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
247 * @spec: Specification to initialise
248 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
249 * @addr: Local Ethernet MAC address
250 */
251int efx_filter_set_eth_local(struct efx_filter_spec *spec,
252 u16 vid, const u8 *addr)
253{
254 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
255
256 /* This cannot currently be combined with other filtering */
257 if (spec->type != EFX_FILTER_UNSPEC)
258 return -EPROTONOSUPPORT;
259
260 if (vid == EFX_FILTER_VID_UNSPEC) {
261 spec->type = EFX_FILTER_MAC_WILD;
262 spec->data[0] = 0;
263 } else {
264 spec->type = EFX_FILTER_MAC_FULL;
265 spec->data[0] = vid;
266 }
267
268 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
269 spec->data[2] = addr[0] << 8 | addr[1];
270 return 0;
271}
272
273/* Build a filter entry and return its n-tuple key. */
274static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
275{
276 u32 data3;
277
278 switch (efx_filter_spec_table_id(spec)) {
279 case EFX_FILTER_TABLE_RX_IP: {
280 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
281 spec->type == EFX_FILTER_UDP_WILD);
282 EFX_POPULATE_OWORD_7(
283 *filter,
284 FRF_BZ_RSS_EN,
285 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
286 FRF_BZ_SCATTER_EN,
287 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
288 FRF_BZ_TCP_UDP, is_udp,
289 FRF_BZ_RXQ_ID, spec->dmaq_id,
290 EFX_DWORD_2, spec->data[2],
291 EFX_DWORD_1, spec->data[1],
292 EFX_DWORD_0, spec->data[0]);
293 data3 = is_udp;
294 break;
295 }
296
297 case EFX_FILTER_TABLE_RX_MAC: {
298 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
299 EFX_POPULATE_OWORD_8(
300 *filter,
301 FRF_CZ_RMFT_RSS_EN,
302 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
303 FRF_CZ_RMFT_SCATTER_EN,
304 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
305 FRF_CZ_RMFT_IP_OVERRIDE,
306 !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
307 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
308 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
309 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
310 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
311 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
312 data3 = is_wild;
313 break;
314 }
315
316 default:
317 BUG();
318 }
319
320 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
321}
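/* Editor's note: the key returned above is the XOR-fold of the three data
 * words plus the type-dependent bit; it is exactly what efx_filter_hash()
 * and efx_filter_increment() consume during efx_filter_search(). */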
322
323static bool efx_filter_equal(const struct efx_filter_spec *left,
324 const struct efx_filter_spec *right)
325{
326 if (left->type != right->type ||
327 memcmp(left->data, right->data, sizeof(left->data)))
328 return false;
329
330 return true;
331}
332
333static int efx_filter_search(struct efx_filter_table *table,
334 struct efx_filter_spec *spec, u32 key,
335 bool for_insert, int *depth_required)
336{
337 unsigned hash, incr, filter_idx, depth, depth_max;
338 struct efx_filter_spec *cmp;
339
340 hash = efx_filter_hash(key);
341 incr = efx_filter_increment(key);
342 depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
343 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
344
345 for (depth = 1, filter_idx = hash & (table->size - 1);
346 depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
347 ++depth) {
348 cmp = &table->spec[filter_idx];
349 if (efx_filter_equal(spec, cmp))
350 goto found;
351 filter_idx = (filter_idx + incr) & (table->size - 1);
352 }
353 if (!for_insert)
354 return -ENOENT;
355 if (depth > depth_max)
356 return -EBUSY;
357found:
358 *depth_required = depth;
359 return filter_idx;
360}
361
362/* Construct/deconstruct external filter IDs */
363
364static inline int
365efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
366{
367 return table_id << 16 | index;
368}
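/* Editor's note (worked example): index 5 in EFX_FILTER_TABLE_RX_MAC
 * (table id 1) yields the external filter ID (1 << 16) | 5 = 0x10005. */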
369
370/**
371 * efx_filter_insert_filter - add or replace a filter
372 * @efx: NIC in which to insert the filter
373 * @spec: Specification for the filter
374 * @replace: Flag for whether the specified filter may replace a filter
375 * with an identical match expression and equal or lower priority
376 *
377 * On success, return the filter ID.
378 * On failure, return a negative error code.
379 */
380int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
381 bool replace)
382{
383 struct efx_filter_state *state = efx->filter_state;
384 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
385 struct efx_filter_spec *saved_spec;
386 efx_oword_t filter;
387 int filter_idx, depth;
388 u32 key;
389 int rc;
390
391 if (!table || table->size == 0)
392 return -EINVAL;
393
394 key = efx_filter_build(&filter, spec);
395
396 netif_vdbg(efx, hw, efx->net_dev,
397 "%s: type %d search_depth=%d", __func__, spec->type,
398 table->search_depth[spec->type]);
399
400 spin_lock_bh(&state->lock);
401
402 rc = efx_filter_search(table, spec, key, true, &depth);
403 if (rc < 0)
404 goto out;
405 filter_idx = rc;
406 BUG_ON(filter_idx >= table->size);
407 saved_spec = &table->spec[filter_idx];
408
409 if (test_bit(filter_idx, table->used_bitmap)) {
410 /* Should we replace the existing filter? */
411 if (!replace) {
412 rc = -EEXIST;
413 goto out;
414 }
415 if (spec->priority < saved_spec->priority) {
416 rc = -EPERM;
417 goto out;
418 }
419 } else {
420 __set_bit(filter_idx, table->used_bitmap);
421 ++table->used;
422 }
423 *saved_spec = *spec;
424
425 if (table->search_depth[spec->type] < depth) {
426 table->search_depth[spec->type] = depth;
427 efx_filter_push_rx_limits(efx);
428 }
429
430 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
431
432 netif_vdbg(efx, hw, efx->net_dev,
433 "%s: filter type %d index %d rxq %u set",
434 __func__, spec->type, filter_idx, spec->dmaq_id);
435 rc = efx_filter_make_id(table->id, filter_idx);
436
437out:
438 spin_unlock_bh(&state->lock);
439 return rc;
440}
441
442static void efx_filter_table_clear_entry(struct efx_nic *efx,
443 struct efx_filter_table *table,
444 int filter_idx)
445{
446	static efx_oword_t filter;	/* static, so all-zeroes: writing it clears the HW entry */
447
448 if (test_bit(filter_idx, table->used_bitmap)) {
449 __clear_bit(filter_idx, table->used_bitmap);
450 --table->used;
451 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
452
453 efx_writeo(efx, &filter,
454 table->offset + table->step * filter_idx);
455 }
456}
457
458/**
459 * efx_filter_remove_filter - remove a filter by specification
460 * @efx: NIC from which to remove the filter
461 * @spec: Specification for the filter
462 *
463 * On success, return zero.
464 * On failure, return a negative error code.
465 */
466int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
467{
468 struct efx_filter_state *state = efx->filter_state;
469 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
470 struct efx_filter_spec *saved_spec;
471 efx_oword_t filter;
472 int filter_idx, depth;
473 u32 key;
474 int rc;
475
476 if (!table)
477 return -EINVAL;
478
479 key = efx_filter_build(&filter, spec);
480
481 spin_lock_bh(&state->lock);
482
483 rc = efx_filter_search(table, spec, key, false, &depth);
484 if (rc < 0)
485 goto out;
486 filter_idx = rc;
487 saved_spec = &table->spec[filter_idx];
488
489 if (spec->priority < saved_spec->priority) {
490 rc = -EPERM;
491 goto out;
492 }
493
494 efx_filter_table_clear_entry(efx, table, filter_idx);
495 if (table->used == 0)
496 efx_filter_table_reset_search_depth(table);
497 rc = 0;
498
499out:
500 spin_unlock_bh(&state->lock);
501 return rc;
502}
503
504static void efx_filter_table_clear(struct efx_nic *efx,
505 enum efx_filter_table_id table_id,
506 enum efx_filter_priority priority)
507{
508 struct efx_filter_state *state = efx->filter_state;
509 struct efx_filter_table *table = &state->table[table_id];
510 int filter_idx;
511
512 spin_lock_bh(&state->lock);
513
514 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
515 if (table->spec[filter_idx].priority <= priority)
516 efx_filter_table_clear_entry(efx, table, filter_idx);
517 if (table->used == 0)
518 efx_filter_table_reset_search_depth(table);
519
520 spin_unlock_bh(&state->lock);
521}
522
523/**
524 * efx_filter_clear_rx - remove RX filters by priority
525 * @efx: NIC from which to remove the filters
526 * @priority: Maximum priority to remove
527 */
528void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
529{
530 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
531 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
532}
533
534/* Restore filter state after reset */
535void efx_restore_filters(struct efx_nic *efx)
536{
537 struct efx_filter_state *state = efx->filter_state;
538 enum efx_filter_table_id table_id;
539 struct efx_filter_table *table;
540 efx_oword_t filter;
541 int filter_idx;
542
543 spin_lock_bh(&state->lock);
544
545 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
546 table = &state->table[table_id];
547 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
548 if (!test_bit(filter_idx, table->used_bitmap))
549 continue;
550 efx_filter_build(&filter, &table->spec[filter_idx]);
551 efx_writeo(efx, &filter,
552 table->offset + table->step * filter_idx);
553 }
554 }
555
556 efx_filter_push_rx_limits(efx);
557
558 spin_unlock_bh(&state->lock);
559}
560
561int efx_probe_filters(struct efx_nic *efx)
562{
563 struct efx_filter_state *state;
564 struct efx_filter_table *table;
565 unsigned table_id;
566
567 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
568 if (!state)
569 return -ENOMEM;
570 efx->filter_state = state;
571
572 spin_lock_init(&state->lock);
573
574 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
575#ifdef CONFIG_RFS_ACCEL
576 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
577 sizeof(*state->rps_flow_id),
578 GFP_KERNEL);
579 if (!state->rps_flow_id)
580 goto fail;
581#endif
582 table = &state->table[EFX_FILTER_TABLE_RX_IP];
583 table->id = EFX_FILTER_TABLE_RX_IP;
584 table->offset = FR_BZ_RX_FILTER_TBL0;
585 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
586 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
587 }
588
589 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
590 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
591 table->id = EFX_FILTER_TABLE_RX_MAC;
592 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
593 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
594 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
595 }
596
597 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
598 table = &state->table[table_id];
599 if (table->size == 0)
600 continue;
601 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
602 sizeof(unsigned long),
603 GFP_KERNEL);
604 if (!table->used_bitmap)
605 goto fail;
606 table->spec = vzalloc(table->size * sizeof(*table->spec));
607 if (!table->spec)
608 goto fail;
609 }
610
611 return 0;
612
613fail:
614 efx_remove_filters(efx);
615 return -ENOMEM;
616}
617
618void efx_remove_filters(struct efx_nic *efx)
619{
620 struct efx_filter_state *state = efx->filter_state;
621 enum efx_filter_table_id table_id;
622
623 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
624 kfree(state->table[table_id].used_bitmap);
625 vfree(state->table[table_id].spec);
626 }
627#ifdef CONFIG_RFS_ACCEL
628 kfree(state->rps_flow_id);
629#endif
630 kfree(state);
631}
632
633#ifdef CONFIG_RFS_ACCEL
634
635int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
636 u16 rxq_index, u32 flow_id)
637{
638 struct efx_nic *efx = netdev_priv(net_dev);
639 struct efx_channel *channel;
640 struct efx_filter_state *state = efx->filter_state;
641 struct efx_filter_spec spec;
642 const struct iphdr *ip;
643 const __be16 *ports;
644 int nhoff;
645 int rc;
646
647 nhoff = skb_network_offset(skb);
648
649 if (skb->protocol != htons(ETH_P_IP))
650 return -EPROTONOSUPPORT;
651
652 /* RFS must validate the IP header length before calling us */
653 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
654 ip = (const struct iphdr *)(skb->data + nhoff);
655 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
656 return -EPROTONOSUPPORT;
657 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
658 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
659
660 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
661 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
662 ip->daddr, ports[1], ip->saddr, ports[0]);
663 if (rc)
664 return rc;
665
666 rc = efx_filter_insert_filter(efx, &spec, true);
667 if (rc < 0)
668 return rc;
669
670 /* Remember this so we can check whether to expire the filter later */
671 state->rps_flow_id[rc] = flow_id;
672 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
673 ++channel->rfs_filters_added;
674
675 netif_info(efx, rx_status, efx->net_dev,
676 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
677 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
678 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
679 rxq_index, flow_id, rc);
680
681 return rc;
682}
683
684bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
685{
686 struct efx_filter_state *state = efx->filter_state;
687 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
688 unsigned mask = table->size - 1;
689 unsigned index;
690 unsigned stop;
691
692 if (!spin_trylock_bh(&state->lock))
693 return false;
694
695 index = state->rps_expire_index;
696 stop = (index + quota) & mask;
697
698 while (index != stop) {
699 if (test_bit(index, table->used_bitmap) &&
700 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
701 rps_may_expire_flow(efx->net_dev,
702 table->spec[index].dmaq_id,
703 state->rps_flow_id[index], index)) {
704 netif_info(efx, rx_status, efx->net_dev,
705 "expiring filter %d [flow %u]\n",
706 index, state->rps_flow_id[index]);
707 efx_filter_table_clear_entry(efx, table, index);
708 }
709 index = (index + 1) & mask;
710 }
711
712 state->rps_expire_index = stop;
713 if (table->used == 0)
714 efx_filter_table_reset_search_depth(table);
715
716 spin_unlock_bh(&state->lock);
717 return true;
718}
719
720#endif /* CONFIG_RFS_ACCEL */
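
The ARFS entry points above only do something once they are hooked into the
stack. A minimal sketch of the wiring, assuming the driver's net_device_ops
table lives in efx.c (the hookup itself is not part of this hunk); the
.ndo_rx_flow_steer callback signature matches efx_filter_rfs() exactly, and
the driver is then presumably expected to call __efx_filter_rfs_expire()
periodically (for example from NAPI poll) with a scan quota:

#include <linux/netdevice.h>

static const struct net_device_ops efx_netdev_ops_sketch = {
	/* ... usual open/stop/xmit callbacks elided ... */
#ifdef CONFIG_RFS_ACCEL
	/* called by the core when RPS decides a flow should change queue */
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};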
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
new file mode 100644
index 000000000000..872f2132a496
--- /dev/null
+++ b/drivers/net/sfc/filter.h
@@ -0,0 +1,112 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_FILTER_H
11#define EFX_FILTER_H
12
13#include <linux/types.h>
14
15/**
16 * enum efx_filter_type - type of hardware filter
17 * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
18 * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
19 * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
23 * @EFX_FILTER_UNSPEC: Match type is unspecified
24 *
25 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
26 */
27enum efx_filter_type {
28 EFX_FILTER_TCP_FULL = 0,
29 EFX_FILTER_TCP_WILD,
30 EFX_FILTER_UDP_FULL,
31 EFX_FILTER_UDP_WILD,
32 EFX_FILTER_MAC_FULL = 4,
33 EFX_FILTER_MAC_WILD,
34 EFX_FILTER_TYPE_COUNT, /* number of specific types */
35 EFX_FILTER_UNSPEC = 0xf,
36};
37
38/**
39 * enum efx_filter_priority - priority of a hardware filter specification
40 * @EFX_FILTER_PRI_HINT: Performance hint
41 * @EFX_FILTER_PRI_MANUAL: Manually configured filter
42 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
43 */
44enum efx_filter_priority {
45 EFX_FILTER_PRI_HINT = 0,
46 EFX_FILTER_PRI_MANUAL,
47 EFX_FILTER_PRI_REQUIRED,
48};
49
50/**
51 * enum efx_filter_flags - flags for hardware filter specifications
52 * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
53 * By default, matching packets will be delivered only to the
54 * specified queue. If this flag is set, they will be delivered
55 * to a range of queues offset from the specified queue number
56 * according to the indirection table.
57 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
58 * queue.
59 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
60 * any IP filter that matches the same packet. By default, IP
61 * filters take precedence.
62 * @EFX_FILTER_FLAG_RX: Filter is for RX
63 */
64enum efx_filter_flags {
65 EFX_FILTER_FLAG_RX_RSS = 0x01,
66 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
67 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
68 EFX_FILTER_FLAG_RX = 0x08,
69};
70
71/**
72 * struct efx_filter_spec - specification for a hardware filter
73 * @type: Type of match to be performed, from &enum efx_filter_type
74 * @priority: Priority of the filter, from &enum efx_filter_priority
75 * @flags: Miscellaneous flags, from &enum efx_filter_flags
76 * @dmaq_id: Source/target queue index
77 * @data: Match data (type-dependent)
78 *
79 * Use the efx_filter_set_*() functions to initialise the @type and
80 * @data fields.
81 */
82struct efx_filter_spec {
83 u8 type:4;
84 u8 priority:4;
85 u8 flags;
86 u16 dmaq_id;
87 u32 data[3];
88};
89
90static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
91 enum efx_filter_priority priority,
92 enum efx_filter_flags flags,
93 unsigned rxq_id)
94{
95 spec->type = EFX_FILTER_UNSPEC;
96 spec->priority = priority;
97 spec->flags = EFX_FILTER_FLAG_RX | flags;
98 spec->dmaq_id = rxq_id;
99}
100
101extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
102 __be32 host, __be16 port);
103extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
104 __be32 host, __be16 port,
105 __be32 rhost, __be16 rport);
106extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
107 u16 vid, const u8 *addr);
108enum {
109 EFX_FILTER_VID_UNSPEC = 0xffff,
110};
111
112#endif /* EFX_FILTER_H */
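
Taken together, the header defines a small builder-style API: initialise the
spec, set the match, insert. A usage sketch mirroring the call pattern of
efx_filter_rfs() above; efx_filter_insert_filter() is declared elsewhere in
the driver, and the meaning of its final bool argument (replace an existing
equal filter) is an assumption here:

#include <linux/in.h>
#include "filter.h"

static int example_insert_tcp_filter(struct efx_nic *efx)
{
	struct efx_filter_spec spec;
	int rc;

	/* deliver matches to RX queue 0, with DMA scatter enabled */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
			   EFX_FILTER_FLAG_RX_SCATTER, 0);
	/* full 4-tuple: local host/port first, then remote host/port,
	 * following the argument order used by efx_filter_rfs() above */
	rc = efx_filter_set_ipv4_full(&spec, IPPROTO_TCP,
				      htonl(0xc6336402), htons(1234),
				      htonl(0xc0000201), htons(80));
	if (rc)
		return rc;
	return efx_filter_insert_filter(efx, &spec, false);
}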
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 85a99fe87437..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -22,28 +22,39 @@
22 * 22 *
23 * Notes on locking strategy: 23 * Notes on locking strategy:
24 * 24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes 25 * Most CSRs are 128-bit (oword) and therefore cannot be read or
26 * which necessitates locking. 26 * written atomically. Access from the host is buffered by the Bus
27 * Under normal operation few writes to NIC registers are made and these 27 * Interface Unit (BIU). Whenever the host reads from the lowest
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special 28 * address of such a register, or from the address of a different such
29 * cased to allow 4-byte (hence lockless) accesses. 29 * register, the BIU latches the register's value. Subsequent reads
30 * from higher addresses of the same register will read the latched
31 * value. Whenever the host writes part of such a register, the BIU
32 * collects the written value and does not write to the underlying
33 * register until all 4 dwords have been written. A similar buffering
34 * scheme applies to host access to the NIC's 64-bit SRAM.
30 * 35 *
31 * It *is* safe to write to these 4-byte registers in the middle of an 36 * Access to different CSRs and 64-bit SRAM words must be serialised,
32 * access to an 8-byte or 16-byte register. We therefore use a 37 * since interleaved access can result in lost writes or lost
33 * spinlock to protect accesses to the larger registers, but no locks 38 * information from read-to-clear fields. We use efx_nic::biu_lock
34 * for the 4-byte registers. 39 * for this. (We could use separate locks for read and write, but
40 * this is not normally a performance bottleneck.)
35 * 41 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2 42 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
37 * due to the way the 16byte registers are "collected" in the BIU. 43 * 128-bit but are special-cased in the BIU to avoid the need for
44 * locking in the host:
38 * 45 *
39 * We also lock when carrying out reads, to ensure consistency of the 46 * - They are write-only.
40 * data (made possible since the BIU reads all 128 bits into a cache). 47 * - The semantics of writing to these registers are such that
41 * Reads are very rare, so this isn't a significant performance 48 * replacing the low 96 bits with zero does not affect functionality.
42 * impact. (Most data transferred from NIC to host is DMAed directly 49 * - If the host writes to the last dword address of such a register
43 * into host memory). 50 * (i.e. the high 32 bits) the underlying register will always be
44 * 51 * written. If the collector and the current write together do not
45 * I/O BAR access uses locks for both reads and writes (but is only provided 52 * provide values for all 128 bits of the register, the low 96 bits
46 * for testing purposes). 53 * will be written as zero.
54 * - If the host writes to the address of any other part of such a
55 * register while the collector already holds values for some other
56 * register, the write is discarded and the collector maintains its
57 * current state.
47 */ 58 */
48 59
49#if BITS_PER_LONG == 64 60#if BITS_PER_LONG == 64
@@ -72,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
72 return (__force __le32)__raw_readl(efx->membase + reg); 83 return (__force __le32)__raw_readl(efx->membase + reg);
73} 84}
74 85
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */ 86/* Write a normal 128-bit CSR, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, 87static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg) 88 unsigned int reg)
78{ 89{
@@ -85,21 +96,19 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
85 spin_lock_irqsave(&efx->biu_lock, flags); 96 spin_lock_irqsave(&efx->biu_lock, flags);
86#ifdef EFX_USE_QWORD_IO 97#ifdef EFX_USE_QWORD_IO
87 _efx_writeq(efx, value->u64[0], reg + 0); 98 _efx_writeq(efx, value->u64[0], reg + 0);
88 wmb();
89 _efx_writeq(efx, value->u64[1], reg + 8); 99 _efx_writeq(efx, value->u64[1], reg + 8);
90#else 100#else
91 _efx_writed(efx, value->u32[0], reg + 0); 101 _efx_writed(efx, value->u32[0], reg + 0);
92 _efx_writed(efx, value->u32[1], reg + 4); 102 _efx_writed(efx, value->u32[1], reg + 4);
93 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
94 wmb();
95 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
96#endif 105#endif
106 wmb();
97 mmiowb(); 107 mmiowb();
98 spin_unlock_irqrestore(&efx->biu_lock, flags); 108 spin_unlock_irqrestore(&efx->biu_lock, flags);
99} 109}
100 110
101/* Write an 8-byte NIC SRAM entry through the supplied mapping, 111/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
102 * locking as appropriate. */
103static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, 112static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
104 efx_qword_t *value, unsigned int index) 113 efx_qword_t *value, unsigned int index)
105{ 114{
@@ -115,36 +124,27 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
115 __raw_writeq((__force u64)value->u64[0], membase + addr); 124 __raw_writeq((__force u64)value->u64[0], membase + addr);
116#else 125#else
117 __raw_writel((__force u32)value->u32[0], membase + addr); 126 __raw_writel((__force u32)value->u32[0], membase + addr);
118 wmb();
119 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 127 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
120#endif 128#endif
129 wmb();
121 mmiowb(); 130 mmiowb();
122 spin_unlock_irqrestore(&efx->biu_lock, flags); 131 spin_unlock_irqrestore(&efx->biu_lock, flags);
123} 132}
124 133
125/* Write dword to NIC register that allows partial writes 134/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
126 *
127 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
128 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
129 * for lockless writes.
130 */
131static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, 135static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
132 unsigned int reg) 136 unsigned int reg)
133{ 137{
134 netif_vdbg(efx, hw, efx->net_dev, 138 netif_vdbg(efx, hw, efx->net_dev,
135 "writing partial register %x with "EFX_DWORD_FMT"\n", 139 "writing register %x with "EFX_DWORD_FMT"\n",
136 reg, EFX_DWORD_VAL(*value)); 140 reg, EFX_DWORD_VAL(*value));
137 141
138 /* No lock required */ 142 /* No lock required */
139 _efx_writed(efx, value->u32[0], reg); 143 _efx_writed(efx, value->u32[0], reg);
144 wmb();
140} 145}
141 146
142/* Read from a NIC register 147/* Read a 128-bit CSR, locking as appropriate. */
143 *
144 * This reads an entire 16-byte register in one go, locking as
145 * appropriate. It is essential to read the first dword first, as this
146 * prompts the NIC to load the current value into the shadow register.
147 */
148static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, 148static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
149 unsigned int reg) 149 unsigned int reg)
150{ 150{
@@ -163,8 +163,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
163 EFX_OWORD_VAL(*value)); 163 EFX_OWORD_VAL(*value));
164} 164}
165 165
166/* Read an 8-byte SRAM entry through supplied mapping, 166/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
167 * locking as appropriate. */
168static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, 167static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
169 efx_qword_t *value, unsigned int index) 168 efx_qword_t *value, unsigned int index)
170{ 169{
@@ -186,7 +185,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
186 addr, EFX_QWORD_VAL(*value)); 185 addr, EFX_QWORD_VAL(*value));
187} 186}
188 187
189/* Read dword from register that allows partial writes (sic) */ 188/* Read a 32-bit CSR or SRAM */
190static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, 189static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
191 unsigned int reg) 190 unsigned int reg)
192{ 191{
@@ -196,28 +195,28 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
196 reg, EFX_DWORD_VAL(*value)); 195 reg, EFX_DWORD_VAL(*value));
197} 196}
198 197
199/* Write to a register forming part of a table */ 198/* Write a 128-bit CSR forming part of a table */
200static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, 199static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
201 unsigned int reg, unsigned int index) 200 unsigned int reg, unsigned int index)
202{ 201{
203 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); 202 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
204} 203}
205 204
206/* Read to a register forming part of a table */ 205/* Read a 128-bit CSR forming part of a table */
207static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, 206static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
208 unsigned int reg, unsigned int index) 207 unsigned int reg, unsigned int index)
209{ 208{
210 efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); 209 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
211} 210}
212 211
213/* Write to a dword register forming part of a table */ 212/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
214static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, 213static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
215 unsigned int reg, unsigned int index) 214 unsigned int reg, unsigned int index)
216{ 215{
217 efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); 216 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
218} 217}
219 218
220/* Read from a dword register forming part of a table */ 219/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
221static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, 220static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
222 unsigned int reg, unsigned int index) 221 unsigned int reg, unsigned int index)
223{ 222{
@@ -231,29 +230,56 @@ static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
231#define EFX_PAGED_REG(page, reg) \ 230#define EFX_PAGED_REG(page, reg) \
232 ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) 231 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
233 232
234/* As for efx_writeo(), but for a page-mapped register. */ 233/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
235static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, 234static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
236 unsigned int reg, unsigned int page) 235 unsigned int reg, unsigned int page)
237{ 236{
238 efx_writeo(efx, value, EFX_PAGED_REG(page, reg)); 237 reg = EFX_PAGED_REG(page, reg);
239} 238
239 netif_vdbg(efx, hw, efx->net_dev,
240 "writing register %x with " EFX_OWORD_FMT "\n", reg,
241 EFX_OWORD_VAL(*value));
240 242
241/* As for efx_writed(), but for a page-mapped register. */ 243#ifdef EFX_USE_QWORD_IO
242static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value, 244 _efx_writeq(efx, value->u64[0], reg + 0);
243 unsigned int reg, unsigned int page) 245 _efx_writeq(efx, value->u64[1], reg + 8);
246#else
247 _efx_writed(efx, value->u32[0], reg + 0);
248 _efx_writed(efx, value->u32[1], reg + 4);
249 _efx_writed(efx, value->u32[2], reg + 8);
250 _efx_writed(efx, value->u32[3], reg + 12);
251#endif
252 wmb();
253}
254#define efx_writeo_page(efx, value, reg, page) \
255 _efx_writeo_page(efx, value, \
256 reg + \
257 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
258 page)
259
260/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
261 * RX_DESC_UPD or TX_DESC_UPD)
262 */
263static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
264 unsigned int reg, unsigned int page)
244{ 265{
245 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 266 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
246} 267}
247 268#define efx_writed_page(efx, value, reg, page) \
248/* Write dword to page-mapped register with an extra lock. 269 _efx_writed_page(efx, value, \
249 * 270 reg + \
250 * As for efx_writed_page(), but for a register that suffers from 271 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
251 * SFC bug 3181. Take out a lock so the BIU collector cannot be 272 && (reg) != 0xa1c), \
252 * confused. */ 273 page)
253static inline void efx_writed_page_locked(struct efx_nic *efx, 274
254 efx_dword_t *value, 275/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
255 unsigned int reg, 276 * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
256 unsigned int page) 277 * collector register.
278 */
279static inline void _efx_writed_page_locked(struct efx_nic *efx,
280 efx_dword_t *value,
281 unsigned int reg,
282 unsigned int page)
257{ 283{
258 unsigned long flags __attribute__ ((unused)); 284 unsigned long flags __attribute__ ((unused));
259 285
@@ -265,5 +291,9 @@ static inline void efx_writed_page_locked(struct efx_nic *efx,
265 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 291 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
266 } 292 }
267} 293}
294#define efx_writed_page_locked(efx, value, reg, page) \
295 _efx_writed_page_locked(efx, value, \
296 reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
297 page)
268 298
269#endif /* EFX_IO_H */ 299#endif /* EFX_IO_H */
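
The practical payoff of the descriptor-pointer special case documented above
is a lock-free doorbell: writing only the last dword of TX_DESC_UPD always
reaches the register, with the low 96 bits zeroed, which is harmless for this
register. A sketch in the style of the driver's nic.c, assuming the usual
regs.h/nic.h names:

static inline void example_notify_tx_desc(struct efx_nic *efx,
					  struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	efx_dword_t reg;

	/* one 32-bit MMIO write, no biu_lock needed */
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg,
			FR_BZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

FR_BZ_TX_DESC_UPD_DWORD_P0 is the high dword of TX_DESC_UPD at offset 0xa1c,
one of the three addresses the efx_writed_page() macro above accepts at
compile time.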
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index f1aa5f374890..d6a255d0856b 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,10 +13,8 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern struct efx_mac_operations falcon_gmac_operations; 16extern const struct efx_mac_operations falcon_xmac_operations;
17extern struct efx_mac_operations falcon_xmac_operations; 17extern const struct efx_mac_operations efx_mcdi_mac_operations;
18extern struct efx_mac_operations efx_mcdi_mac_operations;
19extern void falcon_reconfigure_xmac_core(struct efx_nic *efx);
20extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 18extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
21 u32 dma_len, int enable, int clear); 19 u32 dma_len, int enable, int clear);
22 20
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3912b8fed912..3dd45ed61f0a 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
50 return &nic_data->mcdi; 50 return &nic_data->mcdi;
51} 51}
52 52
53static inline void
54efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
55{
56 struct siena_nic_data *nic_data = efx->nic_data;
57 value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
58}
59
60static inline void
61efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
62{
63 struct siena_nic_data *nic_data = efx->nic_data;
64 __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
65}
66
53void efx_mcdi_init(struct efx_nic *efx) 67void efx_mcdi_init(struct efx_nic *efx)
54{ 68{
55 struct efx_mcdi_iface *mcdi; 69 struct efx_mcdi_iface *mcdi;
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
70 const u8 *inbuf, size_t inlen) 84 const u8 *inbuf, size_t inlen)
71{ 85{
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 87 unsigned pdu = MCDI_PDU(efx);
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 88 unsigned doorbell = MCDI_DOORBELL(efx);
75 unsigned int i; 89 unsigned int i;
76 efx_dword_t hdr; 90 efx_dword_t hdr;
77 u32 xflags, seqno; 91 u32 xflags, seqno;
@@ -92,29 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
92 MCDI_HEADER_SEQ, seqno, 106 MCDI_HEADER_SEQ, seqno,
93 MCDI_HEADER_XFLAGS, xflags); 107 MCDI_HEADER_XFLAGS, xflags);
94 108
95 efx_writed(efx, &hdr, pdu); 109 efx_mcdi_writed(efx, &hdr, pdu);
96 110
97 for (i = 0; i < inlen; i += 4) 111 for (i = 0; i < inlen; i += 4)
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 112 efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
99 113 pdu + 4 + i);
100 /* Ensure the payload is written out before the header */
101 wmb();
102 114
103 /* ring the doorbell with a distinctive value */ 115 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 116 EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
117 efx_mcdi_writed(efx, &hdr, doorbell);
105} 118}
106 119
107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 120static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
108{ 121{
109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 123 unsigned int pdu = MCDI_PDU(efx);
111 int i; 124 int i;
112 125
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 126 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
114 BUG_ON(outlen & 3 || outlen >= 0x100); 127 BUG_ON(outlen & 3 || outlen >= 0x100);
115 128
116 for (i = 0; i < outlen; i += 4) 129 for (i = 0; i < outlen; i += 4)
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 130 efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
118} 131}
119 132
120static int efx_mcdi_poll(struct efx_nic *efx) 133static int efx_mcdi_poll(struct efx_nic *efx)
@@ -122,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int time, finish; 136 unsigned int time, finish;
124 unsigned int respseq, respcmd, error; 137 unsigned int respseq, respcmd, error;
125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 138 unsigned int pdu = MCDI_PDU(efx);
126 unsigned int rc, spins; 139 unsigned int rc, spins;
127 efx_dword_t reg; 140 efx_dword_t reg;
128 141
@@ -148,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
148 161
149 time = get_seconds(); 162 time = get_seconds();
150 163
151 rmb(); 164 efx_mcdi_readd(efx, &reg, pdu);
152 efx_readd(efx, &reg, pdu);
153 165
154 /* All 1's indicates that shared memory is in reset (and is 166 /* All 1's indicates that shared memory is in reset (and is
155 * not a valid header). Wait for it to come out reset before 167 * not a valid header). Wait for it to come out reset before
@@ -176,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
176 respseq, mcdi->seqno); 188 respseq, mcdi->seqno);
177 rc = EIO; 189 rc = EIO;
178 } else if (error) { 190 } else if (error) {
179 efx_readd(efx, &reg, pdu + 4); 191 efx_mcdi_readd(efx, &reg, pdu + 4);
180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
181#define TRANSLATE_ERROR(name) \ 193#define TRANSLATE_ERROR(name) \
182 case MC_CMD_ERR_ ## name: \ 194 case MC_CMD_ERR_ ## name: \
@@ -210,21 +222,21 @@ out:
210/* Test and clear MC-rebooted flag for this port/function */ 222/* Test and clear MC-rebooted flag for this port/function */
211int efx_mcdi_poll_reboot(struct efx_nic *efx) 223int efx_mcdi_poll_reboot(struct efx_nic *efx)
212{ 224{
213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 225 unsigned int addr = MCDI_REBOOT_FLAG(efx);
214 efx_dword_t reg; 226 efx_dword_t reg;
215 uint32_t value; 227 uint32_t value;
216 228
217 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
218 return false; 230 return false;
219 231
220 efx_readd(efx, &reg, addr); 232 efx_mcdi_readd(efx, &reg, addr);
221 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
222 234
223 if (value == 0) 235 if (value == 0)
224 return 0; 236 return 0;
225 237
226 EFX_ZERO_DWORD(reg); 238 EFX_ZERO_DWORD(reg);
227 efx_writed(efx, &reg, addr); 239 efx_mcdi_writed(efx, &reg, addr);
228 240
229 if (value == MC_STATUS_DWORD_ASSERT) 241 if (value == MC_STATUS_DWORD_ASSERT)
230 return -EINTR; 242 return -EINTR;
@@ -381,7 +393,7 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
381 -rc); 393 -rc);
382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 394 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
383 } else 395 } else
384 netif_err(efx, hw, efx->net_dev, 396 netif_dbg(efx, hw, efx->net_dev,
385 "MC command 0x%x inlen %d failed rc=%d\n", 397 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc); 398 cmd, (int)inlen, -rc);
387 } 399 }
@@ -452,7 +464,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
452 * 464 *
453 * There's a race here with efx_mcdi_rpc(), because we might receive 465 * There's a race here with efx_mcdi_rpc(), because we might receive
454 * a REBOOT event *before* the request has been copied out. In polled 466 * a REBOOT event *before* the request has been copied out. In polled
455 * mode (during startup) this is irrelevent, because efx_mcdi_complete() 467 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
456 * is ignored. In event mode, this condition is just an edge-case of 468 * is ignored. In event mode, this condition is just an edge-case of
457 * receiving a REBOOT event after posting the MCDI request. Did the mc 469 * receiving a REBOOT event after posting the MCDI request. Did the mc
458 * reboot before or after the copyout? The best we can do always is 470 * reboot before or after the copyout? The best we can do always is
@@ -463,6 +475,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
463 if (mcdi->mode == MCDI_MODE_EVENTS) { 475 if (mcdi->mode == MCDI_MODE_EVENTS) {
464 mcdi->resprc = rc; 476 mcdi->resprc = rc;
465 mcdi->resplen = 0; 477 mcdi->resplen = 0;
478 ++mcdi->credits;
466 } 479 }
467 } else 480 } else
468 /* Nobody was waiting for an MCDI request, so trigger a reset */ 481 /* Nobody was waiting for an MCDI request, so trigger a reset */
@@ -601,7 +614,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
601 ************************************************************************** 614 **************************************************************************
602 */ 615 */
603 616
604int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) 617void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
605{ 618{
606 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; 619 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
607 size_t outlength; 620 size_t outlength;
@@ -615,29 +628,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
615 if (rc) 628 if (rc)
616 goto fail; 629 goto fail;
617 630
618 if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
619 *version = 0;
620 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
621 return 0;
622 }
623
624 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 631 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
625 rc = -EIO; 632 rc = -EIO;
626 goto fail; 633 goto fail;
627 } 634 }
628 635
629 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 636 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
630 *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | 637 snprintf(buf, len, "%u.%u.%u.%u",
631 ((u64)le16_to_cpu(ver_words[1]) << 32) | 638 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
632 ((u64)le16_to_cpu(ver_words[2]) << 16) | 639 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
633 le16_to_cpu(ver_words[3])); 640 return;
634 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
635
636 return 0;
637 641
638fail: 642fail:
639 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 643 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
640 return rc; 644 buf[0] = 0;
641} 645}
642 646
643int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 647int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
@@ -1093,8 +1097,8 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
1093 return rc; 1097 return rc;
1094} 1098}
1095 1099
1096int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1100static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1097 const u8 *mac, int *id_out) 1101 const u8 *mac, int *id_out)
1098{ 1102{
1099 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; 1103 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
1100 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; 1104 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
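
With efx_mcdi_fwver() replaced by efx_mcdi_print_fwver(), callers no longer
reassemble a u64 from the four version words; they receive the dotted string
directly. A plausible caller, assuming the ethtool get_drvinfo path (the
corresponding ethtool.c change is elsewhere in this patch):

static void example_get_drvinfo(struct net_device *net_dev,
				struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* MCDI (and hence a firmware version) only exists on Siena */
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_mcdi_print_fwver(efx, info->fw_version,
				     sizeof(info->fw_version));
}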
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index f1f89ad4075a..aced2a7856fc 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
93#define MCDI_EVENT_FIELD(_ev, _field) \ 93#define MCDI_EVENT_FIELD(_ev, _field) \
94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
95 95
96extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); 96extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
98 bool *was_attached_out); 98 bool *was_attached_out);
99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
@@ -121,8 +121,6 @@ extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
121extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 121extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
122extern int efx_mcdi_reset_port(struct efx_nic *efx); 122extern int efx_mcdi_reset_port(struct efx_nic *efx);
123extern int efx_mcdi_reset_mc(struct efx_nic *efx); 123extern int efx_mcdi_reset_mc(struct efx_nic *efx);
124extern int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
125 const u8 *mac, int *id_out);
126extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, 124extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
127 const u8 *mac, int *id_out); 125 const u8 *mac, int *id_out);
128extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 126extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index f88f4bf986ff..50c20777a564 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -138,7 +138,7 @@ static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
138} 138}
139 139
140 140
141struct efx_mac_operations efx_mcdi_mac_operations = { 141const struct efx_mac_operations efx_mcdi_mac_operations = {
142 .reconfigure = efx_mcdi_mac_reconfigure, 142 .reconfigure = efx_mcdi_mac_reconfigure,
143 .update_stats = efx_port_dummy_op_void, 143 .update_stats = efx_port_dummy_op_void,
144 .check_fault = efx_mcdi_mac_check_fault, 144 .check_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 90359e644006..41fe06fa0600 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -103,7 +103,7 @@
103 * 103 *
104 * If Code==CMDDONE, then the fields are further interpreted as: 104 * If Code==CMDDONE, then the fields are further interpreted as:
105 * 105 *
106 * - LEVEL==INFO Command succeded 106 * - LEVEL==INFO Command succeeded
107 * - LEVEL==ERR Command failed 107 * - LEVEL==ERR Command failed
108 * 108 *
109 * 0 8 16 24 32 109 * 0 8 16 24 32
@@ -572,7 +572,7 @@
572 (4*(_numwords)) 572 (4*(_numwords))
573 573
574/* MC_CMD_SET_RAND_SEED: 574/* MC_CMD_SET_RAND_SEED:
575 * Set the 16byte seed for the MC psuedo-random generator 575 * Set the 16byte seed for the MC pseudo-random generator
576 */ 576 */
577#define MC_CMD_SET_RAND_SEED 0x1a 577#define MC_CMD_SET_RAND_SEED 0x1a
578#define MC_CMD_SET_RAND_SEED_IN_LEN 16 578#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -1162,7 +1162,7 @@
1162#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1 1162#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2 1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1 1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
1165/* Remaining PERIOD* fields only relevent when PERIODIC_CHANGE is set */ 1165/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
1166#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3 1166#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
1167#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1 1167#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
1168#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4 1168#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0121e71702bf..6c63ab0710af 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,6 @@
16#include "phy.h" 16#include "phy.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "mcdi_pcol.h" 18#include "mcdi_pcol.h"
19#include "mdio_10g.h"
20#include "nic.h" 19#include "nic.h"
21#include "selftest.h" 20#include "selftest.h"
22 21
@@ -450,7 +449,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
450 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 449 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
451 u32 rmtadv; 450 u32 rmtadv;
452 451
453 /* The link partner capabilities are only relevent if the 452 /* The link partner capabilities are only relevant if the
454 * link supports flow control autonegotiation */ 453 * link supports flow control autonegotiation */
455 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) 454 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
456 return; 455 return;
@@ -514,7 +513,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
514 ecmd->supported = 513 ecmd->supported =
515 mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); 514 mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
516 ecmd->advertising = efx->link_advertising; 515 ecmd->advertising = efx->link_advertising;
517 ecmd->speed = efx->link_state.speed; 516 ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
518 ecmd->duplex = efx->link_state.fd; 517 ecmd->duplex = efx->link_state.fd;
519 ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); 518 ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
520 ecmd->phy_address = phy_cfg->port; 519 ecmd->phy_address = phy_cfg->port;
@@ -546,7 +545,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
546 caps = (ethtool_to_mcdi_cap(ecmd->advertising) | 545 caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
547 1 << MC_CMD_PHY_CAP_AN_LBN); 546 1 << MC_CMD_PHY_CAP_AN_LBN);
548 } else if (ecmd->duplex) { 547 } else if (ecmd->duplex) {
549 switch (ecmd->speed) { 548 switch (ethtool_cmd_speed(ecmd)) {
550 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; 549 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
551 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; 550 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
552 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; 551 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -554,7 +553,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
554 default: return -EINVAL; 553 default: return -EINVAL;
555 } 554 }
556 } else { 555 } else {
557 switch (ecmd->speed) { 556 switch (ethtool_cmd_speed(ecmd)) {
558 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; 557 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
559 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; 558 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
560 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; 559 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -713,7 +712,8 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
713 return 0; 712 return 0;
714} 713}
715 714
716const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) 715static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
716 unsigned int index)
717{ 717{
718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
719 719
@@ -739,7 +739,7 @@ const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
739 return NULL; 739 return NULL;
740} 740}
741 741
742struct efx_phy_operations efx_mcdi_phy_ops = { 742const struct efx_phy_operations efx_mcdi_phy_ops = {
743 .probe = efx_mcdi_phy_probe, 743 .probe = efx_mcdi_phy_probe,
744 .init = efx_port_dummy_op_int, 744 .init = efx_port_dummy_op_int,
745 .reconfigure = efx_mcdi_phy_reconfigure, 745 .reconfigure = efx_mcdi_phy_reconfigure,
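
The ethtool_cmd_speed()/ethtool_cmd_speed_set() conversions above exist
because struct ethtool_cmd splits the link speed across two 16-bit fields
(speed and speed_hi); direct assignment to ecmd->speed silently truncates
anything above 65535 Mb/s. A minimal illustration:

#include <linux/ethtool.h>

static u32 example_speed_roundtrip(struct ethtool_cmd *ecmd)
{
	/* 40 Gb/s does not fit in the legacy 16-bit field alone;
	 * the helper writes both speed and speed_hi */
	ethtool_cmd_speed_set(ecmd, 40000);
	/* reassembles the full 32-bit value from the two halves */
	return ethtool_cmd_speed(ecmd);
}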
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index eeaf0bd64bd3..7ab385c8136d 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,6 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "workarounds.h" 17#include "workarounds.h"
18#include "nic.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
21{ 20{
@@ -52,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
52 return spins ? spins : -ETIMEDOUT; 51 return spins ? spins : -ETIMEDOUT;
53} 52}
54 53
55static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) 54static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
56{ 55{
57 int status; 56 int status;
58 57
59 if (LOOPBACK_INTERNAL(efx))
60 return 0;
61
62 if (mmd != MDIO_MMD_AN) { 58 if (mmd != MDIO_MMD_AN) {
63 /* Read MMD STATUS2 to check it is responding. */ 59 /* Read MMD STATUS2 to check it is responding. */
64 status = efx_mdio_read(efx, mmd, MDIO_STAT2); 60 status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -69,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
69 } 65 }
70 } 66 }
71 67
72 /* Read MMD STATUS 1 to check for fault. */
73 status = efx_mdio_read(efx, mmd, MDIO_STAT1);
74 if (status & MDIO_STAT1_FAULT) {
75 if (fault_fatal) {
76 netif_err(efx, hw, efx->net_dev,
77 "PHY MMD %d reporting fatal"
78 " fault: status %x\n", mmd, status);
79 return -EIO;
80 } else {
81 netif_dbg(efx, hw, efx->net_dev,
82 "PHY MMD %d reporting status"
83 " %x (expected)\n", mmd, status);
84 }
85 }
86 return 0; 68 return 0;
87} 69}
88 70
@@ -131,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
131 return rc; 113 return rc;
132} 114}
133 115
134int efx_mdio_check_mmds(struct efx_nic *efx, 116int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
135 unsigned int mmd_mask, unsigned int fatal_mask)
136{ 117{
137 int mmd = 0, probe_mmd, devs1, devs2; 118 int mmd = 0, probe_mmd, devs1, devs2;
138 u32 devices; 119 u32 devices;
@@ -162,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
162 143
163 /* Check all required MMDs are responding and happy. */ 144 /* Check all required MMDs are responding and happy. */
164 while (mmd_mask) { 145 while (mmd_mask) {
165 if (mmd_mask & 1) { 146 if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
166 int fault_fatal = fatal_mask & 1; 147 return -EIO;
167 if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
168 return -EIO;
169 }
170 mmd_mask = mmd_mask >> 1; 148 mmd_mask = mmd_mask >> 1;
171 fatal_mask = fatal_mask >> 1;
172 mmd++; 149 mmd++;
173 } 150 }
174 151
@@ -255,12 +232,12 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
255 */ 232 */
256int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
257{ 234{
258 struct ethtool_cmd prev; 235 struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
259 236
260 efx->phy_op->get_settings(efx, &prev); 237 efx->phy_op->get_settings(efx, &prev);
261 238
262 if (ecmd->advertising == prev.advertising && 239 if (ecmd->advertising == prev.advertising &&
263 ecmd->speed == prev.speed && 240 ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
264 ecmd->duplex == prev.duplex && 241 ecmd->duplex == prev.duplex &&
265 ecmd->port == prev.port && 242 ecmd->port == prev.port &&
266 ecmd->autoneg == prev.autoneg) 243 ecmd->autoneg == prev.autoneg)
@@ -286,50 +263,28 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
286 */ 263 */
287void efx_mdio_an_reconfigure(struct efx_nic *efx) 264void efx_mdio_an_reconfigure(struct efx_nic *efx)
288{ 265{
289 bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full
290 || EFX_WORKAROUND_13204(efx));
291 int reg; 266 int reg;
292 267
293 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); 268 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
294 269
295 /* Set up the base page */ 270 /* Set up the base page */
296 reg = ADVERTISE_CSMA; 271 reg = ADVERTISE_CSMA | ADVERTISE_RESV;
297 if (efx->link_advertising & ADVERTISED_10baseT_Half)
298 reg |= ADVERTISE_10HALF;
299 if (efx->link_advertising & ADVERTISED_10baseT_Full)
300 reg |= ADVERTISE_10FULL;
301 if (efx->link_advertising & ADVERTISED_100baseT_Half)
302 reg |= ADVERTISE_100HALF;
303 if (efx->link_advertising & ADVERTISED_100baseT_Full)
304 reg |= ADVERTISE_100FULL;
305 if (xnp)
306 reg |= ADVERTISE_RESV;
307 else if (efx->link_advertising & (ADVERTISED_1000baseT_Half |
308 ADVERTISED_1000baseT_Full))
309 reg |= ADVERTISE_NPAGE;
310 if (efx->link_advertising & ADVERTISED_Pause) 272 if (efx->link_advertising & ADVERTISED_Pause)
311 reg |= ADVERTISE_PAUSE_CAP; 273 reg |= ADVERTISE_PAUSE_CAP;
312 if (efx->link_advertising & ADVERTISED_Asym_Pause) 274 if (efx->link_advertising & ADVERTISED_Asym_Pause)
313 reg |= ADVERTISE_PAUSE_ASYM; 275 reg |= ADVERTISE_PAUSE_ASYM;
314 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); 276 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
315 277
316 /* Set up the (extended) next page if necessary */ 278 /* Set up the (extended) next page */
317 if (efx->phy_op->set_npage_adv) 279 efx->phy_op->set_npage_adv(efx, efx->link_advertising);
318 efx->phy_op->set_npage_adv(efx, efx->link_advertising);
319 280
320 /* Enable and restart AN */ 281 /* Enable and restart AN */
321 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); 282 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
322 reg |= MDIO_AN_CTRL1_ENABLE; 283 reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
323 if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx)))
324 reg |= MDIO_AN_CTRL1_RESTART;
325 if (xnp)
326 reg |= MDIO_AN_CTRL1_XNP;
327 else
328 reg &= ~MDIO_AN_CTRL1_XNP;
329 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg); 284 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
330} 285}
331 286
332enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) 287u8 efx_mdio_get_pause(struct efx_nic *efx)
333{ 288{
334 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX)); 289 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
335 290
@@ -360,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
360 "no MDIO PHY present with ID %d\n", efx->mdio.prtad); 315 "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
361 rc = -EINVAL; 316 rc = -EINVAL;
362 } else { 317 } else {
363 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); 318 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
364 } 319 }
365 320
366 mutex_unlock(&efx->mac_lock); 321 mutex_unlock(&efx->mac_lock);
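
efx_mdio_get_pause() now returns a plain u8 bitmask rather than
enum efx_fc_type. For the autonegotiated case the standard resolution helpers
from <linux/mii.h> fit this shape; a sketch, with the direct mapping of
FLOW_CTRL_TX/FLOW_CTRL_RX onto EFX_FC_TX/EFX_FC_RX being an assumption here:

#include <linux/mii.h>

static u8 example_resolve_pause(struct efx_nic *efx, u16 rmtadv)
{
	/* advertise what we want, then resolve against the partner */
	u16 lcladv = mii_advertise_flowctrl(efx->wanted_fc);

	return mii_resolve_flowctrl_fdx(lcladv, rmtadv);
}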
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 75791d3d4963..a97dbbd2de99 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
68 int spins, int spintime); 68 int spins, int spintime);
69 69
70/* As efx_mdio_check_mmd but for multiple MMDs */ 70/* As efx_mdio_check_mmd but for multiple MMDs */
71int efx_mdio_check_mmds(struct efx_nic *efx, 71int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
72 unsigned int mmd_mask, unsigned int fatal_mask);
73 72
74/* Check the link status of specified mmds in bit mask */ 73/* Check the link status of specified mmds in bit mask */
75extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); 74extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
@@ -93,7 +92,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
93/* Get pause parameters from AN if available (otherwise return 92/* Get pause parameters from AN if available (otherwise return
94 * requested pause parameters) 93 * requested pause parameters)
95 */ 94 */
96enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx); 95u8 efx_mdio_get_pause(struct efx_nic *efx);
97 96
98/* Wait for specified MMDs to exit reset within a timeout */ 97/* Wait for specified MMDs to exit reset within a timeout */
99extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx, 98extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4f701f..b6304486f244 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -216,7 +216,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
216 int rc; 216 int rc;
217 217
218 for (;;) { 218 for (;;) {
219 rc = del_mtd_device(&part->mtd); 219 rc = mtd_device_unregister(&part->mtd);
220 if (rc != -EBUSY) 220 if (rc != -EBUSY)
221 break; 221 break;
222 ssleep(1); 222 ssleep(1);
@@ -268,7 +268,7 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
268 part->mtd.write = efx_mtd->ops->write; 268 part->mtd.write = efx_mtd->ops->write;
269 part->mtd.sync = efx_mtd_sync; 269 part->mtd.sync = efx_mtd_sync;
270 270
271 if (add_mtd_device(&part->mtd)) 271 if (mtd_device_register(&part->mtd, NULL, 0))
272 goto fail; 272 goto fail;
273 } 273 }
274 274
@@ -280,7 +280,7 @@ fail:
280 --part; 280 --part;
281 efx_mtd_remove_partition(part); 281 efx_mtd_remove_partition(part);
282 } 282 }
283 /* add_mtd_device() returns 1 if the MTD table is full */ 283 /* mtd_device_register() returns 1 if the MTD table is full */
284 return -ENOMEM; 284 return -ENOMEM;
285} 285}
286 286
@@ -321,14 +321,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
321 struct efx_mtd *efx_mtd = mtd->priv; 321 struct efx_mtd *efx_mtd = mtd->priv;
322 const struct efx_spi_device *spi = efx_mtd->spi; 322 const struct efx_spi_device *spi = efx_mtd->spi;
323 struct efx_nic *efx = efx_mtd->efx; 323 struct efx_nic *efx = efx_mtd->efx;
324 struct falcon_nic_data *nic_data = efx->nic_data;
324 int rc; 325 int rc;
325 326
326 rc = mutex_lock_interruptible(&efx->spi_lock); 327 rc = mutex_lock_interruptible(&nic_data->spi_lock);
327 if (rc) 328 if (rc)
328 return rc; 329 return rc;
329 rc = falcon_spi_read(efx, spi, part->offset + start, len, 330 rc = falcon_spi_read(efx, spi, part->offset + start, len,
330 retlen, buffer); 331 retlen, buffer);
331 mutex_unlock(&efx->spi_lock); 332 mutex_unlock(&nic_data->spi_lock);
332 return rc; 333 return rc;
333} 334}
334 335
@@ -337,13 +338,14 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
337 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 338 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
338 struct efx_mtd *efx_mtd = mtd->priv; 339 struct efx_mtd *efx_mtd = mtd->priv;
339 struct efx_nic *efx = efx_mtd->efx; 340 struct efx_nic *efx = efx_mtd->efx;
341 struct falcon_nic_data *nic_data = efx->nic_data;
340 int rc; 342 int rc;
341 343
342 rc = mutex_lock_interruptible(&efx->spi_lock); 344 rc = mutex_lock_interruptible(&nic_data->spi_lock);
343 if (rc) 345 if (rc)
344 return rc; 346 return rc;
345 rc = efx_spi_erase(part, part->offset + start, len); 347 rc = efx_spi_erase(part, part->offset + start, len);
346 mutex_unlock(&efx->spi_lock); 348 mutex_unlock(&nic_data->spi_lock);
347 return rc; 349 return rc;
348} 350}
349 351
@@ -354,14 +356,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
354 struct efx_mtd *efx_mtd = mtd->priv; 356 struct efx_mtd *efx_mtd = mtd->priv;
355 const struct efx_spi_device *spi = efx_mtd->spi; 357 const struct efx_spi_device *spi = efx_mtd->spi;
356 struct efx_nic *efx = efx_mtd->efx; 358 struct efx_nic *efx = efx_mtd->efx;
359 struct falcon_nic_data *nic_data = efx->nic_data;
357 int rc; 360 int rc;
358 361
359 rc = mutex_lock_interruptible(&efx->spi_lock); 362 rc = mutex_lock_interruptible(&nic_data->spi_lock);
360 if (rc) 363 if (rc)
361 return rc; 364 return rc;
362 rc = falcon_spi_write(efx, spi, part->offset + start, len, 365 rc = falcon_spi_write(efx, spi, part->offset + start, len,
363 retlen, buffer); 366 retlen, buffer);
364 mutex_unlock(&efx->spi_lock); 367 mutex_unlock(&nic_data->spi_lock);
365 return rc; 368 return rc;
366} 369}
367 370
@@ -370,11 +373,12 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
370 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 373 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
371 struct efx_mtd *efx_mtd = mtd->priv; 374 struct efx_mtd *efx_mtd = mtd->priv;
372 struct efx_nic *efx = efx_mtd->efx; 375 struct efx_nic *efx = efx_mtd->efx;
376 struct falcon_nic_data *nic_data = efx->nic_data;
373 int rc; 377 int rc;
374 378
375 mutex_lock(&efx->spi_lock); 379 mutex_lock(&nic_data->spi_lock);
376 rc = efx_spi_slow_wait(part, true); 380 rc = efx_spi_slow_wait(part, true);
377 mutex_unlock(&efx->spi_lock); 381 mutex_unlock(&nic_data->spi_lock);
378 return rc; 382 return rc;
379} 383}
380 384
@@ -387,35 +391,67 @@ static struct efx_mtd_ops falcon_mtd_ops = {
387 391
388static int falcon_mtd_probe(struct efx_nic *efx) 392static int falcon_mtd_probe(struct efx_nic *efx)
389{ 393{
390 struct efx_spi_device *spi = efx->spi_flash; 394 struct falcon_nic_data *nic_data = efx->nic_data;
395 struct efx_spi_device *spi;
391 struct efx_mtd *efx_mtd; 396 struct efx_mtd *efx_mtd;
392 int rc; 397 int rc = -ENODEV;
393 398
394 ASSERT_RTNL(); 399 ASSERT_RTNL();
395 400
396 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) 401 spi = &nic_data->spi_flash;
397 return -ENODEV; 402 if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
398 403 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
399 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), 404 GFP_KERNEL);
400 GFP_KERNEL); 405 if (!efx_mtd)
401 if (!efx_mtd) 406 return -ENOMEM;
402 return -ENOMEM; 407
403 408 efx_mtd->spi = spi;
404 efx_mtd->spi = spi; 409 efx_mtd->name = "flash";
405 efx_mtd->name = "flash"; 410 efx_mtd->ops = &falcon_mtd_ops;
406 efx_mtd->ops = &falcon_mtd_ops; 411
412 efx_mtd->n_parts = 1;
413 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
414 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
415 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
416 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
417 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
418 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
419
420 rc = efx_mtd_probe_device(efx, efx_mtd);
421 if (rc) {
422 kfree(efx_mtd);
423 return rc;
424 }
425 }
407 426
408 efx_mtd->n_parts = 1; 427 spi = &nic_data->spi_eeprom;
409 efx_mtd->part[0].mtd.type = MTD_NORFLASH; 428 if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
410 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH; 429 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
411 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; 430 GFP_KERNEL);
412 efx_mtd->part[0].mtd.erasesize = spi->erase_size; 431 if (!efx_mtd)
413 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START; 432 return -ENOMEM;
414 efx_mtd->part[0].type_name = "sfc_flash_bootrom"; 433
434 efx_mtd->spi = spi;
435 efx_mtd->name = "EEPROM";
436 efx_mtd->ops = &falcon_mtd_ops;
437
438 efx_mtd->n_parts = 1;
439 efx_mtd->part[0].mtd.type = MTD_RAM;
440 efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
441 efx_mtd->part[0].mtd.size =
442 min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
443 EFX_EEPROM_BOOTCONFIG_START;
444 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
445 efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
446 efx_mtd->part[0].type_name = "sfc_bootconfig";
447
448 rc = efx_mtd_probe_device(efx, efx_mtd);
449 if (rc) {
450 kfree(efx_mtd);
451 return rc;
452 }
453 }
415 454
416 rc = efx_mtd_probe_device(efx, efx_mtd);
417 if (rc)
418 kfree(efx_mtd);
419 return rc; 455 return rc;
420} 456}
421 457
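falcon_mtd_probe() now treats the flash boot ROM and the EEPROM boot-config area as two independent, optional partitions: each is registered only if its SPI device is present and large enough, and the EEPROM partition length is clamped with min() so it never runs past the end of a small device. A sketch of that size arithmetic, with hypothetical offsets in place of the driver's constants:

#include <stdio.h>

#define BOOTCONFIG_START 0x0800u	/* hypothetical offsets */
#define BOOTCONFIG_END   0x1000u

int main(void)
{
	unsigned int spi_size = 0x0c00;	/* device ends before BOOTCONFIG_END */

	if (spi_size > BOOTCONFIG_START) {
		unsigned int end = spi_size < BOOTCONFIG_END ?
				   spi_size : BOOTCONFIG_END;
		printf("partition: offset 0x%x, length 0x%x\n",
		       BOOTCONFIG_START, end - BOOTCONFIG_START);
	}
	return 0;
}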
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 64e7caa4bbb5..e8d5f03a89fe 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/vmalloc.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33 34
34#include "enum.h" 35#include "enum.h"
@@ -40,7 +41,7 @@
40 * 41 *
41 **************************************************************************/ 42 **************************************************************************/
42 43
43#define EFX_DRIVER_VERSION "3.0" 44#define EFX_DRIVER_VERSION "3.1"
44 45
45#ifdef EFX_ENABLE_DEBUG 46#ifdef EFX_ENABLE_DEBUG
46#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 47#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -62,10 +63,12 @@
62/* Checksum generation is a per-queue option in hardware, so each 63/* Checksum generation is a per-queue option in hardware, so each
63 * queue visible to the networking core is backed by two hardware TX 64 * queue visible to the networking core is backed by two hardware TX
64 * queues. */ 65 * queues. */
65#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS 66#define EFX_MAX_TX_TC 2
66#define EFX_TXQ_TYPE_OFFLOAD 1 67#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
67#define EFX_TXQ_TYPES 2 68#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
68#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) 69#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
70#define EFX_TXQ_TYPES 4
71#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
69 72
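With EFX_TXQ_TYPE_OFFLOAD and EFX_TXQ_TYPE_HIGHPRI defined as flag bits and EFX_TXQ_TYPES a power of two, a hardware queue number can encode both its channel and its type: the low bits are the type flags, tested later in nic.c with expressions like tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI. A sketch of that numbering (the channel-times-types layout is an assumption here):

#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI 2	/* flag */
#define EFX_TXQ_TYPES        4

int main(void)
{
	unsigned int channel, type;

	for (channel = 0; channel < 2; channel++)
		for (type = 0; type < EFX_TXQ_TYPES; type++) {
			unsigned int queue = channel * EFX_TXQ_TYPES + type;
			printf("queue %u: channel %u offload=%u highpri=%u\n",
			       queue, queue / EFX_TXQ_TYPES,
			       !!(queue & EFX_TXQ_TYPE_OFFLOAD),
			       !!(queue & EFX_TXQ_TYPE_HIGHPRI));
		}
	return 0;
}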
70/** 73/**
71 * struct efx_special_buffer - An Efx special buffer 74 * struct efx_special_buffer - An Efx special buffer
@@ -135,13 +138,20 @@ struct efx_tx_buffer {
135 * @efx: The associated Efx NIC 138 * @efx: The associated Efx NIC
136 * @queue: DMA queue number 139 * @queue: DMA queue number
137 * @channel: The associated channel 140 * @channel: The associated channel
141 * @core_txq: The networking core TX queue structure
138 * @buffer: The software buffer ring 142 * @buffer: The software buffer ring
139 * @txd: The hardware descriptor ring 143 * @txd: The hardware descriptor ring
144 * @ptr_mask: The size of the ring minus 1.
145 * @initialised: Has hardware queue been initialised?
140 * @flushed: Used when handling queue flushing 146 * @flushed: Used when handling queue flushing
141 * @read_count: Current read pointer. 147 * @read_count: Current read pointer.
142 * This is the number of buffers that have been removed from both rings. 148 * This is the number of buffers that have been removed from both rings.
143 * @stopped: Stopped count. 149 * @old_write_count: The value of @write_count when last checked.
144 * Set if this TX queue is currently stopping its port. 150 * This is here for performance reasons. The xmit path will
151 * only get the up-to-date value of @write_count if this
152 * variable indicates that the queue is empty. This is to
153 * avoid cache-line ping-pong between the xmit path and the
154 * completion path.
145 * @insert_count: Current insert pointer 155 * @insert_count: Current insert pointer
146 * This is the number of buffers that have been added to the 156 * This is the number of buffers that have been added to the
147 * software ring. 157 * software ring.
@@ -161,20 +171,26 @@ struct efx_tx_buffer {
161 * @tso_long_headers: Number of packets with headers too long for standard 171 * @tso_long_headers: Number of packets with headers too long for standard
162 * blocks 172 * blocks
163 * @tso_packets: Number of packets via the TSO xmit path 173 * @tso_packets: Number of packets via the TSO xmit path
174 * @pushes: Number of times the TX push feature has been used
175 * @empty_read_count: If the completion path has seen the queue as empty
176 * and the transmission path has not yet checked this, the value of
177 * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
164 */ 178 */
165struct efx_tx_queue { 179struct efx_tx_queue {
166 /* Members which don't change on the fast path */ 180 /* Members which don't change on the fast path */
167 struct efx_nic *efx ____cacheline_aligned_in_smp; 181 struct efx_nic *efx ____cacheline_aligned_in_smp;
168 unsigned queue; 182 unsigned queue;
169 struct efx_channel *channel; 183 struct efx_channel *channel;
170 struct efx_nic *nic; 184 struct netdev_queue *core_txq;
171 struct efx_tx_buffer *buffer; 185 struct efx_tx_buffer *buffer;
172 struct efx_special_buffer txd; 186 struct efx_special_buffer txd;
187 unsigned int ptr_mask;
188 bool initialised;
173 enum efx_flush_state flushed; 189 enum efx_flush_state flushed;
174 190
175 /* Members used mainly on the completion path */ 191 /* Members used mainly on the completion path */
176 unsigned int read_count ____cacheline_aligned_in_smp; 192 unsigned int read_count ____cacheline_aligned_in_smp;
177 int stopped; 193 unsigned int old_write_count;
178 194
179 /* Members used only on the xmit path */ 195 /* Members used only on the xmit path */
180 unsigned int insert_count ____cacheline_aligned_in_smp; 196 unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -184,6 +200,11 @@ struct efx_tx_queue {
184 unsigned int tso_bursts; 200 unsigned int tso_bursts;
185 unsigned int tso_long_headers; 201 unsigned int tso_long_headers;
186 unsigned int tso_packets; 202 unsigned int tso_packets;
203 unsigned int pushes;
204
205 /* Members shared between paths and sometimes updated */
206 unsigned int empty_read_count ____cacheline_aligned_in_smp;
207#define EFX_EMPTY_COUNT_VALID 0x80000000
187}; 208};
188 209
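The @empty_read_count comment describes a deliberate one-way handshake: the completion path publishes "the queue looked empty at read pointer R" by writing R | EFX_EMPTY_COUNT_VALID into a field on its own cache line, and only when the xmit path sees that flag does it re-read the hot @write_count. A single-threaded sketch of the flag arithmetic (the real code pairs this with memory barriers and ACCESS_ONCE, omitted here):

#include <stdio.h>

#define EFX_EMPTY_COUNT_VALID 0x80000000u

int main(void)
{
	unsigned int read_count = 42, write_count = 42;
	unsigned int empty_read_count;

	/* Completion path: queue drained, publish the snapshot. */
	empty_read_count = read_count | EFX_EMPTY_COUNT_VALID;

	/* Xmit path: the queue was empty iff the snapshot matches us. */
	if (empty_read_count != 0) {
		int was_empty = ((empty_read_count ^ write_count)
				 & ~EFX_EMPTY_COUNT_VALID) == 0;
		printf("queue %s empty at last check\n",
		       was_empty ? "was" : "was not");
	}
	return 0;
}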
189/** 210/**
@@ -193,15 +214,17 @@ struct efx_tx_queue {
193 * If both this and page are %NULL, the buffer slot is currently free. 214 * If both this and page are %NULL, the buffer slot is currently free.
194 * @page: The associated page buffer, if any. 215 * @page: The associated page buffer, if any.
195 * If both this and skb are %NULL, the buffer slot is currently free. 216 * If both this and skb are %NULL, the buffer slot is currently free.
196 * @data: Pointer to ethernet header
197 * @len: Buffer length, in bytes. 217 * @len: Buffer length, in bytes.
218 * @is_page: Indicates if @page is valid. If false, @skb is valid.
198 */ 219 */
199struct efx_rx_buffer { 220struct efx_rx_buffer {
200 dma_addr_t dma_addr; 221 dma_addr_t dma_addr;
201 struct sk_buff *skb; 222 union {
202 struct page *page; 223 struct sk_buff *skb;
203 char *data; 224 struct page *page;
225 } u;
204 unsigned int len; 226 unsigned int len;
227 bool is_page;
205}; 228};
206 229
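struct efx_rx_buffer now keeps @skb and @page in a union discriminated by @is_page, since exactly one of the two is ever valid; dropping the redundant pointer and the old @data field shrinks each ring entry. The same tagged-union pattern in a standalone sketch, with strings standing in for the kernel types:

#include <stdbool.h>
#include <stdio.h>

struct rx_buffer {
	union {
		const char *skb;	/* placeholder payloads */
		const char *page;
	} u;
	unsigned int len;
	bool is_page;	/* discriminator: says which member is live */
};

int main(void)
{
	struct rx_buffer buf = {
		.u.page = "page-backed", .len = 4096, .is_page = true,
	};

	printf("%u-byte %s buffer\n", buf.len,
	       buf.is_page ? buf.u.page : buf.u.skb);
	return 0;
}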
207/** 230/**
@@ -225,10 +248,9 @@ struct efx_rx_page_state {
225/** 248/**
226 * struct efx_rx_queue - An Efx RX queue 249 * struct efx_rx_queue - An Efx RX queue
227 * @efx: The associated Efx NIC 250 * @efx: The associated Efx NIC
228 * @queue: DMA queue number
229 * @channel: The associated channel
230 * @buffer: The software buffer ring 251 * @buffer: The software buffer ring
231 * @rxd: The hardware descriptor ring 252 * @rxd: The hardware descriptor ring
253 * @ptr_mask: The size of the ring minus 1.
232 * @added_count: Number of buffers added to the receive queue. 254 * @added_count: Number of buffers added to the receive queue.
233 * @notified_count: Number of buffers given to NIC (<= @added_count). 255 * @notified_count: Number of buffers given to NIC (<= @added_count).
234 * @removed_count: Number of buffers removed from the receive queue. 256 * @removed_count: Number of buffers removed from the receive queue.
@@ -240,9 +262,6 @@ struct efx_rx_page_state {
240 * @min_fill: RX descriptor minimum non-zero fill level. 262 * @min_fill: RX descriptor minimum non-zero fill level.
241 * This records the minimum fill level observed when a ring 263 * This records the minimum fill level observed when a ring
242 * refill was triggered. 264 * refill was triggered.
243 * @min_overfill: RX descriptor minimum overflow fill level.
244 * This records the minimum fill level at which RX queue
245 * overflow was observed. It should never be set.
246 * @alloc_page_count: RX allocation strategy counter. 265 * @alloc_page_count: RX allocation strategy counter.
247 * @alloc_skb_count: RX allocation strategy counter. 266 * @alloc_skb_count: RX allocation strategy counter.
248 * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 267 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
@@ -250,10 +269,9 @@ struct efx_rx_page_state {
250 */ 269 */
251struct efx_rx_queue { 270struct efx_rx_queue {
252 struct efx_nic *efx; 271 struct efx_nic *efx;
253 int queue;
254 struct efx_channel *channel;
255 struct efx_rx_buffer *buffer; 272 struct efx_rx_buffer *buffer;
256 struct efx_special_buffer rxd; 273 struct efx_special_buffer rxd;
274 unsigned int ptr_mask;
257 275
258 int added_count; 276 int added_count;
259 int notified_count; 277 int notified_count;
@@ -302,18 +320,16 @@ enum efx_rx_alloc_method {
302 * 320 *
303 * @efx: Associated Efx NIC 321 * @efx: Associated Efx NIC
304 * @channel: Channel instance number 322 * @channel: Channel instance number
305 * @name: Name for channel and IRQ
306 * @enabled: Channel enabled indicator 323 * @enabled: Channel enabled indicator
307 * @irq: IRQ number (MSI and MSI-X only) 324 * @irq: IRQ number (MSI and MSI-X only)
308 * @irq_moderation: IRQ moderation value (in hardware ticks) 325 * @irq_moderation: IRQ moderation value (in hardware ticks)
309 * @napi_dev: Net device used with NAPI 326 * @napi_dev: Net device used with NAPI
310 * @napi_str: NAPI control structure 327 * @napi_str: NAPI control structure
311 * @reset_work: Scheduled reset work thread
312 * @work_pending: Is work pending via NAPI? 328 * @work_pending: Is work pending via NAPI?
313 * @eventq: Event queue buffer 329 * @eventq: Event queue buffer
330 * @eventq_mask: Event queue pointer mask
314 * @eventq_read_ptr: Event queue read pointer 331 * @eventq_read_ptr: Event queue read pointer
315 * @last_eventq_read_ptr: Last event queue read pointer value. 332 * @last_eventq_read_ptr: Last event queue read pointer value.
316 * @magic_count: Event queue test event count
317 * @irq_count: Number of IRQs since last adaptive moderation decision 333 * @irq_count: Number of IRQs since last adaptive moderation decision
318 * @irq_mod_score: IRQ moderation score 334 * @irq_mod_score: IRQ moderation score
319 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 335 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -327,14 +343,12 @@ enum efx_rx_alloc_method {
327 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 343 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
328 * @n_rx_overlength: Count of RX_OVERLENGTH errors 344 * @n_rx_overlength: Count of RX_OVERLENGTH errors
329 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 345 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
330 * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX 346 * @rx_queue: RX queue for this channel
331 * @tx_stop_count: Core TX queue stop count 347 * @tx_queue: TX queues for this channel
332 * @tx_stop_lock: Core TX queue stop lock
333 */ 348 */
334struct efx_channel { 349struct efx_channel {
335 struct efx_nic *efx; 350 struct efx_nic *efx;
336 int channel; 351 int channel;
337 char name[IFNAMSIZ + 6];
338 bool enabled; 352 bool enabled;
339 int irq; 353 int irq;
340 unsigned int irq_moderation; 354 unsigned int irq_moderation;
@@ -342,12 +356,15 @@ struct efx_channel {
342 struct napi_struct napi_str; 356 struct napi_struct napi_str;
343 bool work_pending; 357 bool work_pending;
344 struct efx_special_buffer eventq; 358 struct efx_special_buffer eventq;
359 unsigned int eventq_mask;
345 unsigned int eventq_read_ptr; 360 unsigned int eventq_read_ptr;
346 unsigned int last_eventq_read_ptr; 361 unsigned int last_eventq_read_ptr;
347 unsigned int magic_count;
348 362
349 unsigned int irq_count; 363 unsigned int irq_count;
350 unsigned int irq_mod_score; 364 unsigned int irq_mod_score;
365#ifdef CONFIG_RFS_ACCEL
366 unsigned int rfs_filters_added;
367#endif
351 368
352 int rx_alloc_level; 369 int rx_alloc_level;
353 int rx_alloc_push_pages; 370 int rx_alloc_push_pages;
@@ -366,9 +383,8 @@ struct efx_channel {
366 struct efx_rx_buffer *rx_pkt; 383 struct efx_rx_buffer *rx_pkt;
367 bool rx_pkt_csummed; 384 bool rx_pkt_csummed;
368 385
369 struct efx_tx_queue *tx_queue; 386 struct efx_rx_queue rx_queue;
370 atomic_t tx_stop_count; 387 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
371 spinlock_t tx_stop_lock;
372}; 388};
373 389
374enum efx_led_mode { 390enum efx_led_mode {
@@ -385,11 +401,6 @@ extern const unsigned int efx_loopback_mode_max;
385#define LOOPBACK_MODE(efx) \ 401#define LOOPBACK_MODE(efx) \
386 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) 402 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
387 403
388extern const char *efx_interrupt_mode_names[];
389extern const unsigned int efx_interrupt_mode_max;
390#define INT_MODE(efx) \
391 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
392
393extern const char *efx_reset_type_names[]; 404extern const char *efx_reset_type_names[];
394extern const unsigned int efx_reset_type_max; 405extern const unsigned int efx_reset_type_max;
395#define RESET_TYPE(type) \ 406#define RESET_TYPE(type) \
@@ -404,8 +415,6 @@ enum efx_int_mode {
404}; 415};
405#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 416#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
406 417
407#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
408
409enum nic_state { 418enum nic_state {
410 STATE_INIT = 0, 419 STATE_INIT = 0,
411 STATE_RUNNING = 1, 420 STATE_RUNNING = 1,
@@ -440,11 +449,9 @@ enum nic_state {
440struct efx_nic; 449struct efx_nic;
441 450
442/* Pseudo bit-mask flow control field */ 451/* Pseudo bit-mask flow control field */
443enum efx_fc_type { 452#define EFX_FC_RX FLOW_CTRL_RX
444 EFX_FC_RX = FLOW_CTRL_RX, 453#define EFX_FC_TX FLOW_CTRL_TX
445 EFX_FC_TX = FLOW_CTRL_TX, 454#define EFX_FC_AUTO 4
446 EFX_FC_AUTO = 4,
447};
448 455
449/** 456/**
450 * struct efx_link_state - Current state of the link 457 * struct efx_link_state - Current state of the link
@@ -456,7 +463,7 @@ enum efx_fc_type {
456struct efx_link_state { 463struct efx_link_state {
457 bool up; 464 bool up;
458 bool fd; 465 bool fd;
459 enum efx_fc_type fc; 466 u8 fc;
460 unsigned int speed; 467 unsigned int speed;
461}; 468};
462 469
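Turning enum efx_fc_type into plain defines lets flow-control state live in a small bitmask (now a u8 in struct efx_link_state) whose RX/TX bits are shared directly with the kernel's FLOW_CTRL_* values. A sketch of how such a mask composes and decomposes; the numeric bit values here are stand-ins, the real ones come from <linux/mii.h>:

#include <stdio.h>

#define FC_RX   1u	/* stand-in for EFX_FC_RX == FLOW_CTRL_RX */
#define FC_TX   2u	/* stand-in for EFX_FC_TX == FLOW_CTRL_TX */
#define FC_AUTO 4u

int main(void)
{
	unsigned char wanted_fc = FC_AUTO | FC_RX | FC_TX;

	printf("rx=%u tx=%u autoneg=%u\n",
	       !!(wanted_fc & FC_RX),
	       !!(wanted_fc & FC_TX),
	       !!(wanted_fc & FC_AUTO));
	return 0;
}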
@@ -618,20 +625,21 @@ union efx_multicast_hash {
618 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; 625 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
619}; 626};
620 627
628struct efx_filter_state;
629
621/** 630/**
622 * struct efx_nic - an Efx NIC 631 * struct efx_nic - an Efx NIC
623 * @name: Device name (net device name or bus id before net device registered) 632 * @name: Device name (net device name or bus id before net device registered)
624 * @pci_dev: The PCI device 633 * @pci_dev: The PCI device
625 * @type: Controller type attributes 634 * @type: Controller type attributes
626 * @legacy_irq: IRQ number 635 * @legacy_irq: IRQ number
636 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
627 * @workqueue: Workqueue for port reconfigures and the HW monitor. 637 * @workqueue: Workqueue for port reconfigures and the HW monitor.
628 * Work items do not hold and must not acquire RTNL. 638 * Work items do not hold and must not acquire RTNL.
629 * @workqueue_name: Name of workqueue 639 * @workqueue_name: Name of workqueue
630 * @reset_work: Scheduled reset workitem 640 * @reset_work: Scheduled reset workitem
631 * @monitor_work: Hardware monitor workitem
632 * @membase_phys: Memory BAR value as physical address 641 * @membase_phys: Memory BAR value as physical address
633 * @membase: Memory BAR value 642 * @membase: Memory BAR value
634 * @biu_lock: BIU (bus interface unit) lock
635 * @interrupt_mode: Interrupt mode 643 * @interrupt_mode: Interrupt mode
636 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 644 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
637 * @irq_rx_moderation: IRQ moderation time for RX event queues 645 * @irq_rx_moderation: IRQ moderation time for RX event queues
@@ -641,56 +649,41 @@ union efx_multicast_hash {
641 * @tx_queue: TX DMA queues 649 * @tx_queue: TX DMA queues
642 * @rx_queue: RX DMA queues 650 * @rx_queue: RX DMA queues
643 * @channel: Channels 651 * @channel: Channels
652 * @channel_name: Names for channels and their IRQs
653 * @rxq_entries: Size of receive queues requested by user.
654 * @txq_entries: Size of transmit queues requested by user.
644 * @next_buffer_table: First available buffer table id 655 * @next_buffer_table: First available buffer table id
645 * @n_channels: Number of channels in use 656 * @n_channels: Number of channels in use
646 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 657 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
647 * @n_tx_channels: Number of channels used for TX 658 * @n_tx_channels: Number of channels used for TX
648 * @rx_buffer_len: RX buffer length 659 * @rx_buffer_len: RX buffer length
649 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 660 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
661 * @rx_hash_key: Toeplitz hash key for RSS
650 * @rx_indir_table: Indirection table for RSS 662 * @rx_indir_table: Indirection table for RSS
651 * @int_error_count: Number of internal errors seen recently 663 * @int_error_count: Number of internal errors seen recently
652 * @int_error_expire: Time at which error count will be expired 664 * @int_error_expire: Time at which error count will be expired
653 * @irq_status: Interrupt status buffer 665 * @irq_status: Interrupt status buffer
654 * @last_irq_cpu: Last CPU to handle interrupt.
655 * This register is written with the SMP processor ID whenever an
656 * interrupt is handled. It is used by efx_nic_test_interrupt()
657 * to verify that an interrupt has occurred.
658 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 666 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
659 * @fatal_irq_level: IRQ level (bit number) used for serious errors 667 * @fatal_irq_level: IRQ level (bit number) used for serious errors
660 * @spi_flash: SPI flash device
661 * This field will be %NULL if no flash device is present (or for Siena).
662 * @spi_eeprom: SPI EEPROM device
663 * This field will be %NULL if no EEPROM device is present (or for Siena).
664 * @spi_lock: SPI bus lock
665 * @mtd_list: List of MTDs attached to the NIC 668 * @mtd_list: List of MTDs attached to the NIC
666 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 669 * @nic_data: Hardware dependent state
667 * @nic_data: Hardware dependant state
668 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 670 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
669 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 671 * efx_monitor() and efx_reconfigure_port()
670 * @port_enabled: Port enabled indicator. 672 * @port_enabled: Port enabled indicator.
671 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and 673 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
672 * efx_mac_work() with kernel interfaces. Safe to read under any 674 * efx_mac_work() with kernel interfaces. Safe to read under any
673 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 675 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
674 * be held to modify it. 676 * be held to modify it.
675 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
676 * @port_initialized: Port initialized? 677 * @port_initialized: Port initialized?
677 * @net_dev: Operating system network device. Consider holding the rtnl lock 678 * @net_dev: Operating system network device. Consider holding the rtnl lock
678 * @rx_checksum_enabled: RX checksumming enabled
679 * @mac_stats: MAC statistics. These include all statistics the MACs
680 * can provide. Generic code converts these into a standard
681 * &struct net_device_stats.
682 * @stats_buffer: DMA buffer for statistics 679 * @stats_buffer: DMA buffer for statistics
683 * @stats_lock: Statistics update lock. Serialises statistics fetches
684 * @mac_op: MAC interface 680 * @mac_op: MAC interface
685 * @mac_address: Permanent MAC address
686 * @phy_type: PHY type 681 * @phy_type: PHY type
687 * @mdio_lock: MDIO lock
688 * @phy_op: PHY interface 682 * @phy_op: PHY interface
689 * @phy_data: PHY private data (including PHY-specific stats) 683 * @phy_data: PHY private data (including PHY-specific stats)
690 * @mdio: PHY MDIO interface 684 * @mdio: PHY MDIO interface
691 * @mdio_bus: PHY MDIO bus ID (only used by Siena) 685 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
692 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 686 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
693 * @xmac_poll_required: XMAC link state needs polling
694 * @link_advertising: Autonegotiation advertising flags 687 * @link_advertising: Autonegotiation advertising flags
695 * @link_state: Current state of the link 688 * @link_state: Current state of the link
696 * @n_link_state_changes: Number of times the link has changed state 689 * @n_link_state_changes: Number of times the link has changed state
@@ -701,21 +694,34 @@ union efx_multicast_hash {
701 * @loopback_mode: Loopback status 694 * @loopback_mode: Loopback status
702 * @loopback_modes: Supported loopback mode bitmask 695 * @loopback_modes: Supported loopback mode bitmask
703 * @loopback_selftest: Offline self-test private state 696 * @loopback_selftest: Offline self-test private state
697 * @monitor_work: Hardware monitor workitem
698 * @biu_lock: BIU (bus interface unit) lock
699 * @last_irq_cpu: Last CPU to handle interrupt.
700 * This register is written with the SMP processor ID whenever an
701 * interrupt is handled. It is used by efx_nic_test_interrupt()
702 * to verify that an interrupt has occurred.
703 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
704 * @mac_stats: MAC statistics. These include all statistics the MACs
705 * can provide. Generic code converts these into a standard
706 * &struct net_device_stats.
707 * @stats_lock: Statistics update lock. Serialises statistics fetches
704 * 708 *
705 * This is stored in the private area of the &struct net_device. 709 * This is stored in the private area of the &struct net_device.
706 */ 710 */
707struct efx_nic { 711struct efx_nic {
712 /* The following fields should be written very rarely */
713
708 char name[IFNAMSIZ]; 714 char name[IFNAMSIZ];
709 struct pci_dev *pci_dev; 715 struct pci_dev *pci_dev;
710 const struct efx_nic_type *type; 716 const struct efx_nic_type *type;
711 int legacy_irq; 717 int legacy_irq;
718 bool legacy_irq_enabled;
712 struct workqueue_struct *workqueue; 719 struct workqueue_struct *workqueue;
713 char workqueue_name[16]; 720 char workqueue_name[16];
714 struct work_struct reset_work; 721 struct work_struct reset_work;
715 struct delayed_work monitor_work;
716 resource_size_t membase_phys; 722 resource_size_t membase_phys;
717 void __iomem *membase; 723 void __iomem *membase;
718 spinlock_t biu_lock; 724
719 enum efx_int_mode interrupt_mode; 725 enum efx_int_mode interrupt_mode;
720 bool irq_rx_adaptive; 726 bool irq_rx_adaptive;
721 unsigned int irq_rx_moderation; 727 unsigned int irq_rx_moderation;
@@ -724,13 +730,15 @@ struct efx_nic {
724 enum nic_state state; 730 enum nic_state state;
725 enum reset_type reset_pending; 731 enum reset_type reset_pending;
726 732
727 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES]; 733 struct efx_channel *channel[EFX_MAX_CHANNELS];
728 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 734 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
729 struct efx_channel channel[EFX_MAX_CHANNELS];
730 735
736 unsigned rxq_entries;
737 unsigned txq_entries;
731 unsigned next_buffer_table; 738 unsigned next_buffer_table;
732 unsigned n_channels; 739 unsigned n_channels;
733 unsigned n_rx_channels; 740 unsigned n_rx_channels;
741 unsigned tx_channel_offset;
734 unsigned n_tx_channels; 742 unsigned n_tx_channels;
735 unsigned int rx_buffer_len; 743 unsigned int rx_buffer_len;
736 unsigned int rx_buffer_order; 744 unsigned int rx_buffer_order;
@@ -741,59 +749,57 @@ struct efx_nic {
741 unsigned long int_error_expire; 749 unsigned long int_error_expire;
742 750
743 struct efx_buffer irq_status; 751 struct efx_buffer irq_status;
744 volatile signed int last_irq_cpu;
745 unsigned irq_zero_count; 752 unsigned irq_zero_count;
746 unsigned fatal_irq_level; 753 unsigned fatal_irq_level;
747 754
748 struct efx_spi_device *spi_flash;
749 struct efx_spi_device *spi_eeprom;
750 struct mutex spi_lock;
751#ifdef CONFIG_SFC_MTD 755#ifdef CONFIG_SFC_MTD
752 struct list_head mtd_list; 756 struct list_head mtd_list;
753#endif 757#endif
754 758
755 unsigned n_rx_nodesc_drop_cnt;
756
757 void *nic_data; 759 void *nic_data;
758 760
759 struct mutex mac_lock; 761 struct mutex mac_lock;
760 struct work_struct mac_work; 762 struct work_struct mac_work;
761 bool port_enabled; 763 bool port_enabled;
762 bool port_inhibited;
763 764
764 bool port_initialized; 765 bool port_initialized;
765 struct net_device *net_dev; 766 struct net_device *net_dev;
766 bool rx_checksum_enabled;
767 767
768 struct efx_mac_stats mac_stats;
769 struct efx_buffer stats_buffer; 768 struct efx_buffer stats_buffer;
770 spinlock_t stats_lock;
771 769
772 struct efx_mac_operations *mac_op; 770 const struct efx_mac_operations *mac_op;
773 unsigned char mac_address[ETH_ALEN];
774 771
775 unsigned int phy_type; 772 unsigned int phy_type;
776 struct mutex mdio_lock; 773 const struct efx_phy_operations *phy_op;
777 struct efx_phy_operations *phy_op;
778 void *phy_data; 774 void *phy_data;
779 struct mdio_if_info mdio; 775 struct mdio_if_info mdio;
780 unsigned int mdio_bus; 776 unsigned int mdio_bus;
781 enum efx_phy_mode phy_mode; 777 enum efx_phy_mode phy_mode;
782 778
783 bool xmac_poll_required;
784 u32 link_advertising; 779 u32 link_advertising;
785 struct efx_link_state link_state; 780 struct efx_link_state link_state;
786 unsigned int n_link_state_changes; 781 unsigned int n_link_state_changes;
787 782
788 bool promiscuous; 783 bool promiscuous;
789 union efx_multicast_hash multicast_hash; 784 union efx_multicast_hash multicast_hash;
790 enum efx_fc_type wanted_fc; 785 u8 wanted_fc;
791 786
792 atomic_t rx_reset; 787 atomic_t rx_reset;
793 enum efx_loopback_mode loopback_mode; 788 enum efx_loopback_mode loopback_mode;
794 u64 loopback_modes; 789 u64 loopback_modes;
795 790
796 void *loopback_selftest; 791 void *loopback_selftest;
792
793 struct efx_filter_state *filter_state;
794
795 /* The following fields may be written more often */
796
797 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
798 spinlock_t biu_lock;
799 volatile signed int last_irq_cpu;
800 unsigned n_rx_nodesc_drop_cnt;
801 struct efx_mac_stats mac_stats;
802 spinlock_t stats_lock;
797}; 803};
798 804
799static inline int efx_dev_registered(struct efx_nic *efx) 805static inline int efx_dev_registered(struct efx_nic *efx)
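The struct efx_nic reshuffle above is a cache-layout change, not a functional one: fields written rarely after probe stay at the front, while frequently-written state (monitor_work, biu_lock, stats) moves behind ____cacheline_aligned_in_smp at the end, so fast-path readers and statistics writers stop sharing cache lines. A userspace sketch of the idea, using a plain 64-byte alignment attribute in place of the kernel macro:

#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed line size */

struct nic {
	/* Read-mostly after initialisation. */
	char name[16];
	unsigned int n_channels;

	/* Frequently written: starts on its own cache line. */
	unsigned long stats_updates __attribute__((aligned(CACHELINE)));
	unsigned long irq_count;
};

int main(void)
{
	printf("hot fields begin at offset %zu\n",
	       offsetof(struct nic, stats_updates));
	return 0;
}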
@@ -826,6 +832,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
826 * be called while the controller is uninitialised. 832 * be called while the controller is uninitialised.
827 * @probe_port: Probe the MAC and PHY 833 * @probe_port: Probe the MAC and PHY
828 * @remove_port: Free resources allocated by probe_port() 834 * @remove_port: Free resources allocated by probe_port()
835 * @handle_global_event: Handle a "global" event (may be %NULL)
829 * @prepare_flush: Prepare the hardware for flushing the DMA queues 836 * @prepare_flush: Prepare the hardware for flushing the DMA queues
830 * @update_stats: Update statistics not provided by event handling 837 * @update_stats: Update statistics not provided by event handling
831 * @start_stats: Start the regular fetching of statistics 838 * @start_stats: Start the regular fetching of statistics
@@ -870,6 +877,7 @@ struct efx_nic_type {
870 int (*reset)(struct efx_nic *efx, enum reset_type method); 877 int (*reset)(struct efx_nic *efx, enum reset_type method);
871 int (*probe_port)(struct efx_nic *efx); 878 int (*probe_port)(struct efx_nic *efx);
872 void (*remove_port)(struct efx_nic *efx); 879 void (*remove_port)(struct efx_nic *efx);
880 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
873 void (*prepare_flush)(struct efx_nic *efx); 881 void (*prepare_flush)(struct efx_nic *efx);
874 void (*update_stats)(struct efx_nic *efx); 882 void (*update_stats)(struct efx_nic *efx);
875 void (*start_stats)(struct efx_nic *efx); 883 void (*start_stats)(struct efx_nic *efx);
@@ -883,7 +891,7 @@ struct efx_nic_type {
883 void (*resume_wol)(struct efx_nic *efx); 891 void (*resume_wol)(struct efx_nic *efx);
884 int (*test_registers)(struct efx_nic *efx); 892 int (*test_registers)(struct efx_nic *efx);
885 int (*test_nvram)(struct efx_nic *efx); 893 int (*test_nvram)(struct efx_nic *efx);
886 struct efx_mac_operations *default_mac_ops; 894 const struct efx_mac_operations *default_mac_ops;
887 895
888 int revision; 896 int revision;
889 unsigned int mem_map_size; 897 unsigned int mem_map_size;
@@ -899,7 +907,7 @@ struct efx_nic_type {
899 unsigned int phys_addr_channels; 907 unsigned int phys_addr_channels;
900 unsigned int tx_dc_base; 908 unsigned int tx_dc_base;
901 unsigned int rx_dc_base; 909 unsigned int rx_dc_base;
902 unsigned long offload_features; 910 u32 offload_features;
903 u32 reset_world_flags; 911 u32 reset_world_flags;
904}; 912};
905 913
@@ -909,39 +917,102 @@ struct efx_nic_type {
909 * 917 *
910 *************************************************************************/ 918 *************************************************************************/
911 919
920static inline struct efx_channel *
921efx_get_channel(struct efx_nic *efx, unsigned index)
922{
923 EFX_BUG_ON_PARANOID(index >= efx->n_channels);
924 return efx->channel[index];
925}
926
912/* Iterate over all used channels */ 927/* Iterate over all used channels */
913#define efx_for_each_channel(_channel, _efx) \ 928#define efx_for_each_channel(_channel, _efx) \
914 for (_channel = &((_efx)->channel[0]); \ 929 for (_channel = (_efx)->channel[0]; \
915 _channel < &((_efx)->channel[(efx)->n_channels]); \ 930 _channel; \
916 _channel++) 931 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
917 932 (_efx)->channel[_channel->channel + 1] : NULL)
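Because channels are now separately allocated and held as an array of pointers, the rewritten efx_for_each_channel() advances by looking up the next pointer and terminates on either n_channels or a NULL slot, instead of doing pointer arithmetic over an embedded array. A compilable sketch of the same macro shape over a plain pointer array:

#include <stdio.h>

struct channel { int channel; };

struct nic {
	struct channel *channel[4];
	int n_channels;
};

#define for_each_channel(_c, _nic)					\
	for (_c = (_nic)->channel[0];					\
	     _c;							\
	     _c = (_c->channel + 1 < (_nic)->n_channels) ?		\
		  (_nic)->channel[_c->channel + 1] : NULL)

int main(void)
{
	struct channel c0 = { 0 }, c1 = { 1 };
	struct nic nic = { .channel = { &c0, &c1 }, .n_channels = 2 };
	struct channel *ch;

	for_each_channel(ch, &nic)
		printf("channel %d\n", ch->channel);
	return 0;
}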
918/* Iterate over all used TX queues */ 933
919#define efx_for_each_tx_queue(_tx_queue, _efx) \ 934static inline struct efx_tx_queue *
920 for (_tx_queue = &((_efx)->tx_queue[0]); \ 935efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
921 _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \ 936{
922 (_efx)->n_tx_channels]); \ 937 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
923 _tx_queue++) 938 type >= EFX_TXQ_TYPES);
939 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
940}
941
942static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
943{
944 return channel->channel - channel->efx->tx_channel_offset <
945 channel->efx->n_tx_channels;
946}
947
948static inline struct efx_tx_queue *
949efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
950{
951 EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
952 type >= EFX_TXQ_TYPES);
953 return &channel->tx_queue[type];
954}
955
956static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
957{
958 return !(tx_queue->efx->net_dev->num_tc < 2 &&
959 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
960}
924 961
925/* Iterate over all TX queues belonging to a channel */ 962/* Iterate over all TX queues belonging to a channel */
926#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 963#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
964 if (!efx_channel_has_tx_queues(_channel)) \
965 ; \
966 else \
967 for (_tx_queue = (_channel)->tx_queue; \
968 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
969 efx_tx_queue_used(_tx_queue); \
970 _tx_queue++)
971
972/* Iterate over all possible TX queues belonging to a channel */
973#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
927 for (_tx_queue = (_channel)->tx_queue; \ 974 for (_tx_queue = (_channel)->tx_queue; \
928 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 975 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
929 _tx_queue++) 976 _tx_queue++)
930 977
931/* Iterate over all used RX queues */ 978static inline struct efx_rx_queue *
932#define efx_for_each_rx_queue(_rx_queue, _efx) \ 979efx_get_rx_queue(struct efx_nic *efx, unsigned index)
933 for (_rx_queue = &((_efx)->rx_queue[0]); \ 980{
934 _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \ 981 EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
935 _rx_queue++) 982 return &efx->channel[index]->rx_queue;
983}
984
985static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
986{
987 return channel->channel < channel->efx->n_rx_channels;
988}
989
990static inline struct efx_rx_queue *
991efx_channel_get_rx_queue(struct efx_channel *channel)
992{
993 EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
994 return &channel->rx_queue;
995}
936 996
937/* Iterate over all RX queues belonging to a channel */ 997/* Iterate over all RX queues belonging to a channel */
938#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 998#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
939 for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \ 999 if (!efx_channel_has_rx_queue(_channel)) \
940 _rx_queue; \ 1000 ; \
941 _rx_queue = NULL) \ 1001 else \
942 if (_rx_queue->channel != (_channel)) \ 1002 for (_rx_queue = &(_channel)->rx_queue; \
943 continue; \ 1003 _rx_queue; \
944 else 1004 _rx_queue = NULL)
1005
1006static inline struct efx_channel *
1007efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
1008{
1009 return container_of(rx_queue, struct efx_channel, rx_queue);
1010}
1011
1012static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
1013{
1014 return efx_rx_queue_channel(rx_queue)->channel;
1015}
945 1016
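efx_rx_queue_channel() recovers the owning channel from an embedded RX queue with container_of(), which is why the queue could lose its @queue index and @channel back-pointer in this patch. The pattern reduced to standalone C, with container_of() spelled out via offsetof():

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue { int added_count; };

struct channel {
	int channel;
	struct rx_queue rx_queue;	/* embedded, not pointed to */
};

int main(void)
{
	struct channel ch = { .channel = 3 };
	struct rx_queue *rxq = &ch.rx_queue;

	printf("rx queue index %d\n",
	       container_of(rxq, struct channel, rx_queue)->channel);
	return 0;
}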
946/* Returns a pointer to the specified receive buffer in the RX 1017/* Returns a pointer to the specified receive buffer in the RX
947 * descriptor queue. 1018 * descriptor queue.
@@ -949,7 +1020,7 @@ struct efx_nic_type {
949static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, 1020static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
950 unsigned int index) 1021 unsigned int index)
951{ 1022{
952 return (&rx_queue->buffer[index]); 1023 return &rx_queue->buffer[index];
953} 1024}
954 1025
955/* Set bit in a little-endian bitfield */ 1026/* Set bit in a little-endian bitfield */
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f595d920c7c4..f2a2b947f860 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
41#define RX_DC_ENTRIES 64 41#define RX_DC_ENTRIES 64
42#define RX_DC_ENTRIES_ORDER 3 42#define RX_DC_ENTRIES_ORDER 3
43 43
44/* RX FIFO XOFF watermark
45 *
46 * When the amount of the RX FIFO increases used increases past this
47 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
48 * This also has an effect on RX/TX arbitration
49 */
50int efx_nic_rx_xoff_thresh = -1;
51module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
52MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
53
54/* RX FIFO XON watermark
55 *
56 * When the amount of the RX FIFO used decreases below this
57 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
58 * This also has an effect on RX/TX arbitration
59 */
60int efx_nic_rx_xon_thresh = -1;
61module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
62MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
63
64/* If EFX_MAX_INT_ERRORS internal errors occur within 44/* If EFX_MAX_INT_ERRORS internal errors occur within
65 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 45 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
66 * disable it. 46 * disable it.
@@ -104,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
104static inline efx_qword_t *efx_event(struct efx_channel *channel, 84static inline efx_qword_t *efx_event(struct efx_channel *channel,
105 unsigned int index) 85 unsigned int index)
106{ 86{
107 return (((efx_qword_t *) (channel->eventq.addr)) + index); 87 return ((efx_qword_t *) (channel->eventq.addr)) +
88 (index & channel->eventq_mask);
108} 89}
109 90
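efx_event() now masks the caller's index with eventq_mask (ring size minus one), so read pointers can increase monotonically and still land inside the ring, provided the size is a power of two. The arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int entries = 8;		/* must be a power of two */
	unsigned int mask = entries - 1;
	unsigned int ptr;

	for (ptr = 5; ptr < 11; ptr++)		/* wraps past entry 7 */
		printf("ptr %2u -> slot %u\n", ptr, ptr & mask);
	return 0;
}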
110/* See if an event is present 91/* See if an event is present
@@ -119,8 +100,8 @@ static inline efx_qword_t *efx_event(struct efx_channel *channel,
119 */ 100 */
120static inline int efx_event_present(efx_qword_t *event) 101static inline int efx_event_present(efx_qword_t *event)
121{ 102{
122 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | 103 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
123 EFX_DWORD_IS_ALL_ONES(event->dword[1]))); 104 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
124} 105}
125 106
126static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 107static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
@@ -263,8 +244,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
263{ 244{
264 len = ALIGN(len, EFX_BUF_SIZE); 245 len = ALIGN(len, EFX_BUF_SIZE);
265 246
266 buffer->addr = pci_alloc_consistent(efx->pci_dev, len, 247 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
267 &buffer->dma_addr); 248 &buffer->dma_addr, GFP_KERNEL);
268 if (!buffer->addr) 249 if (!buffer->addr)
269 return -ENOMEM; 250 return -ENOMEM;
270 buffer->len = len; 251 buffer->len = len;
@@ -301,8 +282,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
301 (u64)buffer->dma_addr, buffer->len, 282 (u64)buffer->dma_addr, buffer->len,
302 buffer->addr, (u64)virt_to_phys(buffer->addr)); 283 buffer->addr, (u64)virt_to_phys(buffer->addr));
303 284
304 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, 285 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
305 buffer->dma_addr); 286 buffer->dma_addr);
306 buffer->addr = NULL; 287 buffer->addr = NULL;
307 buffer->entries = 0; 288 buffer->entries = 0;
308} 289}
@@ -347,7 +328,7 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
347static inline efx_qword_t * 328static inline efx_qword_t *
348efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 329efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
349{ 330{
350 return (((efx_qword_t *) (tx_queue->txd.addr)) + index); 331 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
351} 332}
352 333
353/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 334/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
@@ -356,12 +337,41 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
356 unsigned write_ptr; 337 unsigned write_ptr;
357 efx_dword_t reg; 338 efx_dword_t reg;
358 339
359 write_ptr = tx_queue->write_count & EFX_TXQ_MASK; 340 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
360 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); 341 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
361 efx_writed_page(tx_queue->efx, &reg, 342 efx_writed_page(tx_queue->efx, &reg,
362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 343 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
363} 344}
364 345
346/* Write pointer and first descriptor for TX descriptor ring */
347static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
348 const efx_qword_t *txd)
349{
350 unsigned write_ptr;
351 efx_oword_t reg;
352
353 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
354 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
355
356 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
357 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
358 FRF_AZ_TX_DESC_WPTR, write_ptr);
359 reg.qword[0] = *txd;
360 efx_writeo_page(tx_queue->efx, &reg,
361 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
362}
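The two BUILD_BUG_ON() lines pin the layout assumptions that make this single-write descriptor push safe: the descriptor must start at bit 0 of the doorbell register, and the Falcon-A and B-series register offsets must coincide. The same compile-time check in portable C11, with a hypothetical constant:

#include <stdio.h>

#define TX_DESC_LBN 0	/* hypothetical layout constant */

/* Analogue of the kernel's BUILD_BUG_ON(): fails the build, never the run. */
_Static_assert(TX_DESC_LBN == 0,
	       "descriptor field must start at bit 0 for the push write");

int main(void)
{
	puts("layout assumptions verified at compile time");
	return 0;
}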
363
364static inline bool
365efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
366{
367 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
368
369 if (empty_read_count == 0)
370 return false;
371
372 tx_queue->empty_read_count = 0;
373 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
374}
365 375
366/* For each entry inserted into the software descriptor ring, create a 376/* For each entry inserted into the software descriptor ring, create a
367 * descriptor in the hardware TX descriptor ring (in host memory), and 377 * descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,11 +383,12 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
373 struct efx_tx_buffer *buffer; 383 struct efx_tx_buffer *buffer;
374 efx_qword_t *txd; 384 efx_qword_t *txd;
375 unsigned write_ptr; 385 unsigned write_ptr;
386 unsigned old_write_count = tx_queue->write_count;
376 387
377 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 388 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
378 389
379 do { 390 do {
380 write_ptr = tx_queue->write_count & EFX_TXQ_MASK; 391 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
381 buffer = &tx_queue->buffer[write_ptr]; 392 buffer = &tx_queue->buffer[write_ptr];
382 txd = efx_tx_desc(tx_queue, write_ptr); 393 txd = efx_tx_desc(tx_queue, write_ptr);
383 ++tx_queue->write_count; 394 ++tx_queue->write_count;
@@ -391,23 +402,32 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
391 } while (tx_queue->write_count != tx_queue->insert_count); 402 } while (tx_queue->write_count != tx_queue->insert_count);
392 403
393 wmb(); /* Ensure descriptors are written before they are fetched */ 404 wmb(); /* Ensure descriptors are written before they are fetched */
394 efx_notify_tx_desc(tx_queue); 405
406 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
407 txd = efx_tx_desc(tx_queue,
408 old_write_count & tx_queue->ptr_mask);
409 efx_push_tx_desc(tx_queue, txd);
410 ++tx_queue->pushes;
411 } else {
412 efx_notify_tx_desc(tx_queue);
413 }
395} 414}
396 415
397/* Allocate hardware resources for a TX queue */ 416/* Allocate hardware resources for a TX queue */
398int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 417int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
399{ 418{
400 struct efx_nic *efx = tx_queue->efx; 419 struct efx_nic *efx = tx_queue->efx;
401 BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 || 420 unsigned entries;
402 EFX_TXQ_SIZE & EFX_TXQ_MASK); 421
422 entries = tx_queue->ptr_mask + 1;
403 return efx_alloc_special_buffer(efx, &tx_queue->txd, 423 return efx_alloc_special_buffer(efx, &tx_queue->txd,
404 EFX_TXQ_SIZE * sizeof(efx_qword_t)); 424 entries * sizeof(efx_qword_t));
405} 425}
406 426
407void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 427void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
408{ 428{
409 efx_oword_t tx_desc_ptr;
410 struct efx_nic *efx = tx_queue->efx; 429 struct efx_nic *efx = tx_queue->efx;
430 efx_oword_t reg;
411 431
412 tx_queue->flushed = FLUSH_NONE; 432 tx_queue->flushed = FLUSH_NONE;
413 433
@@ -415,7 +435,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
415 efx_init_special_buffer(efx, &tx_queue->txd); 435 efx_init_special_buffer(efx, &tx_queue->txd);
416 436
417 /* Push TX descriptor ring to card */ 437 /* Push TX descriptor ring to card */
418 EFX_POPULATE_OWORD_10(tx_desc_ptr, 438 EFX_POPULATE_OWORD_10(reg,
419 FRF_AZ_TX_DESCQ_EN, 1, 439 FRF_AZ_TX_DESCQ_EN, 1,
420 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 440 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
421 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 441 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -431,17 +451,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
431 451
432 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 452 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
433 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 453 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
434 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 454 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
435 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 455 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
436 !csum); 456 !csum);
437 } 457 }
438 458
439 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 459 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
440 tx_queue->queue); 460 tx_queue->queue);
441 461
442 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 462 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
443 efx_oword_t reg;
444
445 /* Only 128 bits in this register */ 463 /* Only 128 bits in this register */
446 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 464 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
447 465
@@ -452,6 +470,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
452 set_bit_le(tx_queue->queue, (void *)&reg); 470 set_bit_le(tx_queue->queue, (void *)&reg);
453 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); 471 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
454 } 472 }
473
474 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
475 EFX_POPULATE_OWORD_1(reg,
476 FRF_BZ_TX_PACE,
477 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
478 FFE_BZ_TX_PACE_OFF :
479 FFE_BZ_TX_PACE_RESERVED);
480 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
481 tx_queue->queue);
482 }
455} 483}
456 484
457static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 485static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -501,7 +529,7 @@ void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
501static inline efx_qword_t * 529static inline efx_qword_t *
502efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 530efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
503{ 531{
504 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index); 532 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
505} 533}
506 534
507/* This creates an entry in the RX descriptor queue */ 535/* This creates an entry in the RX descriptor queue */
@@ -526,30 +554,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
526 */ 554 */
527void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 555void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
528{ 556{
557 struct efx_nic *efx = rx_queue->efx;
529 efx_dword_t reg; 558 efx_dword_t reg;
530 unsigned write_ptr; 559 unsigned write_ptr;
531 560
532 while (rx_queue->notified_count != rx_queue->added_count) { 561 while (rx_queue->notified_count != rx_queue->added_count) {
533 efx_build_rx_desc(rx_queue, 562 efx_build_rx_desc(
534 rx_queue->notified_count & 563 rx_queue,
535 EFX_RXQ_MASK); 564 rx_queue->notified_count & rx_queue->ptr_mask);
536 ++rx_queue->notified_count; 565 ++rx_queue->notified_count;
537 } 566 }
538 567
539 wmb(); 568 wmb();
540 write_ptr = rx_queue->added_count & EFX_RXQ_MASK; 569 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
541 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 570 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
542 efx_writed_page(rx_queue->efx, &reg, 571 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
543 FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue); 572 efx_rx_queue_index(rx_queue));
544} 573}
545 574
546int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 575int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
547{ 576{
548 struct efx_nic *efx = rx_queue->efx; 577 struct efx_nic *efx = rx_queue->efx;
549 BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 || 578 unsigned entries;
550 EFX_RXQ_SIZE & EFX_RXQ_MASK); 579
580 entries = rx_queue->ptr_mask + 1;
551 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 581 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
552 EFX_RXQ_SIZE * sizeof(efx_qword_t)); 582 entries * sizeof(efx_qword_t));
553} 583}
554 584
555void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 585void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
@@ -561,7 +591,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
561 591
562 netif_dbg(efx, hw, efx->net_dev, 592 netif_dbg(efx, hw, efx->net_dev,
563 "RX queue %d ring in special buffers %d-%d\n", 593 "RX queue %d ring in special buffers %d-%d\n",
564 rx_queue->queue, rx_queue->rxd.index, 594 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
565 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 595 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
566 596
567 rx_queue->flushed = FLUSH_NONE; 597 rx_queue->flushed = FLUSH_NONE;
@@ -575,9 +605,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
575 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 605 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
576 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 606 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
577 FRF_AZ_RX_DESCQ_EVQ_ID, 607 FRF_AZ_RX_DESCQ_EVQ_ID,
578 rx_queue->channel->channel, 608 efx_rx_queue_channel(rx_queue)->channel,
579 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 609 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
580 FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue, 610 FRF_AZ_RX_DESCQ_LABEL,
611 efx_rx_queue_index(rx_queue),
581 FRF_AZ_RX_DESCQ_SIZE, 612 FRF_AZ_RX_DESCQ_SIZE,
582 __ffs(rx_queue->rxd.entries), 613 __ffs(rx_queue->rxd.entries),
583 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 614 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
@@ -585,7 +616,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
585 FRF_AZ_RX_DESCQ_JUMBO, !is_b0, 616 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
586 FRF_AZ_RX_DESCQ_EN, 1); 617 FRF_AZ_RX_DESCQ_EN, 1);
587 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 618 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
588 rx_queue->queue); 619 efx_rx_queue_index(rx_queue));
589} 620}
590 621
591static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 622static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -598,7 +629,8 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
598 /* Post a flush command */ 629 /* Post a flush command */
599 EFX_POPULATE_OWORD_2(rx_flush_descq, 630 EFX_POPULATE_OWORD_2(rx_flush_descq,
600 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 631 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
601 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue); 632 FRF_AZ_RX_FLUSH_DESCQ,
633 efx_rx_queue_index(rx_queue));
602 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 634 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
603} 635}
604 636
@@ -613,7 +645,7 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
613 /* Remove RX descriptor ring from card */ 645 /* Remove RX descriptor ring from card */
614 EFX_ZERO_OWORD(rx_desc_ptr); 646 EFX_ZERO_OWORD(rx_desc_ptr);
615 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 647 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
616 rx_queue->queue); 648 efx_rx_queue_index(rx_queue));
617 649
618 /* Unpin RX descriptor ring */ 650 /* Unpin RX descriptor ring */
619 efx_fini_special_buffer(efx, &rx_queue->rxd); 651 efx_fini_special_buffer(efx, &rx_queue->rxd);
@@ -642,13 +674,14 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
642 efx_dword_t reg; 674 efx_dword_t reg;
643 struct efx_nic *efx = channel->efx; 675 struct efx_nic *efx = channel->efx;
644 676
645 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); 677 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
678 channel->eventq_read_ptr & channel->eventq_mask);
646 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, 679 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
647 channel->channel); 680 channel->channel);
648} 681}
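
eventq_read_ptr is now free-running and is masked with eventq_mask only where it is used, as in the EVQ_RPTR write above and the bare ++read_ptr further down. A small sketch of why this is safe: with unsigned arithmetic, differences between two free-running counters stay correct across wraparound. The counter values are invented.

#include <stdio.h>

#define PTR_MASK 1023u	/* ring of 1024 entries */

int main(void)
{
	unsigned int added = 0xfffffffeu;	/* about to wrap */
	unsigned int removed = 0xfffffff0u;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int fill = added - removed;	/* wrap-safe */

		printf("index %u, fill level %u\n", added & PTR_MASK, fill);
		++added;	/* never masked here, only at point of use */
	}
	return 0;
}
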
649 682
650/* Use HW to insert a SW defined event */ 683/* Use HW to insert a SW defined event */
651void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) 684static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
652{ 685{
653 efx_oword_t drv_ev_reg; 686 efx_oword_t drv_ev_reg;
654 687
@@ -680,15 +713,17 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
680 /* Transmit completion */ 713 /* Transmit completion */
681 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 714 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
682 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 715 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
683 tx_queue = &efx->tx_queue[tx_ev_q_label]; 716 tx_queue = efx_channel_get_tx_queue(
717 channel, tx_ev_q_label % EFX_TXQ_TYPES);
684 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 718 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
685 EFX_TXQ_MASK); 719 tx_queue->ptr_mask);
686 channel->irq_mod_score += tx_packets; 720 channel->irq_mod_score += tx_packets;
687 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 721 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
688 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 722 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
689 /* Rewrite the FIFO write pointer */ 723 /* Rewrite the FIFO write pointer */
690 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 724 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
691 tx_queue = &efx->tx_queue[tx_ev_q_label]; 725 tx_queue = efx_channel_get_tx_queue(
726 channel, tx_ev_q_label % EFX_TXQ_TYPES);
692 727
693 if (efx_dev_registered(efx)) 728 if (efx_dev_registered(efx))
694 netif_tx_lock(efx->net_dev); 729 netif_tx_lock(efx->net_dev);
@@ -714,6 +749,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
714 bool *rx_ev_pkt_ok, 749 bool *rx_ev_pkt_ok,
715 bool *discard) 750 bool *discard)
716{ 751{
752 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
717 struct efx_nic *efx = rx_queue->efx; 753 struct efx_nic *efx = rx_queue->efx;
718 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 754 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
719 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 755 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
@@ -746,14 +782,14 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
746 /* Count errors that are not in MAC stats. Ignore expected 782 /* Count errors that are not in MAC stats. Ignore expected
747 * checksum errors during self-test. */ 783 * checksum errors during self-test. */
748 if (rx_ev_frm_trunc) 784 if (rx_ev_frm_trunc)
749 ++rx_queue->channel->n_rx_frm_trunc; 785 ++channel->n_rx_frm_trunc;
750 else if (rx_ev_tobe_disc) 786 else if (rx_ev_tobe_disc)
751 ++rx_queue->channel->n_rx_tobe_disc; 787 ++channel->n_rx_tobe_disc;
752 else if (!efx->loopback_selftest) { 788 else if (!efx->loopback_selftest) {
753 if (rx_ev_ip_hdr_chksum_err) 789 if (rx_ev_ip_hdr_chksum_err)
754 ++rx_queue->channel->n_rx_ip_hdr_chksum_err; 790 ++channel->n_rx_ip_hdr_chksum_err;
755 else if (rx_ev_tcp_udp_chksum_err) 791 else if (rx_ev_tcp_udp_chksum_err)
756 ++rx_queue->channel->n_rx_tcp_udp_chksum_err; 792 ++channel->n_rx_tcp_udp_chksum_err;
757 } 793 }
758 794
759 /* The frame must be discarded if any of these are true. */ 795 /* The frame must be discarded if any of these are true. */
@@ -769,7 +805,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
769 netif_dbg(efx, rx_err, efx->net_dev, 805 netif_dbg(efx, rx_err, efx->net_dev,
770 " RX queue %d unexpected RX event " 806 " RX queue %d unexpected RX event "
771 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 807 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
772 rx_queue->queue, EFX_QWORD_VAL(*event), 808 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
773 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 809 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
774 rx_ev_ip_hdr_chksum_err ? 810 rx_ev_ip_hdr_chksum_err ?
775 " [IP_HDR_CHKSUM_ERR]" : "", 811 " [IP_HDR_CHKSUM_ERR]" : "",
@@ -791,8 +827,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
791 struct efx_nic *efx = rx_queue->efx; 827 struct efx_nic *efx = rx_queue->efx;
792 unsigned expected, dropped; 828 unsigned expected, dropped;
793 829
794 expected = rx_queue->removed_count & EFX_RXQ_MASK; 830 expected = rx_queue->removed_count & rx_queue->ptr_mask;
795 dropped = (index - expected) & EFX_RXQ_MASK; 831 dropped = (index - expected) & rx_queue->ptr_mask;
796 netif_info(efx, rx_err, efx->net_dev, 832 netif_info(efx, rx_err, efx->net_dev,
797 "dropped %d events (index=%d expected=%d)\n", 833 "dropped %d events (index=%d expected=%d)\n",
798 dropped, index, expected); 834 dropped, index, expected);
@@ -816,7 +852,6 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
816 unsigned expected_ptr; 852 unsigned expected_ptr;
817 bool rx_ev_pkt_ok, discard = false, checksummed; 853 bool rx_ev_pkt_ok, discard = false, checksummed;
818 struct efx_rx_queue *rx_queue; 854 struct efx_rx_queue *rx_queue;
819 struct efx_nic *efx = channel->efx;
820 855
821 /* Basic packet information */ 856 /* Basic packet information */
822 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 857 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -827,10 +862,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
827 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 862 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
828 channel->channel); 863 channel->channel);
829 864
830 rx_queue = &efx->rx_queue[channel->channel]; 865 rx_queue = efx_channel_get_rx_queue(channel);
831 866
832 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 867 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
833 expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK; 868 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
834 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 869 if (unlikely(rx_ev_desc_ptr != expected_ptr))
835 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 870 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
836 871
@@ -839,9 +874,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
839 * UDP/IP, then we can rely on the hardware checksum. 874 * UDP/IP, then we can rely on the hardware checksum.
840 */ 875 */
841 checksummed = 876 checksummed =
842 likely(efx->rx_checksum_enabled) && 877 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
843 (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 878 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
844 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
845 } else { 879 } else {
846 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 880 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
847 checksummed = false; 881 checksummed = false;
@@ -874,58 +908,18 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
874 908
875 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 909 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
876 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 910 if (code == EFX_CHANNEL_MAGIC_TEST(channel))
877 ++channel->magic_count; 911 ; /* ignore */
878 else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) 912 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
879 /* The queue must be empty, so we won't receive any rx 913 /* The queue must be empty, so we won't receive any rx
880 * events, so efx_process_channel() won't refill the 914 * events, so efx_process_channel() won't refill the
881 * queue. Refill it here */ 915 * queue. Refill it here */
882 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 916 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
883 else 917 else
884 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 918 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
885 "generated event "EFX_QWORD_FMT"\n", 919 "generated event "EFX_QWORD_FMT"\n",
886 channel->channel, EFX_QWORD_VAL(*event)); 920 channel->channel, EFX_QWORD_VAL(*event));
887} 921}
888 922
889/* Global events are basically PHY events */
890static void
891efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
892{
893 struct efx_nic *efx = channel->efx;
894 bool handled = false;
895
896 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
897 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
898 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
899 /* Ignored */
900 handled = true;
901 }
902
903 if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
904 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
905 efx->xmac_poll_required = true;
906 handled = true;
907 }
908
909 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
910 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
911 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
912 netif_err(efx, rx_err, efx->net_dev,
913 "channel %d seen global RX_RESET event. Resetting.\n",
914 channel->channel);
915
916 atomic_inc(&efx->rx_reset);
917 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
918 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
919 handled = true;
920 }
921
922 if (!handled)
923 netif_err(efx, hw, efx->net_dev,
924 "channel %d unknown global event "
925 EFX_QWORD_FMT "\n", channel->channel,
926 EFX_QWORD_VAL(*event));
927}
928
929static void 923static void
930efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 924efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
931{ 925{
@@ -997,6 +991,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
997 991
998int efx_nic_process_eventq(struct efx_channel *channel, int budget) 992int efx_nic_process_eventq(struct efx_channel *channel, int budget)
999{ 993{
994 struct efx_nic *efx = channel->efx;
1000 unsigned int read_ptr; 995 unsigned int read_ptr;
1001 efx_qword_t event, *p_event; 996 efx_qword_t event, *p_event;
1002 int ev_code; 997 int ev_code;
@@ -1020,8 +1015,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1020 /* Clear this event by marking it all ones */ 1015 /* Clear this event by marking it all ones */
1021 EFX_SET_QWORD(*p_event); 1016 EFX_SET_QWORD(*p_event);
1022 1017
1023 /* Increment read pointer */ 1018 ++read_ptr;
1024 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1025 1019
1026 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1020 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1027 1021
@@ -1033,7 +1027,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1033 break; 1027 break;
1034 case FSE_AZ_EV_CODE_TX_EV: 1028 case FSE_AZ_EV_CODE_TX_EV:
1035 tx_packets += efx_handle_tx_event(channel, &event); 1029 tx_packets += efx_handle_tx_event(channel, &event);
1036 if (tx_packets >= EFX_TXQ_SIZE) { 1030 if (tx_packets > efx->txq_entries) {
1037 spent = budget; 1031 spent = budget;
1038 goto out; 1032 goto out;
1039 } 1033 }
@@ -1041,15 +1035,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1041 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1035 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1042 efx_handle_generated_event(channel, &event); 1036 efx_handle_generated_event(channel, &event);
1043 break; 1037 break;
1044 case FSE_AZ_EV_CODE_GLOBAL_EV:
1045 efx_handle_global_event(channel, &event);
1046 break;
1047 case FSE_AZ_EV_CODE_DRIVER_EV: 1038 case FSE_AZ_EV_CODE_DRIVER_EV:
1048 efx_handle_driver_event(channel, &event); 1039 efx_handle_driver_event(channel, &event);
1049 break; 1040 break;
1050 case FSE_CZ_EV_CODE_MCDI_EV: 1041 case FSE_CZ_EV_CODE_MCDI_EV:
1051 efx_mcdi_process_event(channel, &event); 1042 efx_mcdi_process_event(channel, &event);
1052 break; 1043 break;
1044 case FSE_AZ_EV_CODE_GLOBAL_EV:
1045 if (efx->type->handle_global_event &&
1046 efx->type->handle_global_event(channel, &event))
1047 break;
1048 /* else fall through */
1053 default: 1049 default:
1054 netif_err(channel->efx, hw, channel->efx->net_dev, 1050 netif_err(channel->efx, hw, channel->efx->net_dev,
1055 "channel %d unknown event type %d (data " 1051 "channel %d unknown event type %d (data "
@@ -1063,15 +1059,23 @@ out:
1063 return spent; 1059 return spent;
1064} 1060}
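
With efx_handle_global_event() removed from the common code, global events are offered to the NIC type's handle_global_event hook and fall through to the default case if it declines them. A hypothetical sketch of that hook-table pattern; the types and the event encoding are invented, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct nic_type {
	/* returns true if the event was recognised and consumed */
	bool (*handle_global_event)(unsigned int event);
};

static bool falcon_handle_global_event(unsigned int event)
{
	if (event == 1) {	/* e.g. a PHY interrupt this chip knows */
		puts("falcon: consumed global event");
		return true;
	}
	return false;
}

static const struct nic_type falcon_type = {
	.handle_global_event = falcon_handle_global_event,
};

static void process_global_event(const struct nic_type *type,
				 unsigned int event)
{
	if (type->handle_global_event &&
	    type->handle_global_event(event))
		return;
	/* else fall through, as in the switch above */
	printf("unknown global event %u\n", event);
}

int main(void)
{
	process_global_event(&falcon_type, 1);
	process_global_event(&falcon_type, 7);
	return 0;
}
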
1065 1061
1062/* Check whether an event is present in the eventq at the current
1063 * read pointer. Only useful for self-test.
1064 */
1065bool efx_nic_event_present(struct efx_channel *channel)
1066{
1067 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1068}
1066 1069
1067/* Allocate buffer table entries for event queue */ 1070/* Allocate buffer table entries for event queue */
1068int efx_nic_probe_eventq(struct efx_channel *channel) 1071int efx_nic_probe_eventq(struct efx_channel *channel)
1069{ 1072{
1070 struct efx_nic *efx = channel->efx; 1073 struct efx_nic *efx = channel->efx;
1071 BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 || 1074 unsigned entries;
1072 EFX_EVQ_SIZE & EFX_EVQ_MASK); 1075
1076 entries = channel->eventq_mask + 1;
1073 return efx_alloc_special_buffer(efx, &channel->eventq, 1077 return efx_alloc_special_buffer(efx, &channel->eventq,
1074 EFX_EVQ_SIZE * sizeof(efx_qword_t)); 1078 entries * sizeof(efx_qword_t));
1075} 1079}
1076 1080
1077void efx_nic_init_eventq(struct efx_channel *channel) 1081void efx_nic_init_eventq(struct efx_channel *channel)
@@ -1163,11 +1167,11 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
1163 1167
1164static void efx_poll_flush_events(struct efx_nic *efx) 1168static void efx_poll_flush_events(struct efx_nic *efx)
1165{ 1169{
1166 struct efx_channel *channel = &efx->channel[0]; 1170 struct efx_channel *channel = efx_get_channel(efx, 0);
1167 struct efx_tx_queue *tx_queue; 1171 struct efx_tx_queue *tx_queue;
1168 struct efx_rx_queue *rx_queue; 1172 struct efx_rx_queue *rx_queue;
1169 unsigned int read_ptr = channel->eventq_read_ptr; 1173 unsigned int read_ptr = channel->eventq_read_ptr;
1170 unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK; 1174 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1171 1175
1172 do { 1176 do {
1173 efx_qword_t *event = efx_event(channel, read_ptr); 1177 efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1185,7 +1189,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1185 ev_queue = EFX_QWORD_FIELD(*event, 1189 ev_queue = EFX_QWORD_FIELD(*event,
1186 FSF_AZ_DRIVER_EV_SUBDATA); 1190 FSF_AZ_DRIVER_EV_SUBDATA);
1187 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { 1191 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1188 tx_queue = efx->tx_queue + ev_queue; 1192 tx_queue = efx_get_tx_queue(
1193 efx, ev_queue / EFX_TXQ_TYPES,
1194 ev_queue % EFX_TXQ_TYPES);
1189 tx_queue->flushed = FLUSH_DONE; 1195 tx_queue->flushed = FLUSH_DONE;
1190 } 1196 }
1191 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && 1197 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1195,7 +1201,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1195 ev_failed = EFX_QWORD_FIELD( 1201 ev_failed = EFX_QWORD_FIELD(
1196 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1202 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1197 if (ev_queue < efx->n_rx_channels) { 1203 if (ev_queue < efx->n_rx_channels) {
1198 rx_queue = efx->rx_queue + ev_queue; 1204 rx_queue = efx_get_rx_queue(efx, ev_queue);
1199 rx_queue->flushed = 1205 rx_queue->flushed =
1200 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1206 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1201 } 1207 }
@@ -1205,7 +1211,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1205 * it's ok to throw away every non-flush event */ 1211 * it's ok to throw away every non-flush event */
1206 EFX_SET_QWORD(*event); 1212 EFX_SET_QWORD(*event);
1207 1213
1208 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; 1214 ++read_ptr;
1209 } while (read_ptr != end_ptr); 1215 } while (read_ptr != end_ptr);
1210 1216
1211 channel->eventq_read_ptr = read_ptr; 1217 channel->eventq_read_ptr = read_ptr;
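
Flush events carry a flat TX queue number, which the new efx_get_tx_queue() call splits into a channel index and a queue type. A trivial sketch of that arithmetic, assuming EFX_TXQ_TYPES is 4 as in this era of the driver:

#include <stdio.h>

#define EFX_TXQ_TYPES 4	/* assumed: queue types per channel */

int main(void)
{
	unsigned int ev_queue;

	for (ev_queue = 0; ev_queue < 8; ev_queue++)
		printf("flat queue %u -> channel %u, type %u\n",
		       ev_queue,
		       ev_queue / EFX_TXQ_TYPES,
		       ev_queue % EFX_TXQ_TYPES);
	return 0;
}
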
@@ -1216,6 +1222,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1216 * serialise them */ 1222 * serialise them */
1217int efx_nic_flush_queues(struct efx_nic *efx) 1223int efx_nic_flush_queues(struct efx_nic *efx)
1218{ 1224{
1225 struct efx_channel *channel;
1219 struct efx_rx_queue *rx_queue; 1226 struct efx_rx_queue *rx_queue;
1220 struct efx_tx_queue *tx_queue; 1227 struct efx_tx_queue *tx_queue;
1221 int i, tx_pending, rx_pending; 1228 int i, tx_pending, rx_pending;
@@ -1224,29 +1231,38 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1224 efx->type->prepare_flush(efx); 1231 efx->type->prepare_flush(efx);
1225 1232
1226 /* Flush all tx queues in parallel */ 1233 /* Flush all tx queues in parallel */
1227 efx_for_each_tx_queue(tx_queue, efx) 1234 efx_for_each_channel(channel, efx) {
1228 efx_flush_tx_queue(tx_queue); 1235 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1236 if (tx_queue->initialised)
1237 efx_flush_tx_queue(tx_queue);
1238 }
1239 }
1229 1240
1230 /* The hardware supports four concurrent rx flushes, each of which may 1241 /* The hardware supports four concurrent rx flushes, each of which may
1231 * need to be retried if there is an outstanding descriptor fetch */ 1242 * need to be retried if there is an outstanding descriptor fetch */
1232 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { 1243 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1233 rx_pending = tx_pending = 0; 1244 rx_pending = tx_pending = 0;
1234 efx_for_each_rx_queue(rx_queue, efx) { 1245 efx_for_each_channel(channel, efx) {
1235 if (rx_queue->flushed == FLUSH_PENDING) 1246 efx_for_each_channel_rx_queue(rx_queue, channel) {
1236 ++rx_pending; 1247 if (rx_queue->flushed == FLUSH_PENDING)
1237 } 1248 ++rx_pending;
1238 efx_for_each_rx_queue(rx_queue, efx) {
1239 if (rx_pending == EFX_RX_FLUSH_COUNT)
1240 break;
1241 if (rx_queue->flushed == FLUSH_FAILED ||
1242 rx_queue->flushed == FLUSH_NONE) {
1243 efx_flush_rx_queue(rx_queue);
1244 ++rx_pending;
1245 } 1249 }
1246 } 1250 }
1247 efx_for_each_tx_queue(tx_queue, efx) { 1251 efx_for_each_channel(channel, efx) {
1248 if (tx_queue->flushed != FLUSH_DONE) 1252 efx_for_each_channel_rx_queue(rx_queue, channel) {
1249 ++tx_pending; 1253 if (rx_pending == EFX_RX_FLUSH_COUNT)
1254 break;
1255 if (rx_queue->flushed == FLUSH_FAILED ||
1256 rx_queue->flushed == FLUSH_NONE) {
1257 efx_flush_rx_queue(rx_queue);
1258 ++rx_pending;
1259 }
1260 }
1261 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1262 if (tx_queue->initialised &&
1263 tx_queue->flushed != FLUSH_DONE)
1264 ++tx_pending;
1265 }
1250 } 1266 }
1251 1267
1252 if (rx_pending == 0 && tx_pending == 0) 1268 if (rx_pending == 0 && tx_pending == 0)
@@ -1258,19 +1274,22 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1258 1274
1259 /* Mark the queues as all flushed. We're going to return failure 1275 /* Mark the queues as all flushed. We're going to return failure
1260 * leading to a reset, or fake up success anyway */ 1276 * leading to a reset, or fake up success anyway */
1261 efx_for_each_tx_queue(tx_queue, efx) { 1277 efx_for_each_channel(channel, efx) {
1262 if (tx_queue->flushed != FLUSH_DONE) 1278 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1263 netif_err(efx, hw, efx->net_dev, 1279 if (tx_queue->initialised &&
1264 "tx queue %d flush command timed out\n", 1280 tx_queue->flushed != FLUSH_DONE)
1265 tx_queue->queue); 1281 netif_err(efx, hw, efx->net_dev,
1266 tx_queue->flushed = FLUSH_DONE; 1282 "tx queue %d flush command timed out\n",
1267 } 1283 tx_queue->queue);
1268 efx_for_each_rx_queue(rx_queue, efx) { 1284 tx_queue->flushed = FLUSH_DONE;
1269 if (rx_queue->flushed != FLUSH_DONE) 1285 }
1270 netif_err(efx, hw, efx->net_dev, 1286 efx_for_each_channel_rx_queue(rx_queue, channel) {
1271 "rx queue %d flush command timed out\n", 1287 if (rx_queue->flushed != FLUSH_DONE)
1272 rx_queue->queue); 1288 netif_err(efx, hw, efx->net_dev,
1273 rx_queue->flushed = FLUSH_DONE; 1289 "rx queue %d flush command timed out\n",
1290 efx_rx_queue_index(rx_queue));
1291 rx_queue->flushed = FLUSH_DONE;
1292 }
1274 } 1293 }
1275 1294
1276 return -ETIMEDOUT; 1295 return -ETIMEDOUT;
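
The rewritten loops above iterate per channel, but the flush state machine is unchanged: at most EFX_RX_FLUSH_COUNT RX flushes are kept in flight, and FLUSH_NONE or FLUSH_FAILED queues are re-issued on each poll. A self-contained sketch of that machine with a faked one-completion-per-poll NIC; everything here is scaffolding, not driver code.

#include <stdio.h>

enum flush_state { FLUSH_NONE, FLUSH_PENDING, FLUSH_FAILED, FLUSH_DONE };

#define NQUEUES 8
#define EFX_RX_FLUSH_COUNT 4	/* hardware limit on concurrent flushes */

int main(void)
{
	enum flush_state q[NQUEUES] = { FLUSH_NONE };
	int poll, i, pending;

	for (poll = 0; poll < 16; poll++) {
		pending = 0;
		for (i = 0; i < NQUEUES; i++)
			if (q[i] == FLUSH_PENDING)
				pending++;
		for (i = 0; i < NQUEUES; i++) {
			if (pending == EFX_RX_FLUSH_COUNT)
				break;
			if (q[i] == FLUSH_NONE || q[i] == FLUSH_FAILED) {
				q[i] = FLUSH_PENDING;	/* (re)issue flush */
				pending++;
			}
		}
		/* pretend the NIC completes one flush per poll */
		for (i = 0; i < NQUEUES; i++)
			if (q[i] == FLUSH_PENDING) {
				q[i] = FLUSH_DONE;
				break;
			}
		for (i = 0, pending = 0; i < NQUEUES; i++)
			if (q[i] != FLUSH_DONE)
				pending++;
		if (pending == 0)
			break;
	}
	printf("all queues flushed after %d polls\n", poll + 1);
	return 0;
}
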
@@ -1397,6 +1416,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1397 u32 queues; 1416 u32 queues;
1398 int syserr; 1417 int syserr;
1399 1418
1419 /* Could this be ours? If interrupts are disabled then the
1420 * channel state may not be valid.
1421 */
1422 if (!efx->legacy_irq_enabled)
1423 return result;
1424
1400 /* Read the ISR which also ACKs the interrupts */ 1425 /* Read the ISR which also ACKs the interrupts */
1401 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1426 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1402 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1427 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@ -1457,7 +1482,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1457 */ 1482 */
1458static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1483static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1459{ 1484{
1460 struct efx_channel *channel = dev_id; 1485 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1461 struct efx_nic *efx = channel->efx; 1486 struct efx_nic *efx = channel->efx;
1462 efx_oword_t *int_ker = efx->irq_status.addr; 1487 efx_oword_t *int_ker = efx->irq_status.addr;
1463 int syserr; 1488 int syserr;
@@ -1532,7 +1557,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1532 efx_for_each_channel(channel, efx) { 1557 efx_for_each_channel(channel, efx) {
1533 rc = request_irq(channel->irq, efx_msi_interrupt, 1558 rc = request_irq(channel->irq, efx_msi_interrupt,
1534 IRQF_PROBE_SHARED, /* Not shared */ 1559 IRQF_PROBE_SHARED, /* Not shared */
1535 channel->name, channel); 1560 efx->channel_name[channel->channel],
1561 &efx->channel[channel->channel]);
1536 if (rc) { 1562 if (rc) {
1537 netif_err(efx, drv, efx->net_dev, 1563 netif_err(efx, drv, efx->net_dev,
1538 "failed to hook IRQ %d\n", channel->irq); 1564 "failed to hook IRQ %d\n", channel->irq);
@@ -1544,7 +1570,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1544 1570
1545 fail2: 1571 fail2:
1546 efx_for_each_channel(channel, efx) 1572 efx_for_each_channel(channel, efx)
1547 free_irq(channel->irq, channel); 1573 free_irq(channel->irq, &efx->channel[channel->channel]);
1548 fail1: 1574 fail1:
1549 return rc; 1575 return rc;
1550} 1576}
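
Note what the dev_id change buys: request_irq() is now given the address of the channel pointer slot, &efx->channel[i], rather than the channel itself, so efx_msi_interrupt() recovers the channel with one extra dereference and the channel structure can be reallocated later without re-hooking the IRQ. A userspace sketch of the idiom; the interrupt plumbing is simulated, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct channel { int id; };

/* cookie captured at "request_irq()" time */
static void *cookie;

static void msi_handler(void *dev_id)
{
	struct channel *ch = *(struct channel **)dev_id;

	printf("interrupt on channel %d at %p\n", ch->id, (void *)ch);
}

int main(void)
{
	struct channel *slot = malloc(sizeof(*slot));	/* efx->channel[i] */
	struct channel *bigger;

	slot->id = 0;
	cookie = &slot;		/* register the slot's address, not slot */
	msi_handler(cookie);

	/* reallocate the channel; the registered cookie stays valid */
	bigger = malloc(sizeof(*bigger));
	bigger->id = 0;
	free(slot);
	slot = bigger;
	msi_handler(cookie);

	free(bigger);
	return 0;
}
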
@@ -1557,7 +1583,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1557 /* Disable MSI/MSI-X interrupts */ 1583 /* Disable MSI/MSI-X interrupts */
1558 efx_for_each_channel(channel, efx) { 1584 efx_for_each_channel(channel, efx) {
1559 if (channel->irq) 1585 if (channel->irq)
1560 free_irq(channel->irq, channel); 1586 free_irq(channel->irq, &efx->channel[channel->channel]);
1561 } 1587 }
1562 1588
1563 /* ACK legacy interrupt */ 1589 /* ACK legacy interrupt */
@@ -1642,7 +1668,7 @@ void efx_nic_init_common(struct efx_nic *efx)
1642 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1668 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1643 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1669 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1644 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1645 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); 1671 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1646 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1647 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1673 /* Enable SW_EV to inherit in char driver - assume harmless here */
1648 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1674 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
@@ -1654,6 +1680,19 @@ void efx_nic_init_common(struct efx_nic *efx)
1654 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1680 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1655 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1681 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1656 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1682 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1683
1684 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1685 EFX_POPULATE_OWORD_4(temp,
1686 /* Default values */
1687 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1688 FRF_BZ_TX_PACE_SB_AF, 0xb,
1689 FRF_BZ_TX_PACE_FB_BASE, 0,
1690 /* Allow large pace values in the
1691 * fast bin. */
1692 FRF_BZ_TX_PACE_BIN_TH,
1693 FFE_BZ_TX_PACE_RESERVED);
1694 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1695 }
1657} 1696}
1658 1697
1659/* Register dump */ 1698/* Register dump */
@@ -1827,8 +1866,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1827 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), 1866 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1828 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), 1867 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1829 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), 1868 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1830 /* The register buffer is allocated with slab, so we can't 1869 /* We can't reasonably read all of the buffer table (up to 8MB!).
1831 * reasonably read all of the buffer table (up to 8MB!).
1832 * However this driver will only use a few entries. Reading 1870 * However this driver will only use a few entries. Reading
1833 * 1K entries allows for some expansion of queue count and 1871 * 1K entries allows for some expansion of queue count and
1834 * size before we need to change the version. */ 1872 * size before we need to change the version. */
@@ -1836,7 +1874,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1836 A, A, 8, 1024), 1874 A, A, 8, 1024),
1837 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, 1875 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1838 B, Z, 8, 1024), 1876 B, Z, 8, 1024),
1839 /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
1840 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), 1877 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1841 REGISTER_TABLE_BB_CZ(TIMER_TBL), 1878 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1842 REGISTER_TABLE_BB_CZ(TX_PACE_TBL), 1879 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -1846,6 +1883,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1846 REGISTER_TABLE_CZ(MC_TREG_SMEM), 1883 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1847 /* MSIX_PBA_TABLE is not mapped */ 1884 /* MSIX_PBA_TABLE is not mapped */
1848 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ 1885 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
1886 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1849}; 1887};
1850 1888
1851size_t efx_nic_get_regs_len(struct efx_nic *efx) 1889size_t efx_nic_get_regs_len(struct efx_nic *efx)
@@ -1897,6 +1935,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1897 1935
1898 size = min_t(size_t, table->step, 16); 1936 size = min_t(size_t, table->step, 16);
1899 1937
1938 if (table->offset >= efx->type->mem_map_size) {
1939 /* No longer mapped; return dummy data */
1940 memcpy(buf, "\xde\xc0\xad\xde", 4);
1941 buf += table->rows * size;
1942 continue;
1943 }
1944
1900 for (i = 0; i < table->rows; i++) { 1945 for (i = 0; i < table->rows; i++) {
1901 switch (table->step) { 1946 switch (table->step) {
1902 case 4: /* 32-bit register or SRAM */ 1947 case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc98722d..4bd1f2839dfe 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -15,6 +15,7 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "efx.h" 16#include "efx.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "spi.h"
18 19
19/* 20/*
20 * Falcon hardware control 21 * Falcon hardware control
@@ -113,6 +114,11 @@ struct falcon_board {
113 * @stats_pending: Is there a pending DMA of MAC statistics. 114 * @stats_pending: Is there a pending DMA of MAC statistics.
114 * @stats_timer: A timer for regularly fetching MAC statistics. 115 * @stats_timer: A timer for regularly fetching MAC statistics.
115 * @stats_dma_done: Pointer to the flag which indicates DMA completion. 116 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
117 * @spi_flash: SPI flash device
118 * @spi_eeprom: SPI EEPROM device
119 * @spi_lock: SPI bus lock
120 * @mdio_lock: MDIO bus lock
121 * @xmac_poll_required: XMAC link state needs polling
116 */ 122 */
117struct falcon_nic_data { 123struct falcon_nic_data {
118 struct pci_dev *pci_dev2; 124 struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@ struct falcon_nic_data {
121 bool stats_pending; 127 bool stats_pending;
122 struct timer_list stats_timer; 128 struct timer_list stats_timer;
123 u32 *stats_dma_done; 129 u32 *stats_dma_done;
130 struct efx_spi_device spi_flash;
131 struct efx_spi_device spi_eeprom;
132 struct mutex spi_lock;
133 struct mutex mdio_lock;
134 bool xmac_poll_required;
124}; 135};
125 136
126static inline struct falcon_board *falcon_board(struct efx_nic *efx) 137static inline struct falcon_board *falcon_board(struct efx_nic *efx)
@@ -131,24 +142,19 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
131 142
132/** 143/**
133 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
134 * @fw_version: Management controller firmware version
135 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
137 * @wol_filter_id: Wake-on-LAN packet filter id 147 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
139 */ 148 */
140struct siena_nic_data { 149struct siena_nic_data {
141 u64 fw_version;
142 u32 fw_build;
143 struct efx_mcdi_iface mcdi; 150 struct efx_mcdi_iface mcdi;
151 void __iomem *mcdi_smem;
144 int wol_filter_id; 152 int wol_filter_id;
145}; 153};
146 154
147extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); 155extern const struct efx_nic_type falcon_a1_nic_type;
148 156extern const struct efx_nic_type falcon_b0_nic_type;
149extern struct efx_nic_type falcon_a1_nic_type; 157extern const struct efx_nic_type siena_a0_nic_type;
150extern struct efx_nic_type falcon_b0_nic_type;
151extern struct efx_nic_type siena_a0_nic_type;
152 158
153/************************************************************************** 159/**************************************************************************
154 * 160 *
@@ -180,11 +186,11 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
180extern void efx_nic_remove_eventq(struct efx_channel *channel); 186extern void efx_nic_remove_eventq(struct efx_channel *channel);
181extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); 187extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
182extern void efx_nic_eventq_read_ack(struct efx_channel *channel); 188extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
189extern bool efx_nic_event_present(struct efx_channel *channel);
183 190
184/* MAC/PHY */ 191/* MAC/PHY */
185extern void falcon_drain_tx_fifo(struct efx_nic *efx); 192extern void falcon_drain_tx_fifo(struct efx_nic *efx);
186extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 193extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
187extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
188 194
189/* Interrupts and test events */ 195/* Interrupts and test events */
190extern int efx_nic_init_interrupt(struct efx_nic *efx); 196extern int efx_nic_init_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 5bc26137257b..11d148cd8441 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -11,21 +11,16 @@
11#define EFX_PHY_H 11#define EFX_PHY_H
12 12
13/**************************************************************************** 13/****************************************************************************
14 * 10Xpress (SFX7101 and SFT9001) PHYs 14 * 10Xpress (SFX7101) PHY
15 */ 15 */
16extern struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops;
18 17
19extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
20 19
21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
22 * to boot due to corrupt flash, or some other negative error code. */
23extern int sft9001_wait_boot(struct efx_nic *efx);
24
25/**************************************************************************** 20/****************************************************************************
26 * AMCC/Quake QT202x PHYs 21 * AMCC/Quake QT202x PHYs
27 */ 22 */
28extern struct efx_phy_operations falcon_qt202x_phy_ops; 23extern const struct efx_phy_operations falcon_qt202x_phy_ops;
29 24
30/* These PHYs provide various H/W control states for LEDs */ 25/* These PHYs provide various H/W control states for LEDs */
31#define QUAKE_LED_LINK_INVAL (0) 26#define QUAKE_LED_LINK_INVAL (0)
@@ -42,9 +37,20 @@ extern struct efx_phy_operations falcon_qt202x_phy_ops;
42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); 37extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43 38
44/**************************************************************************** 39/****************************************************************************
40* Transwitch CX4 retimer
41*/
42extern const struct efx_phy_operations falcon_txc_phy_ops;
43
44#define TXC_GPIO_DIR_INPUT 0
45#define TXC_GPIO_DIR_OUTPUT 1
46
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49
50/****************************************************************************
45 * Siena managed PHYs 51 * Siena managed PHYs
46 */ 52 */
47extern struct efx_phy_operations efx_mcdi_phy_ops; 53extern const struct efx_phy_operations efx_mcdi_phy_ops;
48 54
49extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, 55extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
50 unsigned int prtad, unsigned int devad, 56 unsigned int prtad, unsigned int devad,
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1d85f3..7ad97e397406 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -41,6 +41,8 @@
41#define PCS_UC_STATUS_LBN 0 41#define PCS_UC_STATUS_LBN 0
42#define PCS_UC_STATUS_WIDTH 8 42#define PCS_UC_STATUS_WIDTH 8
43#define PCS_UC_STATUS_FW_SAVE 0x20 43#define PCS_UC_STATUS_FW_SAVE 0x20
44#define PMA_PMD_MODE_REG 0xc301
45#define PMA_PMD_RXIN_SEL_LBN 6
44#define PMA_PMD_FTX_CTRL2_REG 0xc309 46#define PMA_PMD_FTX_CTRL2_REG 0xc309
45#define PMA_PMD_FTX_STATIC_LBN 13 47#define PMA_PMD_FTX_STATIC_LBN 13
46#define PMA_PMD_VEND1_REG 0xc001 48#define PMA_PMD_VEND1_REG 0xc001
@@ -282,6 +284,10 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
282 * slow) reload of the firmware image (the microcontroller's code 284 * slow) reload of the firmware image (the microcontroller's code
283 * memory is not affected by the microcontroller reset). */ 285 * memory is not affected by the microcontroller reset). */
284 efx_mdio_write(efx, 1, 0xc317, 0x00ff); 286 efx_mdio_write(efx, 1, 0xc317, 0x00ff);
287 /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
288 * restart doesn't reset it. We need to do that ourselves. */
289 efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
290 1 << PMA_PMD_RXIN_SEL_LBN, false);
285 efx_mdio_write(efx, 1, 0xc300, 0x0002); 291 efx_mdio_write(efx, 1, 0xc300, 0x0002);
286 msleep(20); 292 msleep(20);
287 293
@@ -443,7 +449,7 @@ static void qt202x_phy_remove(struct efx_nic *efx)
443 efx->phy_data = NULL; 449 efx->phy_data = NULL;
444} 450}
445 451
446struct efx_phy_operations falcon_qt202x_phy_ops = { 452const struct efx_phy_operations falcon_qt202x_phy_ops = {
447 .probe = qt202x_phy_probe, 453 .probe = qt202x_phy_probe,
448 .init = qt202x_phy_init, 454 .init = qt202x_phy_init,
449 .reconfigure = qt202x_phy_reconfigure, 455 .reconfigure = qt202x_phy_reconfigure,
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 18a3be428348..cc2c86b76a7b 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -2893,6 +2893,26 @@
2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8 2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8
2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff 2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895 2895
2896/* RX_MAC_FILTER_TBL0 */
2897/* RMFT_DEST_MAC is wider than 32 bits */
2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
2902
2903/* TX_MAC_FILTER_TBL0 */
2904/* TMFT_SRC_MAC is wider than 32 bits */
2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
2909
2910/* TX_PACE_TBL */
2911/* Values >20 are documented as reserved, but will result in a queue going
2912 * into the fast bin with a pace value of zero. */
2913#define FFE_BZ_TX_PACE_OFF 0
2914#define FFE_BZ_TX_PACE_RESERVED 21
2915
2896/* DRIVER_EV */ 2916/* DRIVER_EV */
2897/* Sub-fields of an RX flush completion event */ 2917/* Sub-fields of an RX flush completion event */
2898#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 2918#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 799c461ce7b8..62e43649466e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -14,6 +14,7 @@
14#include <linux/ip.h> 14#include <linux/ip.h>
15#include <linux/tcp.h> 15#include <linux/tcp.h>
16#include <linux/udp.h> 16#include <linux/udp.h>
17#include <linux/prefetch.h>
17#include <net/ip.h> 18#include <net/ip.h>
18#include <net/checksum.h> 19#include <net/checksum.h>
19#include "net_driver.h" 20#include "net_driver.h"
@@ -37,7 +38,7 @@
37 * This driver supports two methods for allocating and using RX buffers: 38 * This driver supports two methods for allocating and using RX buffers:
38 * each RX buffer may be backed by an skb or by an order-n page. 39 * each RX buffer may be backed by an skb or by an order-n page.
39 * 40 *
40 * When LRO is in use then the second method has a lower overhead, 41 * When GRO is in use then the second method has a lower overhead,
41 * since we don't have to allocate then free skbs on reassembled frames. 42 * since we don't have to allocate then free skbs on reassembled frames.
42 * 43 *
43 * Values: 44 * Values:
@@ -50,25 +51,25 @@
50 * 51 *
51 * - Since pushing and popping descriptors are separated by the rx_queue 52 * - Since pushing and popping descriptors are separated by the rx_queue
52 * size, the watermarks should be ~rxd_size. 53 * size, the watermarks should be ~rxd_size.
53 * - The performance win by using page-based allocation for LRO is less 54 * - The performance win by using page-based allocation for GRO is less
54 * than the performance hit of using page-based allocation for non-LRO, 55 * than the performance hit of using page-based allocation for non-GRO,
55 * so the watermarks should reflect this. 56 * so the watermarks should reflect this.
56 * 57 *
57 * Per channel we maintain a single variable, updated by each channel: 58 * Per channel we maintain a single variable, updated by each channel:
58 * 59 *
59 * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO : 60 * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
60 * RX_ALLOC_FACTOR_SKB) 61 * RX_ALLOC_FACTOR_SKB)
61 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which 62 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
62 * limits the hysteresis), and update the allocation strategy: 63 * limits the hysteresis), and update the allocation strategy:
63 * 64 *
64 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 65 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
65 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 66 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
66 */ 67 */
67static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; 68static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
68 69
69#define RX_ALLOC_LEVEL_LRO 0x2000 70#define RX_ALLOC_LEVEL_GRO 0x2000
70#define RX_ALLOC_LEVEL_MAX 0x3000 71#define RX_ALLOC_LEVEL_MAX 0x3000
71#define RX_ALLOC_FACTOR_LRO 1 72#define RX_ALLOC_FACTOR_GRO 1
72#define RX_ALLOC_FACTOR_SKB (-2) 73#define RX_ALLOC_FACTOR_SKB (-2)
73 74
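
The LRO-to-GRO rename above is cosmetic; the hysteresis described in the block comment is unchanged. A runnable sketch of that strategy using the constants as defined here; the traffic mix driving it is made up.

#include <stdio.h>

#define RX_ALLOC_LEVEL_GRO	0x2000
#define RX_ALLOC_LEVEL_MAX	0x3000
#define RX_ALLOC_FACTOR_GRO	1
#define RX_ALLOC_FACTOR_SKB	(-2)

int main(void)
{
	int level = 0;
	int step;

	for (step = 0; step < 30000; step++) {
		int gro_performed = step < 20000;	/* invented mix */

		level += gro_performed ? RX_ALLOC_FACTOR_GRO
				       : RX_ALLOC_FACTOR_SKB;
		/* per NAPI poll: constrain to 0..MAX to limit hysteresis */
		if (level < 0)
			level = 0;
		if (level > RX_ALLOC_LEVEL_MAX)
			level = RX_ALLOC_LEVEL_MAX;
	}
	printf("final level 0x%x -> %s allocation\n", level,
	       level > RX_ALLOC_LEVEL_GRO ? "page" : "skb");
	return 0;
}
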
74/* This is the percentage fill level below which new RX descriptors 75/* This is the percentage fill level below which new RX descriptors
@@ -89,24 +90,37 @@ static unsigned int rx_refill_limit = 95;
89 */ 90 */
90#define EFX_RXD_HEAD_ROOM 2 91#define EFX_RXD_HEAD_ROOM 2
91 92
92static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) 93/* Offset of ethernet header within page */
94static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
95 struct efx_rx_buffer *buf)
93{ 96{
94 /* Offset is always within one page, so we don't need to consider 97 /* Offset is always within one page, so we don't need to consider
95 * the page order. 98 * the page order.
96 */ 99 */
97 return (__force unsigned long) buf->data & (PAGE_SIZE - 1); 100 return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
101 efx->type->rx_buffer_hash_size);
98} 102}
99static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 103static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
100{ 104{
101 return PAGE_SIZE << efx->rx_buffer_order; 105 return PAGE_SIZE << efx->rx_buffer_order;
102} 106}
103 107
104static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf) 108static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
105{ 109{
110 if (buf->is_page)
111 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
112 else
113 return ((u8 *)buf->u.skb->data +
114 efx->type->rx_buffer_hash_size);
115}
116
117static inline u32 efx_rx_buf_hash(const u8 *eh)
118{
119 /* The ethernet header is always directly after any hash. */
106#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 120#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
107 return __le32_to_cpup((const __le32 *)(buf->data - 4)); 121 return __le32_to_cpup((const __le32 *)(eh - 4));
108#else 122#else
109 const u8 *data = (const u8 *)(buf->data - 4); 123 const u8 *data = eh - 4;
110 return ((u32)data[0] | 124 return ((u32)data[0] |
111 (u32)data[1] << 8 | 125 (u32)data[1] << 8 |
112 (u32)data[2] << 16 | 126 (u32)data[2] << 16 |
@@ -129,31 +143,31 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
129 struct efx_nic *efx = rx_queue->efx; 143 struct efx_nic *efx = rx_queue->efx;
130 struct net_device *net_dev = efx->net_dev; 144 struct net_device *net_dev = efx->net_dev;
131 struct efx_rx_buffer *rx_buf; 145 struct efx_rx_buffer *rx_buf;
146 struct sk_buff *skb;
132 int skb_len = efx->rx_buffer_len; 147 int skb_len = efx->rx_buffer_len;
133 unsigned index, count; 148 unsigned index, count;
134 149
135 for (count = 0; count < EFX_RX_BATCH; ++count) { 150 for (count = 0; count < EFX_RX_BATCH; ++count) {
136 index = rx_queue->added_count & EFX_RXQ_MASK; 151 index = rx_queue->added_count & rx_queue->ptr_mask;
137 rx_buf = efx_rx_buffer(rx_queue, index); 152 rx_buf = efx_rx_buffer(rx_queue, index);
138 153
139 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); 154 rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
140 if (unlikely(!rx_buf->skb)) 155 if (unlikely(!skb))
141 return -ENOMEM; 156 return -ENOMEM;
142 rx_buf->page = NULL;
143 157
144 /* Adjust the SKB for padding and checksum */ 158 /* Adjust the SKB for padding and checksum */
145 skb_reserve(rx_buf->skb, NET_IP_ALIGN); 159 skb_reserve(skb, NET_IP_ALIGN);
146 rx_buf->len = skb_len - NET_IP_ALIGN; 160 rx_buf->len = skb_len - NET_IP_ALIGN;
147 rx_buf->data = (char *)rx_buf->skb->data; 161 rx_buf->is_page = false;
148 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; 162 skb->ip_summed = CHECKSUM_UNNECESSARY;
149 163
150 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 164 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
151 rx_buf->data, rx_buf->len, 165 skb->data, rx_buf->len,
152 PCI_DMA_FROMDEVICE); 166 PCI_DMA_FROMDEVICE);
153 if (unlikely(pci_dma_mapping_error(efx->pci_dev, 167 if (unlikely(pci_dma_mapping_error(efx->pci_dev,
154 rx_buf->dma_addr))) { 168 rx_buf->dma_addr))) {
155 dev_kfree_skb_any(rx_buf->skb); 169 dev_kfree_skb_any(skb);
156 rx_buf->skb = NULL; 170 rx_buf->u.skb = NULL;
157 return -EIO; 171 return -EIO;
158 } 172 }
159 173
@@ -208,13 +222,12 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
208 dma_addr += sizeof(struct efx_rx_page_state); 222 dma_addr += sizeof(struct efx_rx_page_state);
209 223
210 split: 224 split:
211 index = rx_queue->added_count & EFX_RXQ_MASK; 225 index = rx_queue->added_count & rx_queue->ptr_mask;
212 rx_buf = efx_rx_buffer(rx_queue, index); 226 rx_buf = efx_rx_buffer(rx_queue, index);
213 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 227 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
214 rx_buf->skb = NULL; 228 rx_buf->u.page = page;
215 rx_buf->page = page;
216 rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
217 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 229 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
230 rx_buf->is_page = true;
218 ++rx_queue->added_count; 231 ++rx_queue->added_count;
219 ++rx_queue->alloc_page_count; 232 ++rx_queue->alloc_page_count;
220 ++state->refcnt; 233 ++state->refcnt;
@@ -235,19 +248,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
235static void efx_unmap_rx_buffer(struct efx_nic *efx, 248static void efx_unmap_rx_buffer(struct efx_nic *efx,
236 struct efx_rx_buffer *rx_buf) 249 struct efx_rx_buffer *rx_buf)
237{ 250{
238 if (rx_buf->page) { 251 if (rx_buf->is_page && rx_buf->u.page) {
239 struct efx_rx_page_state *state; 252 struct efx_rx_page_state *state;
240 253
241 EFX_BUG_ON_PARANOID(rx_buf->skb); 254 state = page_address(rx_buf->u.page);
242
243 state = page_address(rx_buf->page);
244 if (--state->refcnt == 0) { 255 if (--state->refcnt == 0) {
245 pci_unmap_page(efx->pci_dev, 256 pci_unmap_page(efx->pci_dev,
246 state->dma_addr, 257 state->dma_addr,
247 efx_rx_buf_size(efx), 258 efx_rx_buf_size(efx),
248 PCI_DMA_FROMDEVICE); 259 PCI_DMA_FROMDEVICE);
249 } 260 }
250 } else if (likely(rx_buf->skb)) { 261 } else if (!rx_buf->is_page && rx_buf->u.skb) {
251 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 262 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
252 rx_buf->len, PCI_DMA_FROMDEVICE); 263 rx_buf->len, PCI_DMA_FROMDEVICE);
253 } 264 }
@@ -256,12 +267,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
256static void efx_free_rx_buffer(struct efx_nic *efx, 267static void efx_free_rx_buffer(struct efx_nic *efx,
257 struct efx_rx_buffer *rx_buf) 268 struct efx_rx_buffer *rx_buf)
258{ 269{
259 if (rx_buf->page) { 270 if (rx_buf->is_page && rx_buf->u.page) {
260 __free_pages(rx_buf->page, efx->rx_buffer_order); 271 __free_pages(rx_buf->u.page, efx->rx_buffer_order);
261 rx_buf->page = NULL; 272 rx_buf->u.page = NULL;
262 } else if (likely(rx_buf->skb)) { 273 } else if (!rx_buf->is_page && rx_buf->u.skb) {
263 dev_kfree_skb_any(rx_buf->skb); 274 dev_kfree_skb_any(rx_buf->u.skb);
264 rx_buf->skb = NULL; 275 rx_buf->u.skb = NULL;
265 } 276 }
266} 277}
267 278
@@ -277,7 +288,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
277static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, 288static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
278 struct efx_rx_buffer *rx_buf) 289 struct efx_rx_buffer *rx_buf)
279{ 290{
280 struct efx_rx_page_state *state = page_address(rx_buf->page); 291 struct efx_rx_page_state *state = page_address(rx_buf->u.page);
281 struct efx_rx_buffer *new_buf; 292 struct efx_rx_buffer *new_buf;
282 unsigned fill_level, index; 293 unsigned fill_level, index;
283 294
@@ -285,23 +296,21 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
285 * we'd like to insert an additional descriptor whilst leaving 296 * we'd like to insert an additional descriptor whilst leaving
286 * EFX_RXD_HEAD_ROOM for the non-recycle path */ 297 * EFX_RXD_HEAD_ROOM for the non-recycle path */
287 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2); 298 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
288 if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) { 299 if (unlikely(fill_level > rx_queue->max_fill)) {
289 /* We could place "state" on a list, and drain the list in 300 /* We could place "state" on a list, and drain the list in
290 * efx_fast_push_rx_descriptors(). For now, this will do. */ 301 * efx_fast_push_rx_descriptors(). For now, this will do. */
291 return; 302 return;
292 } 303 }
293 304
294 ++state->refcnt; 305 ++state->refcnt;
295 get_page(rx_buf->page); 306 get_page(rx_buf->u.page);
296 307
297 index = rx_queue->added_count & EFX_RXQ_MASK; 308 index = rx_queue->added_count & rx_queue->ptr_mask;
298 new_buf = efx_rx_buffer(rx_queue, index); 309 new_buf = efx_rx_buffer(rx_queue, index);
299 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 310 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
300 new_buf->skb = NULL; 311 new_buf->u.page = rx_buf->u.page;
301 new_buf->page = rx_buf->page;
302 new_buf->data = (void *)
303 ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
304 new_buf->len = rx_buf->len; 312 new_buf->len = rx_buf->len;
313 new_buf->is_page = true;
305 ++rx_queue->added_count; 314 ++rx_queue->added_count;
306} 315}
307 316
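efx_resurrect_rx_buffer() above finds the sibling buffer within a shared page by XORing the DMA address with PAGE_SIZE >> 1: when a page is split into two half-page buffers, each half starts on a half-page boundary, so flipping that single bit toggles between the halves. A userspace model of just that arithmetic (a 4 KiB page size is assumed for the demo):

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u	/* assumed page size for the demo */

int main(void)
{
	/* Two half-page buffers carved out of one DMA-mapped page. */
	uint64_t page_base = 0x10000;	/* page-aligned */
	uint64_t buf_a = page_base;
	uint64_t buf_b = buf_a ^ (MODEL_PAGE_SIZE >> 1);

	printf("buf_a=%#llx buf_b=%#llx\n",
	       (unsigned long long)buf_a, (unsigned long long)buf_b);

	/* XOR is an involution: the sibling of the sibling is the original. */
	printf("buf_b's sibling=%#llx\n",
	       (unsigned long long)(buf_b ^ (MODEL_PAGE_SIZE >> 1)));
	return 0;
}

Note that the removed lines applied the same XOR to the CPU data pointer; after this series only the DMA address needs it, since efx_rx_buf_offset() derives the in-page offset from the DMA address.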
@@ -311,20 +320,19 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
311 struct efx_rx_buffer *rx_buf) 320 struct efx_rx_buffer *rx_buf)
312{ 321{
313 struct efx_nic *efx = channel->efx; 322 struct efx_nic *efx = channel->efx;
314 struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel]; 323 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
315 struct efx_rx_buffer *new_buf; 324 struct efx_rx_buffer *new_buf;
316 unsigned index; 325 unsigned index;
317 326
318 if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 327 if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
319 page_count(rx_buf->page) == 1) 328 page_count(rx_buf->u.page) == 1)
320 efx_resurrect_rx_buffer(rx_queue, rx_buf); 329 efx_resurrect_rx_buffer(rx_queue, rx_buf);
321 330
322 index = rx_queue->added_count & EFX_RXQ_MASK; 331 index = rx_queue->added_count & rx_queue->ptr_mask;
323 new_buf = efx_rx_buffer(rx_queue, index); 332 new_buf = efx_rx_buffer(rx_queue, index);
324 333
325 memcpy(new_buf, rx_buf, sizeof(*new_buf)); 334 memcpy(new_buf, rx_buf, sizeof(*new_buf));
326 rx_buf->page = NULL; 335 rx_buf->u.page = NULL;
327 rx_buf->skb = NULL;
328 ++rx_queue->added_count; 336 ++rx_queue->added_count;
329} 337}
330 338
@@ -341,13 +349,13 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
341 */ 349 */
342void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) 350void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
343{ 351{
344 struct efx_channel *channel = rx_queue->channel; 352 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
345 unsigned fill_level; 353 unsigned fill_level;
346 int space, rc = 0; 354 int space, rc = 0;
347 355
348 /* Calculate current fill level, and exit if we don't need to fill */ 356 /* Calculate current fill level, and exit if we don't need to fill */
349 fill_level = (rx_queue->added_count - rx_queue->removed_count); 357 fill_level = (rx_queue->added_count - rx_queue->removed_count);
350 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); 358 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
351 if (fill_level >= rx_queue->fast_fill_trigger) 359 if (fill_level >= rx_queue->fast_fill_trigger)
352 goto out; 360 goto out;
353 361
@@ -364,7 +372,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
364 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 372 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
365 "RX queue %d fast-filling descriptor ring from" 373 "RX queue %d fast-filling descriptor ring from"
366 " level %d to level %d using %s allocation\n", 374 " level %d to level %d using %s allocation\n",
367 rx_queue->queue, fill_level, rx_queue->fast_fill_limit, 375 efx_rx_queue_index(rx_queue), fill_level,
376 rx_queue->fast_fill_limit,
368 channel->rx_alloc_push_pages ? "page" : "skb"); 377 channel->rx_alloc_push_pages ? "page" : "skb");
369 378
370 do { 379 do {
@@ -382,7 +391,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
382 391
383 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 392 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
384 "RX queue %d fast-filled descriptor ring " 393 "RX queue %d fast-filled descriptor ring "
385 "to level %d\n", rx_queue->queue, 394 "to level %d\n", efx_rx_queue_index(rx_queue),
386 rx_queue->added_count - rx_queue->removed_count); 395 rx_queue->added_count - rx_queue->removed_count);
387 396
388 out: 397 out:
@@ -393,7 +402,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
393void efx_rx_slow_fill(unsigned long context) 402void efx_rx_slow_fill(unsigned long context)
394{ 403{
395 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; 404 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
396 struct efx_channel *channel = rx_queue->channel; 405 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
397 406
398 /* Post an event to cause NAPI to run and refill the queue */ 407 /* Post an event to cause NAPI to run and refill the queue */
399 efx_nic_generate_fill_event(channel); 408 efx_nic_generate_fill_event(channel);
@@ -421,45 +430,44 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
421 netif_err(efx, rx_err, efx->net_dev, 430 netif_err(efx, rx_err, efx->net_dev,
422 " RX queue %d seriously overlength " 431 " RX queue %d seriously overlength "
423 "RX event (0x%x > 0x%x+0x%x). Leaking\n", 432 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
424 rx_queue->queue, len, max_len, 433 efx_rx_queue_index(rx_queue), len, max_len,
425 efx->type->rx_buffer_padding); 434 efx->type->rx_buffer_padding);
426 /* If this buffer was skb-allocated, then the meta 435 /* If this buffer was skb-allocated, then the meta
427 * data at the end of the skb will be trashed. So 436 * data at the end of the skb will be trashed. So
428 * we have no choice but to leak the fragment. 437 * we have no choice but to leak the fragment.
429 */ 438 */
430 *leak_packet = (rx_buf->skb != NULL); 439 *leak_packet = !rx_buf->is_page;
431 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 440 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
432 } else { 441 } else {
433 if (net_ratelimit()) 442 if (net_ratelimit())
434 netif_err(efx, rx_err, efx->net_dev, 443 netif_err(efx, rx_err, efx->net_dev,
435 " RX queue %d overlength RX event " 444 " RX queue %d overlength RX event "
436 "(0x%x > 0x%x)\n", 445 "(0x%x > 0x%x)\n",
437 rx_queue->queue, len, max_len); 446 efx_rx_queue_index(rx_queue), len, max_len);
438 } 447 }
439 448
440 rx_queue->channel->n_rx_overlength++; 449 efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
441} 450}
442 451
443/* Pass a received packet up through the generic LRO stack 452/* Pass a received packet up through the generic GRO stack
444 * 453 *
445 * Handles driverlink veto, and passes the fragment up via 454 * Handles driverlink veto, and passes the fragment up via
446 * the appropriate LRO method 455 * the appropriate GRO method
447 */ 456 */
448static void efx_rx_packet_lro(struct efx_channel *channel, 457static void efx_rx_packet_gro(struct efx_channel *channel,
449 struct efx_rx_buffer *rx_buf, 458 struct efx_rx_buffer *rx_buf,
450 bool checksummed) 459 const u8 *eh, bool checksummed)
451{ 460{
452 struct napi_struct *napi = &channel->napi_str; 461 struct napi_struct *napi = &channel->napi_str;
453 gro_result_t gro_result; 462 gro_result_t gro_result;
454 463
455 /* Pass the skb/page into the LRO engine */ 464 /* Pass the skb/page into the GRO engine */
456 if (rx_buf->page) { 465 if (rx_buf->is_page) {
457 struct efx_nic *efx = channel->efx; 466 struct efx_nic *efx = channel->efx;
458 struct page *page = rx_buf->page; 467 struct page *page = rx_buf->u.page;
459 struct sk_buff *skb; 468 struct sk_buff *skb;
460 469
461 EFX_BUG_ON_PARANOID(rx_buf->skb); 470 rx_buf->u.page = NULL;
462 rx_buf->page = NULL;
463 471
464 skb = napi_get_frags(napi); 472 skb = napi_get_frags(napi);
465 if (!skb) { 473 if (!skb) {
@@ -468,11 +476,11 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
468 } 476 }
469 477
470 if (efx->net_dev->features & NETIF_F_RXHASH) 478 if (efx->net_dev->features & NETIF_F_RXHASH)
471 skb->rxhash = efx_rx_buf_hash(rx_buf); 479 skb->rxhash = efx_rx_buf_hash(eh);
472 480
473 skb_shinfo(skb)->frags[0].page = page; 481 skb_shinfo(skb)->frags[0].page = page;
474 skb_shinfo(skb)->frags[0].page_offset = 482 skb_shinfo(skb)->frags[0].page_offset =
475 efx_rx_buf_offset(rx_buf); 483 efx_rx_buf_offset(efx, rx_buf);
476 skb_shinfo(skb)->frags[0].size = rx_buf->len; 484 skb_shinfo(skb)->frags[0].size = rx_buf->len;
477 skb_shinfo(skb)->nr_frags = 1; 485 skb_shinfo(skb)->nr_frags = 1;
478 486
@@ -486,11 +494,10 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
486 494
487 gro_result = napi_gro_frags(napi); 495 gro_result = napi_gro_frags(napi);
488 } else { 496 } else {
489 struct sk_buff *skb = rx_buf->skb; 497 struct sk_buff *skb = rx_buf->u.skb;
490 498
491 EFX_BUG_ON_PARANOID(!skb);
492 EFX_BUG_ON_PARANOID(!checksummed); 499 EFX_BUG_ON_PARANOID(!checksummed);
493 rx_buf->skb = NULL; 500 rx_buf->u.skb = NULL;
494 501
495 gro_result = napi_gro_receive(napi, skb); 502 gro_result = napi_gro_receive(napi, skb);
496 } 503 }
@@ -498,7 +505,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
498 if (gro_result == GRO_NORMAL) { 505 if (gro_result == GRO_NORMAL) {
499 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 506 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
500 } else if (gro_result != GRO_DROP) { 507 } else if (gro_result != GRO_DROP) {
501 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; 508 channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
502 channel->irq_mod_score += 2; 509 channel->irq_mod_score += 2;
503 } 510 }
504} 511}
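For reference, the page branch of efx_rx_packet_gro() follows the kernel's standard zero-copy GRO sequence of this era: borrow an skb from napi_get_frags(), attach the page as fragment 0, and return it through napi_gro_frags(). A condensed sketch of that call shape (driver-specific fields and the hash handling omitted; not a drop-in implementation):

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: hand one page fragment of 'len' bytes at 'offset' to GRO.
 * Assumes NAPI context and that the caller owns a reference on 'page'. */
static gro_result_t rx_page_to_gro(struct napi_struct *napi,
				   struct page *page,
				   unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);		/* drop our reference on failure */
		return GRO_DROP;
	}

	/* 2.6.3x-era frag layout: a bare struct page pointer. */
	skb_shinfo(skb)->frags[0].page = page;
	skb_shinfo(skb)->frags[0].page_offset = offset;
	skb_shinfo(skb)->frags[0].size = len;
	skb_shinfo(skb)->nr_frags = 1;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return napi_gro_frags(napi);
}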
@@ -507,14 +514,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
507 unsigned int len, bool checksummed, bool discard) 514 unsigned int len, bool checksummed, bool discard)
508{ 515{
509 struct efx_nic *efx = rx_queue->efx; 516 struct efx_nic *efx = rx_queue->efx;
510 struct efx_channel *channel = rx_queue->channel; 517 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
511 struct efx_rx_buffer *rx_buf; 518 struct efx_rx_buffer *rx_buf;
512 bool leak_packet = false; 519 bool leak_packet = false;
513 520
514 rx_buf = efx_rx_buffer(rx_queue, index); 521 rx_buf = efx_rx_buffer(rx_queue, index);
515 EFX_BUG_ON_PARANOID(!rx_buf->data);
516 EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
517 EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
518 522
519 /* This allows the refill path to post another buffer. 523 /* This allows the refill path to post another buffer.
520 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 524 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -528,7 +532,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
528 532
529 netif_vdbg(efx, rx_status, efx->net_dev, 533 netif_vdbg(efx, rx_status, efx->net_dev,
530 "RX queue %d received id %x at %llx+%x %s%s\n", 534 "RX queue %d received id %x at %llx+%x %s%s\n",
531 rx_queue->queue, index, 535 efx_rx_queue_index(rx_queue), index,
532 (unsigned long long)rx_buf->dma_addr, len, 536 (unsigned long long)rx_buf->dma_addr, len,
533 (checksummed ? " [SUMMED]" : ""), 537 (checksummed ? " [SUMMED]" : ""),
534 (discard ? " [DISCARD]" : "")); 538 (discard ? " [DISCARD]" : ""));
@@ -553,19 +557,18 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
553 /* Prefetch nice and early so data will (hopefully) be in cache by 557 /* Prefetch nice and early so data will (hopefully) be in cache by
554 * the time we look at it. 558 * the time we look at it.
555 */ 559 */
556 prefetch(rx_buf->data); 560 prefetch(efx_rx_buf_eh(efx, rx_buf));
557 561
558 /* Pipeline receives so that we give time for packet headers to be 562 /* Pipeline receives so that we give time for packet headers to be
559 * prefetched into cache. 563 * prefetched into cache.
560 */ 564 */
561 rx_buf->len = len; 565 rx_buf->len = len - efx->type->rx_buffer_hash_size;
562out: 566out:
563 if (rx_queue->channel->rx_pkt) 567 if (channel->rx_pkt)
564 __efx_rx_packet(rx_queue->channel, 568 __efx_rx_packet(channel,
565 rx_queue->channel->rx_pkt, 569 channel->rx_pkt, channel->rx_pkt_csummed);
566 rx_queue->channel->rx_pkt_csummed); 570 channel->rx_pkt = rx_buf;
567 rx_queue->channel->rx_pkt = rx_buf; 571 channel->rx_pkt_csummed = checksummed;
568 rx_queue->channel->rx_pkt_csummed = checksummed;
569} 572}
570 573
571/* Handle a received packet. Second half: Touches packet payload. */ 574/* Handle a received packet. Second half: Touches packet payload. */
@@ -574,48 +577,49 @@ void __efx_rx_packet(struct efx_channel *channel,
574{ 577{
575 struct efx_nic *efx = channel->efx; 578 struct efx_nic *efx = channel->efx;
576 struct sk_buff *skb; 579 struct sk_buff *skb;
577 580 u8 *eh = efx_rx_buf_eh(efx, rx_buf);
578 rx_buf->data += efx->type->rx_buffer_hash_size;
579 rx_buf->len -= efx->type->rx_buffer_hash_size;
580 581
581 /* If we're in loopback test, then pass the packet directly to the 582 /* If we're in loopback test, then pass the packet directly to the
582 * loopback layer, and free the rx_buf here 583 * loopback layer, and free the rx_buf here
583 */ 584 */
584 if (unlikely(efx->loopback_selftest)) { 585 if (unlikely(efx->loopback_selftest)) {
585 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 586 efx_loopback_rx_packet(efx, eh, rx_buf->len);
586 efx_free_rx_buffer(efx, rx_buf); 587 efx_free_rx_buffer(efx, rx_buf);
587 return; 588 return;
588 } 589 }
589 590
590 if (rx_buf->skb) { 591 if (!rx_buf->is_page) {
591 prefetch(skb_shinfo(rx_buf->skb)); 592 skb = rx_buf->u.skb;
592 593
593 skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size); 594 prefetch(skb_shinfo(skb));
594 skb_put(rx_buf->skb, rx_buf->len); 595
596 skb_reserve(skb, efx->type->rx_buffer_hash_size);
597 skb_put(skb, rx_buf->len);
595 598
596 if (efx->net_dev->features & NETIF_F_RXHASH) 599 if (efx->net_dev->features & NETIF_F_RXHASH)
597 rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf); 600 skb->rxhash = efx_rx_buf_hash(eh);
598 601
599 /* Move past the ethernet header. rx_buf->data still points 602 /* Move past the ethernet header. rx_buf->data still points
600 * at the ethernet header */ 603 * at the ethernet header */
601 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, 604 skb->protocol = eth_type_trans(skb, efx->net_dev);
602 efx->net_dev);
603 605
604 skb_record_rx_queue(rx_buf->skb, channel->channel); 606 skb_record_rx_queue(skb, channel->channel);
605 } 607 }
606 608
607 if (likely(checksummed || rx_buf->page)) { 609 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
608 efx_rx_packet_lro(channel, rx_buf, checksummed); 610 checksummed = false;
611
612 if (likely(checksummed || rx_buf->is_page)) {
613 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
609 return; 614 return;
610 } 615 }
611 616
612 /* We now own the SKB */ 617 /* We now own the SKB */
613 skb = rx_buf->skb; 618 skb = rx_buf->u.skb;
614 rx_buf->skb = NULL; 619 rx_buf->u.skb = NULL;
615 EFX_BUG_ON_PARANOID(!skb);
616 620
617 /* Set the SKB flags */ 621 /* Set the SKB flags */
618 skb->ip_summed = CHECKSUM_NONE; 622 skb_checksum_none_assert(skb);
619 623
620 /* Pass the packet up */ 624 /* Pass the packet up */
621 netif_receive_skb(skb); 625 netif_receive_skb(skb);
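Two small idioms in this hunk are worth spelling out. NETIF_F_RXCSUM gates the hardware checksum verdict on the ethtool rx-checksum setting, and skb_checksum_none_assert() replaces the explicit store because freshly allocated skbs already carry CHECKSUM_NONE. A sketch of both (the helper names are mine, not the driver's):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* If the administrator turned receive checksum offload off
 * (ethtool -K ethX rx off), NETIF_F_RXCSUM is clear and the hardware's
 * verdict must be ignored so the stack re-verifies in software. */
static bool rx_csum_usable(const struct net_device *dev, bool hw_summed)
{
	if (!(dev->features & NETIF_F_RXCSUM))
		return false;
	return hw_summed;
}

/* A fresh skb already has ip_summed == CHECKSUM_NONE, so the driver
 * now merely asserts it; the assert compiles away unless DEBUG is set. */
static void rx_deliver_unsummed(struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);
	netif_receive_skb(skb);
}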
@@ -628,7 +632,7 @@ void efx_rx_strategy(struct efx_channel *channel)
628{ 632{
629 enum efx_rx_alloc_method method = rx_alloc_method; 633 enum efx_rx_alloc_method method = rx_alloc_method;
630 634
631 /* Only makes sense to use page based allocation if LRO is enabled */ 635 /* Only makes sense to use page based allocation if GRO is enabled */
632 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { 636 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
633 method = RX_ALLOC_METHOD_SKB; 637 method = RX_ALLOC_METHOD_SKB;
634 } else if (method == RX_ALLOC_METHOD_AUTO) { 638 } else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +643,7 @@ void efx_rx_strategy(struct efx_channel *channel)
639 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; 643 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
640 644
641 /* Decide on the allocation method */ 645 /* Decide on the allocation method */
642 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ? 646 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
643 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); 647 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
644 } 648 }
645 649
@@ -650,15 +654,22 @@ void efx_rx_strategy(struct efx_channel *channel)
650int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) 654int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
651{ 655{
652 struct efx_nic *efx = rx_queue->efx; 656 struct efx_nic *efx = rx_queue->efx;
653 unsigned int rxq_size; 657 unsigned int entries;
654 int rc; 658 int rc;
655 659
660 /* Create the smallest power-of-two aligned ring */
661 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
662 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
663 rx_queue->ptr_mask = entries - 1;
664
656 netif_dbg(efx, probe, efx->net_dev, 665 netif_dbg(efx, probe, efx->net_dev,
657 "creating RX queue %d\n", rx_queue->queue); 666 "creating RX queue %d size %#x mask %#x\n",
667 efx_rx_queue_index(rx_queue), efx->rxq_entries,
668 rx_queue->ptr_mask);
658 669
659 /* Allocate RX buffers */ 670 /* Allocate RX buffers */
660 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); 671 rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
661 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 672 GFP_KERNEL);
662 if (!rx_queue->buffer) 673 if (!rx_queue->buffer)
663 return -ENOMEM; 674 return -ENOMEM;
664 675
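efx_probe_rx_queue() now rounds the requested entry count up to a power of two so descriptor indices wrap with a single AND against ptr_mask rather than a modulo; per the hunk, the driver also clamps the result between EFX_MIN_DMAQ_SIZE and EFX_MAX_DMAQ_SIZE. A standalone model of the sizing and indexing (the clamp is omitted):

#include <stdio.h>

/* Round v up to the next power of two (v >= 1); stand-in for the
 * kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int requested = 300;	/* e.g. from a module parameter */
	unsigned int entries = roundup_pow2(requested);
	unsigned int ptr_mask = entries - 1;
	unsigned int added_count = 1234;	/* free-running counter */

	/* With entries a power of two, '& ptr_mask' equals '% entries'. */
	printf("entries=%u mask=%#x index=%u\n",
	       entries, ptr_mask, added_count & ptr_mask);
	return 0;
}

Keeping added_count and removed_count free-running also makes the fill level a plain subtraction that stays correct across wraparound.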
@@ -672,20 +683,20 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
672 683
673void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 684void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
674{ 685{
686 struct efx_nic *efx = rx_queue->efx;
675 unsigned int max_fill, trigger, limit; 687 unsigned int max_fill, trigger, limit;
676 688
677 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 689 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
678 "initialising RX queue %d\n", rx_queue->queue); 690 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
679 691
680 /* Initialise ptr fields */ 692 /* Initialise ptr fields */
681 rx_queue->added_count = 0; 693 rx_queue->added_count = 0;
682 rx_queue->notified_count = 0; 694 rx_queue->notified_count = 0;
683 rx_queue->removed_count = 0; 695 rx_queue->removed_count = 0;
684 rx_queue->min_fill = -1U; 696 rx_queue->min_fill = -1U;
685 rx_queue->min_overfill = -1U;
686 697
687 /* Initialise limit fields */ 698 /* Initialise limit fields */
688 max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM; 699 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
689 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 700 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
690 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 701 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
691 702
@@ -703,14 +714,14 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
703 struct efx_rx_buffer *rx_buf; 714 struct efx_rx_buffer *rx_buf;
704 715
705 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 716 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
706 "shutting down RX queue %d\n", rx_queue->queue); 717 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
707 718
708 del_timer_sync(&rx_queue->slow_fill); 719 del_timer_sync(&rx_queue->slow_fill);
709 efx_nic_fini_rx(rx_queue); 720 efx_nic_fini_rx(rx_queue);
710 721
711 /* Release RX buffers NB start at index 0 not current HW ptr */ 722 /* Release RX buffers NB start at index 0 not current HW ptr */
712 if (rx_queue->buffer) { 723 if (rx_queue->buffer) {
713 for (i = 0; i <= EFX_RXQ_MASK; i++) { 724 for (i = 0; i <= rx_queue->ptr_mask; i++) {
714 rx_buf = efx_rx_buffer(rx_queue, i); 725 rx_buf = efx_rx_buffer(rx_queue, i);
715 efx_fini_rx_buffer(rx_queue, rx_buf); 726 efx_fini_rx_buffer(rx_queue, rx_buf);
716 } 727 }
@@ -720,7 +731,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
720void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 731void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
721{ 732{
722 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 733 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
723 "destroying RX queue %d\n", rx_queue->queue); 734 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
724 735
725 efx_nic_remove_rx(rx_queue); 736 efx_nic_remove_rx(rx_queue);
726 737
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 85f015f005d5..822f6c2a6a7c 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -48,6 +48,16 @@ static const unsigned char payload_source[ETH_ALEN] = {
48static const char payload_msg[] = 48static const char payload_msg[] =
49 "Hello world! This is an Efx loopback test in progress!"; 49 "Hello world! This is an Efx loopback test in progress!";
50 50
51/* Interrupt mode names */
52static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
53static const char *efx_interrupt_mode_names[] = {
54 [EFX_INT_MODE_MSIX] = "MSI-X",
55 [EFX_INT_MODE_MSI] = "MSI",
56 [EFX_INT_MODE_LEGACY] = "legacy",
57};
58#define INT_MODE(efx) \
59 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
60
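The INT_MODE() helper added above pairs a designated-initializer name table with a bounds-checked lookup. STRING_TABLE_LOOKUP's body is not shown in this hunk, so the macro below is an assumption about its shape based on how it is invoked:

#include <stdio.h>

enum int_mode { INT_MODE_MSIX, INT_MODE_MSI, INT_MODE_LEGACY, INT_MODE_MAX };

static const char *const int_mode_names[] = {
	[INT_MODE_MSIX]   = "MSI-X",
	[INT_MODE_MSI]    = "MSI",
	[INT_MODE_LEGACY] = "legacy",
};

/* Assumed lookup shape: bounds-check so a corrupt value prints
 * something sane instead of indexing off the end of the table. */
#define NAME_LOOKUP(val, table, max) \
	((unsigned int)(val) < (max) && (table)[(val)] ? (table)[(val)] : "(invalid)")

int main(void)
{
	enum int_mode mode = INT_MODE_MSI;

	printf("interrupt mode: %s\n",
	       NAME_LOOKUP(mode, int_mode_names, INT_MODE_MAX));
	return 0;
}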
51/** 61/**
52 * efx_loopback_state - persistent state during a loopback selftest 62 * efx_loopback_state - persistent state during a loopback selftest
53 * @flush: Drop all packets in efx_loopback_rx_packet 63 * @flush: Drop all packets in efx_loopback_rx_packet
@@ -121,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
121static int efx_test_interrupts(struct efx_nic *efx, 131static int efx_test_interrupts(struct efx_nic *efx,
122 struct efx_self_tests *tests) 132 struct efx_self_tests *tests)
123{ 133{
124 struct efx_channel *channel;
125
126 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 134 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
127 tests->interrupt = -1; 135 tests->interrupt = -1;
128 136
@@ -130,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
130 efx->last_irq_cpu = -1; 138 efx->last_irq_cpu = -1;
131 smp_wmb(); 139 smp_wmb();
132 140
133 /* ACK each interrupting event queue. Receiving an interrupt due to
134 * traffic before a test event is raised is considered a pass */
135 efx_for_each_channel(channel, efx) {
136 if (channel->work_pending)
137 efx_process_channel_now(channel);
138 if (efx->last_irq_cpu >= 0)
139 goto success;
140 }
141
142 efx_nic_generate_interrupt(efx); 141 efx_nic_generate_interrupt(efx);
143 142
144 /* Wait for arrival of test interrupt. */ 143 /* Wait for arrival of test interrupt. */
@@ -163,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
163 struct efx_self_tests *tests) 162 struct efx_self_tests *tests)
164{ 163{
165 struct efx_nic *efx = channel->efx; 164 struct efx_nic *efx = channel->efx;
166 unsigned int magic_count, count; 165 unsigned int read_ptr, count;
167 166
168 tests->eventq_dma[channel->channel] = -1; 167 tests->eventq_dma[channel->channel] = -1;
169 tests->eventq_int[channel->channel] = -1; 168 tests->eventq_int[channel->channel] = -1;
170 tests->eventq_poll[channel->channel] = -1; 169 tests->eventq_poll[channel->channel] = -1;
171 170
172 magic_count = channel->magic_count; 171 read_ptr = channel->eventq_read_ptr;
173 channel->efx->last_irq_cpu = -1; 172 channel->efx->last_irq_cpu = -1;
174 smp_wmb(); 173 smp_wmb();
175 174
@@ -180,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
180 do { 179 do {
181 schedule_timeout_uninterruptible(HZ / 100); 180 schedule_timeout_uninterruptible(HZ / 100);
182 181
183 if (channel->work_pending) 182 if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
184 efx_process_channel_now(channel);
185
186 if (channel->magic_count != magic_count)
187 goto eventq_ok; 183 goto eventq_ok;
188 } while (++count < 2); 184 } while (++count < 2);
189 185
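The rewritten event-queue test watches eventq_read_ptr instead of the removed magic_count, and wraps each load in ACCESS_ONCE() so the compiler must re-read the value the IRQ/NAPI path updates rather than hoisting the load out of the loop. The polling skeleton, extracted (timeout arithmetic as in the hunk):

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Sketch: wait up to two 10 ms ticks for another context to advance
 * *read_ptr.  ACCESS_ONCE() forces a fresh load on each iteration. */
static bool wait_for_eventq_progress(unsigned int *read_ptr)
{
	unsigned int start = ACCESS_ONCE(*read_ptr);
	int count = 0;

	do {
		schedule_timeout_uninterruptible(HZ / 100);
		if (ACCESS_ONCE(*read_ptr) != start)
			return true;	/* an event was processed */
	} while (++count < 2);

	return false;
}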
@@ -201,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
201 } 197 }
202 198
203 /* Check to see if event was received even if interrupt wasn't */ 199 /* Check to see if event was received even if interrupt wasn't */
204 efx_process_channel_now(channel); 200 if (efx_nic_event_present(channel)) {
205 if (channel->magic_count != magic_count) {
206 netif_err(efx, drv, efx->net_dev, 201 netif_err(efx, drv, efx->net_dev,
207 "channel %d event was generated, but " 202 "channel %d event was generated, but "
208 "failed to trigger an interrupt\n", channel->channel); 203 "failed to trigger an interrupt\n", channel->channel);
@@ -506,7 +501,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
506 501
507 for (i = 0; i < 3; i++) { 502 for (i = 0; i < 3; i++) {
508 /* Determine how many packets to send */ 503 /* Determine how many packets to send */
509 state->packet_count = EFX_TXQ_SIZE / 3; 504 state->packet_count = efx->txq_entries / 3;
510 state->packet_count = min(1 << (i << 2), state->packet_count); 505 state->packet_count = min(1 << (i << 2), state->packet_count);
511 state->skbs = kzalloc(sizeof(state->skbs[0]) * 506 state->skbs = kzalloc(sizeof(state->skbs[0]) *
512 state->packet_count, GFP_KERNEL); 507 state->packet_count, GFP_KERNEL);
@@ -567,7 +562,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
567 efx->type->monitor(efx); 562 efx->type->monitor(efx);
568 mutex_unlock(&efx->mac_lock); 563 mutex_unlock(&efx->mac_lock);
569 } else { 564 } else {
570 struct efx_channel *channel = &efx->channel[0]; 565 struct efx_channel *channel = efx_get_channel(efx, 0);
571 if (channel->work_pending) 566 if (channel->work_pending)
572 efx_process_channel_now(channel); 567 efx_process_channel_now(channel);
573 } 568 }
@@ -594,6 +589,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
594{ 589{
595 enum efx_loopback_mode mode; 590 enum efx_loopback_mode mode;
596 struct efx_loopback_state *state; 591 struct efx_loopback_state *state;
592 struct efx_channel *channel = efx_get_channel(efx, 0);
597 struct efx_tx_queue *tx_queue; 593 struct efx_tx_queue *tx_queue;
598 int rc = 0; 594 int rc = 0;
599 595
@@ -633,8 +629,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
633 goto out; 629 goto out;
634 } 630 }
635 631
636 /* Test both types of TX queue */ 632 /* Test all enabled types of TX queue */
637 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) { 633 efx_for_each_channel_tx_queue(tx_queue, channel) {
638 state->offload_csum = (tx_queue->queue & 634 state->offload_csum = (tx_queue->queue &
639 EFX_TXQ_TYPE_OFFLOAD); 635 EFX_TXQ_TYPE_OFFLOAD);
640 rc = efx_test_loopback(tx_queue, 636 rc = efx_test_loopback(tx_queue,
@@ -699,12 +695,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
699 /* Offline (i.e. disruptive) testing 695 /* Offline (i.e. disruptive) testing
700 * This checks MAC and PHY loopback on the specified port. */ 696 * This checks MAC and PHY loopback on the specified port. */
701 697
702 /* force the carrier state off so the kernel doesn't transmit during 698 /* Detach the device so the kernel doesn't transmit during the
703 * the loopback test, and the watchdog timeout doesn't fire. Also put 699 * loopback test and the watchdog timeout doesn't fire.
704 * falcon into loopback for the register test.
705 */ 700 */
701 netif_device_detach(efx->net_dev);
702
706 mutex_lock(&efx->mac_lock); 703 mutex_lock(&efx->mac_lock);
707 efx->port_inhibited = true;
708 if (efx->loopback_modes) { 704 if (efx->loopback_modes) {
709 /* We need the 312 clock from the PHY to test the XMAC 705 /* We need the 312 clock from the PHY to test the XMAC
710 * registers, so move into XGMII loopback if available */ 706 * registers, so move into XGMII loopback if available */
@@ -754,11 +750,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
754 /* restore the PHY to the previous state */ 750 /* restore the PHY to the previous state */
755 mutex_lock(&efx->mac_lock); 751 mutex_lock(&efx->mac_lock);
756 efx->phy_mode = phy_mode; 752 efx->phy_mode = phy_mode;
757 efx->port_inhibited = false;
758 efx->loopback_mode = loopback_mode; 753 efx->loopback_mode = loopback_mode;
759 __efx_reconfigure_port(efx); 754 __efx_reconfigure_port(efx);
760 mutex_unlock(&efx->mac_lock); 755 mutex_unlock(&efx->mac_lock);
761 756
757 netif_device_attach(efx->net_dev);
758
762 return rc_test; 759 return rc_test;
763} 760}
764 761
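Replacing port_inhibited with netif_device_detach()/netif_device_attach() leans on the core: while detached, the TX queues are stopped and the watchdog ignores the device, which is exactly the quiescing a disruptive loopback test needs. The bracketing pattern in isolation (the callback indirection is illustrative):

#include <linux/netdevice.h>

/* Sketch: quiesce transmission around a disruptive operation.
 * 'run_test' is an illustrative callback; RTNL is held, as in the
 * selftest path. */
static int run_disruptive_test(struct net_device *dev,
			       int (*run_test)(struct net_device *))
{
	int rc;

	netif_device_detach(dev);
	rc = run_test(dev);
	netif_device_attach(dev);	/* restarts queues if appropriate */
	return rc;
}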
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index aed495a4dad7..dba5456e70f3 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 3fab030f8ab5..fb4721f780ff 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -129,7 +129,7 @@ static int siena_probe_port(struct efx_nic *efx)
129 return 0; 129 return 0;
130} 130}
131 131
132void siena_remove_port(struct efx_nic *efx) 132static void siena_remove_port(struct efx_nic *efx)
133{ 133{
134 efx->phy_op->remove(efx); 134 efx->phy_op->remove(efx);
135 efx_nic_free_buffer(efx, &efx->stats_buffer); 135 efx_nic_free_buffer(efx, &efx->stats_buffer);
@@ -194,13 +194,7 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
194 194
195static int siena_probe_nvconfig(struct efx_nic *efx) 195static int siena_probe_nvconfig(struct efx_nic *efx)
196{ 196{
197 int rc; 197 return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
198
199 rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
200 if (rc)
201 return rc;
202
203 return 0;
204} 198}
205 199
206static int siena_probe_nic(struct efx_nic *efx) 200static int siena_probe_nic(struct efx_nic *efx)
@@ -226,19 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx)
226 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 220 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
227 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 221 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
228 222
223 /* Initialise MCDI */
224 nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
225 FR_CZ_MC_TREG_SMEM,
226 FR_CZ_MC_TREG_SMEM_STEP *
227 FR_CZ_MC_TREG_SMEM_ROWS);
228 if (!nic_data->mcdi_smem) {
229 netif_err(efx, probe, efx->net_dev,
230 "could not map MCDI at %llx+%x\n",
231 (unsigned long long)efx->membase_phys +
232 FR_CZ_MC_TREG_SMEM,
233 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
234 rc = -ENOMEM;
235 goto fail1;
236 }
229 efx_mcdi_init(efx); 237 efx_mcdi_init(efx);
230 238
231 /* Recover from a failed assertion before probing */ 239 /* Recover from a failed assertion before probing */
232 rc = efx_mcdi_handle_assertion(efx); 240 rc = efx_mcdi_handle_assertion(efx);
233 if (rc) 241 if (rc)
234 goto fail1; 242 goto fail2;
235
236 rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
237 if (rc) {
238 netif_err(efx, probe, efx->net_dev,
239 "Failed to read MCPU firmware version - rc %d\n", rc);
240 goto fail1; /* MCPU absent? */
241 }
242 243
243 /* Let the BMC know that the driver is now in charge of link and 244 /* Let the BMC know that the driver is now in charge of link and
244 * filter settings. We must do this before we reset the NIC */ 245 * filter settings. We must do this before we reset the NIC */
@@ -293,6 +294,7 @@ fail4:
293fail3: 294fail3:
294 efx_mcdi_drv_attach(efx, false, NULL); 295 efx_mcdi_drv_attach(efx, false, NULL);
295fail2: 296fail2:
297 iounmap(nic_data->mcdi_smem);
296fail1: 298fail1:
297 kfree(efx->nic_data); 299 kfree(efx->nic_data);
298 return rc; 300 return rc;
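Mapping MC_TREG_SMEM separately lets mem_map_size shrink to FR_CZ_MC_TREG_SMEM (see the siena_a0_nic_type hunk below), keeping the main BAR mapping smaller. The ioremap-with-error-path shape, reduced to a sketch (the function name is mine):

#include <linux/io.h>
#include <linux/kernel.h>

/* Sketch: map a secondary register window at bar_base + offset,
 * logging and returning NULL on failure as the hunk above does. */
static void __iomem *map_mcdi_smem(resource_size_t bar_base,
				   unsigned long offset, size_t size)
{
	void __iomem *mem = ioremap_nocache(bar_base + offset, size);

	if (!mem)
		pr_err("could not map MCDI at %llx+%zx\n",
		       (unsigned long long)(bar_base + offset), size);
	return mem;
}

Both teardown paths then need the matching iounmap(): the new fail2 label in probe, and siena_remove_nic() as shown above.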
@@ -354,11 +356,6 @@ static int siena_init_nic(struct efx_nic *efx)
354 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); 356 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
355 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); 357 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
356 358
357 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
358 /* No MCDI operation has been defined to set thresholds */
359 netif_err(efx, hw, efx->net_dev,
360 "ignoring RX flow control thresholds\n");
361
362 /* Enable event logging */ 359 /* Enable event logging */
363 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 360 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
364 if (rc) 361 if (rc)
@@ -377,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx)
377 374
378static void siena_remove_nic(struct efx_nic *efx) 375static void siena_remove_nic(struct efx_nic *efx)
379{ 376{
377 struct siena_nic_data *nic_data = efx->nic_data;
378
380 efx_nic_free_buffer(efx, &efx->irq_status); 379 efx_nic_free_buffer(efx, &efx->irq_status);
381 380
382 siena_reset_hw(efx, RESET_TYPE_ALL); 381 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -386,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx)
386 efx_mcdi_drv_attach(efx, false, NULL); 385 efx_mcdi_drv_attach(efx, false, NULL);
387 386
388 /* Tear down the private nic state */ 387 /* Tear down the private nic state */
389 kfree(efx->nic_data); 388 iounmap(nic_data->mcdi_smem);
389 kfree(nic_data);
390 efx->nic_data = NULL; 390 efx->nic_data = NULL;
391} 391}
392 392
@@ -450,7 +450,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
450 mac_stats->rx_bad_bytes); 450 mac_stats->rx_bad_bytes);
451 MAC_STAT(rx_packets, RX_PKTS); 451 MAC_STAT(rx_packets, RX_PKTS);
452 MAC_STAT(rx_good, RX_GOOD_PKTS); 452 MAC_STAT(rx_good, RX_GOOD_PKTS);
453 mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good; 453 MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
454 MAC_STAT(rx_pause, RX_PAUSE_PKTS); 454 MAC_STAT(rx_pause, RX_PAUSE_PKTS);
455 MAC_STAT(rx_control, RX_CONTROL_PKTS); 455 MAC_STAT(rx_control, RX_CONTROL_PKTS);
456 MAC_STAT(rx_unicast, RX_UNICAST_PKTS); 456 MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
@@ -520,16 +520,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
520 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); 520 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
521} 521}
522 522
523void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
524{
525 struct siena_nic_data *nic_data = efx->nic_data;
526 snprintf(buf, len, "%u.%u.%u.%u",
527 (unsigned int)(nic_data->fw_version >> 48),
528 (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
529 (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
530 (unsigned int)(nic_data->fw_version & 0xffff));
531}
532
533/************************************************************************** 523/**************************************************************************
534 * 524 *
535 * Wake on LAN 525 * Wake on LAN
@@ -562,7 +552,7 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
562 if (nic_data->wol_filter_id != -1) 552 if (nic_data->wol_filter_id != -1)
563 efx_mcdi_wol_filter_remove(efx, 553 efx_mcdi_wol_filter_remove(efx,
564 nic_data->wol_filter_id); 554 nic_data->wol_filter_id);
565 rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, 555 rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
566 &nic_data->wol_filter_id); 556 &nic_data->wol_filter_id);
567 if (rc) 557 if (rc)
568 goto fail; 558 goto fail;
@@ -609,7 +599,7 @@ static void siena_init_wol(struct efx_nic *efx)
609 ************************************************************************** 599 **************************************************************************
610 */ 600 */
611 601
612struct efx_nic_type siena_a0_nic_type = { 602const struct efx_nic_type siena_a0_nic_type = {
613 .probe = siena_probe_nic, 603 .probe = siena_probe_nic,
614 .remove = siena_remove_nic, 604 .remove = siena_remove_nic,
615 .init = siena_init_nic, 605 .init = siena_init_nic,
@@ -634,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = {
634 .default_mac_ops = &efx_mcdi_mac_operations, 624 .default_mac_ops = &efx_mcdi_mac_operations,
635 625
636 .revision = EFX_REV_SIENA_A0, 626 .revision = EFX_REV_SIENA_A0,
637 .mem_map_size = (FR_CZ_MC_TREG_SMEM + 627 .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
638 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
639 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 628 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
640 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 629 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
641 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 630 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
@@ -651,6 +640,6 @@ struct efx_nic_type siena_a0_nic_type = {
651 .tx_dc_base = 0x88000, 640 .tx_dc_base = 0x88000,
652 .rx_dc_base = 0x68000, 641 .rx_dc_base = 0x68000,
653 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 642 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
654 NETIF_F_RXHASH), 643 NETIF_F_RXHASH | NETIF_F_NTUPLE),
655 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, 644 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
656}; 645};
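Constifying siena_a0_nic_type moves the whole method table into read-only data, so a stray write faults instead of silently corrupting function pointers. The idiom in miniature (field set reduced; names illustrative):

#include <stdio.h>

struct nic_type {
	int  (*probe)(void *nic);
	void (*remove)(void *nic);
	unsigned int mem_map_size;
};

static int  demo_probe(void *nic)  { (void)nic; printf("probe\n"); return 0; }
static void demo_remove(void *nic) { (void)nic; printf("remove\n"); }

/* 'const' places the table in .rodata. */
static const struct nic_type demo_nic_type = {
	.probe        = demo_probe,
	.remove       = demo_remove,
	.mem_map_size = 0x1000,		/* illustrative */
};

int main(void)
{
	void *nic = NULL;

	if (demo_nic_type.probe(nic) == 0)
		demo_nic_type.remove(nic);
	return 0;
}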
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce0813a..71f2e3ebe1c7 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd. 3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -61,6 +61,11 @@ struct efx_spi_device {
61 unsigned int block_size; 61 unsigned int block_size;
62}; 62};
63 63
64static inline bool efx_spi_present(const struct efx_spi_device *spi)
65{
66 return spi->size != 0;
67}
68
64int falcon_spi_cmd(struct efx_nic *efx, 69int falcon_spi_cmd(struct efx_nic *efx,
65 const struct efx_spi_device *spi, unsigned int command, 70 const struct efx_spi_device *spi, unsigned int command,
66 int address, const void* in, void *out, size_t len); 71 int address, const void* in, void *out, size_t len);
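efx_spi_present() above encodes "device fitted" as a nonzero size, so callers can keep a statically allocated efx_spi_device and test it cheaply. A toy equivalent:

#include <stdbool.h>
#include <stdio.h>

struct spi_info {
	unsigned int size;	/* 0 => no device fitted */
};

static bool spi_present(const struct spi_info *spi)
{
	return spi->size != 0;
}

int main(void)
{
	struct spi_info eeprom = { .size = 0 };		/* not fitted */
	struct spi_info flash  = { .size = 1 << 20 };	/* 1 MiB part */

	printf("eeprom present: %d\n", spi_present(&eeprom));
	printf("flash present: %d\n", spi_present(&flash));
	return 0;
}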
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 6791be90c2fe..7b0fd89e7b85 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -15,14 +15,9 @@
15#include "mdio_10g.h" 15#include "mdio_10g.h"
16#include "nic.h" 16#include "nic.h"
17#include "phy.h" 17#include "phy.h"
18#include "regs.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h"
21 19
22/* We expect these MMDs to be in the package. SFT9001 also has a 20/* We expect these MMDs to be in the package. */
23 * clause 22 extension MMD, but since it doesn't have all the generic
24 * MMD registers it is pointless to include it here.
25 */
26#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ 21#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
27 MDIO_DEVS_PCS | \ 22 MDIO_DEVS_PCS | \
28 MDIO_DEVS_PHYXS | \ 23 MDIO_DEVS_PHYXS | \
@@ -33,12 +28,6 @@
33 (1 << LOOPBACK_PMAPMD) | \ 28 (1 << LOOPBACK_PMAPMD) | \
34 (1 << LOOPBACK_PHYXS_WS)) 29 (1 << LOOPBACK_PHYXS_WS))
35 30
36#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \
37 (1 << LOOPBACK_PHYXS) | \
38 (1 << LOOPBACK_PCS) | \
39 (1 << LOOPBACK_PMAPMD) | \
40 (1 << LOOPBACK_PHYXS_WS))
41
42/* We complain if we fail to see the link partner as 10G capable this many 31/* We complain if we fail to see the link partner as 10G capable this many
43 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 32 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
44 */ 33 */
@@ -50,9 +39,8 @@
50#define PMA_PMD_EXT_GMII_EN_WIDTH 1 39#define PMA_PMD_EXT_GMII_EN_WIDTH 1
51#define PMA_PMD_EXT_CLK_OUT_LBN 2 40#define PMA_PMD_EXT_CLK_OUT_LBN 2
52#define PMA_PMD_EXT_CLK_OUT_WIDTH 1 41#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
53#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 /* SFX7101 only */ 42#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
54#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 43#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
55#define PMA_PMD_EXT_CLK312_LBN 8 /* SFT9001 only */
56#define PMA_PMD_EXT_CLK312_WIDTH 1 44#define PMA_PMD_EXT_CLK312_WIDTH 1
57#define PMA_PMD_EXT_LPOWER_LBN 12 45#define PMA_PMD_EXT_LPOWER_LBN 12
58#define PMA_PMD_EXT_LPOWER_WIDTH 1 46#define PMA_PMD_EXT_LPOWER_WIDTH 1
@@ -84,7 +72,6 @@
84#define PMA_PMD_LED_FLASH (3) 72#define PMA_PMD_LED_FLASH (3)
85#define PMA_PMD_LED_MASK 3 73#define PMA_PMD_LED_MASK 3
86/* All LEDs under hardware control */ 74/* All LEDs under hardware control */
87#define SFT9001_PMA_PMD_LED_DEFAULT 0
88/* Green and Amber under hardware control, Red off */ 75/* Green and Amber under hardware control, Red off */
89#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 76#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
90 77
@@ -98,31 +85,7 @@
98#define PMA_PMD_SPEED_LBN 4 85#define PMA_PMD_SPEED_LBN 4
99#define PMA_PMD_SPEED_WIDTH 4 86#define PMA_PMD_SPEED_WIDTH 4
100 87
101/* Cable diagnostics - SFT9001 only */ 88/* Misc register defines */
102#define PMA_PMD_CDIAG_CTRL_REG 49213
103#define CDIAG_CTRL_IMMED_LBN 15
104#define CDIAG_CTRL_BRK_LINK_LBN 12
105#define CDIAG_CTRL_IN_PROG_LBN 11
106#define CDIAG_CTRL_LEN_UNIT_LBN 10
107#define CDIAG_CTRL_LEN_METRES 1
108#define PMA_PMD_CDIAG_RES_REG 49174
109#define CDIAG_RES_A_LBN 12
110#define CDIAG_RES_B_LBN 8
111#define CDIAG_RES_C_LBN 4
112#define CDIAG_RES_D_LBN 0
113#define CDIAG_RES_WIDTH 4
114#define CDIAG_RES_OPEN 2
115#define CDIAG_RES_OK 1
116#define CDIAG_RES_INVALID 0
117/* Set of 4 registers for pairs A-D */
118#define PMA_PMD_CDIAG_LEN_REG 49175
119
120/* Serdes control registers - SFT9001 only */
121#define PMA_PMD_CSERDES_CTRL_REG 64258
122/* Set the 156.25 MHz output to 312.5 MHz to drive Falcon's XMAC */
123#define PMA_PMD_CSERDES_DEFAULT 0x000f
124
125/* Misc register defines - SFX7101 only */
126#define PCS_CLOCK_CTRL_REG 55297 89#define PCS_CLOCK_CTRL_REG 55297
127#define PLL312_RST_N_LBN 2 90#define PLL312_RST_N_LBN 2
128 91
@@ -185,121 +148,17 @@ struct tenxpress_phy_data {
185 int bad_lp_tries; 148 int bad_lp_tries;
186}; 149};
187 150
188static ssize_t show_phy_short_reach(struct device *dev,
189 struct device_attribute *attr, char *buf)
190{
191 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
192 int reg;
193
194 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR);
195 return sprintf(buf, "%d\n", !!(reg & MDIO_PMA_10GBT_TXPWR_SHORT));
196}
197
198static ssize_t set_phy_short_reach(struct device *dev,
199 struct device_attribute *attr,
200 const char *buf, size_t count)
201{
202 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
203 int rc;
204
205 rtnl_lock();
206 if (efx->state != STATE_RUNNING) {
207 rc = -EBUSY;
208 } else {
209 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
210 MDIO_PMA_10GBT_TXPWR_SHORT,
211 count != 0 && *buf != '0');
212 rc = efx_reconfigure_port(efx);
213 }
214 rtnl_unlock();
215
216 return rc < 0 ? rc : (ssize_t)count;
217}
218
219static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
220 set_phy_short_reach);
221
222int sft9001_wait_boot(struct efx_nic *efx)
223{
224 unsigned long timeout = jiffies + HZ + 1;
225 int boot_stat;
226
227 for (;;) {
228 boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
229 PCS_BOOT_STATUS_REG);
230 if (boot_stat >= 0) {
231 netif_dbg(efx, hw, efx->net_dev,
232 "PHY boot status = %#x\n", boot_stat);
233 switch (boot_stat &
234 ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
235 (3 << PCS_BOOT_PROGRESS_LBN) |
236 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
237 (1 << PCS_BOOT_CODE_STARTED_LBN))) {
238 case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
239 (PCS_BOOT_PROGRESS_CHECKSUM <<
240 PCS_BOOT_PROGRESS_LBN)):
241 case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
242 (PCS_BOOT_PROGRESS_INIT <<
243 PCS_BOOT_PROGRESS_LBN) |
244 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
245 return -EINVAL;
246 case ((PCS_BOOT_PROGRESS_WAIT_MDIO <<
247 PCS_BOOT_PROGRESS_LBN) |
248 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
249 return (efx->phy_mode & PHY_MODE_SPECIAL) ?
250 0 : -EIO;
251 case ((PCS_BOOT_PROGRESS_JUMP <<
252 PCS_BOOT_PROGRESS_LBN) |
253 (1 << PCS_BOOT_CODE_STARTED_LBN)):
254 case ((PCS_BOOT_PROGRESS_JUMP <<
255 PCS_BOOT_PROGRESS_LBN) |
256 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
257 (1 << PCS_BOOT_CODE_STARTED_LBN)):
258 return (efx->phy_mode & PHY_MODE_SPECIAL) ?
259 -EIO : 0;
260 default:
261 if (boot_stat & (1 << PCS_BOOT_FATAL_ERROR_LBN))
262 return -EIO;
263 break;
264 }
265 }
266
267 if (time_after_eq(jiffies, timeout))
268 return -ETIMEDOUT;
269
270 msleep(50);
271 }
272}
273
274static int tenxpress_init(struct efx_nic *efx) 151static int tenxpress_init(struct efx_nic *efx)
275{ 152{
276 int reg; 153 /* Enable 312.5 MHz clock */
277 154 efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
278 if (efx->phy_type == PHY_TYPE_SFX7101) { 155 1 << CLK312_EN_LBN);
279 /* Enable 312.5 MHz clock */
280 efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
281 1 << CLK312_EN_LBN);
282 } else {
283 /* Enable 312.5 MHz clock and GMII */
284 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
285 reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
286 (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
287 (1 << PMA_PMD_EXT_CLK312_LBN) |
288 (1 << PMA_PMD_EXT_ROBUST_LBN));
289
290 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
291 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT,
292 GPHY_XCONTROL_REG, 1 << GPHY_ISOLATE_LBN,
293 false);
294 }
295 156
296 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */ 157 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
297 if (efx->phy_type == PHY_TYPE_SFX7101) { 158 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
298 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, 159 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
299 1 << PMA_PMA_LED_ACTIVITY_LBN, true); 160 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
300 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, 161 SFX7101_PMA_PMD_LED_DEFAULT);
301 SFX7101_PMA_PMD_LED_DEFAULT);
302 }
303 162
304 return 0; 163 return 0;
305} 164}
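With the SFT9001 paths gone, tenxpress_init() is just two clause-45 MDIO accesses; the efx_mdio_set_flag() used here is a read-modify-write of a single bit. A standalone model of that helper over a fake register file (the names and register map are stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Fake register file standing in for MDIO clause-45 accesses. */
static unsigned int regs[65536];

static unsigned int mdio_read(unsigned int addr)          { return regs[addr]; }
static void mdio_write(unsigned int addr, unsigned int v) { regs[addr] = v; }

/* Read-modify-write a single flag, the shape of efx_mdio_set_flag(). */
static void mdio_set_flag(unsigned int addr, unsigned int mask, bool set)
{
	unsigned int v = mdio_read(addr);

	mdio_write(addr, set ? (v | mask) : (v & ~mask));
}

int main(void)
{
	mdio_set_flag(55297, 1u << 2, true);	/* cf. PLL312_RST_N_LBN */
	printf("reg 55297 = %#x\n", mdio_read(55297));
	return 0;
}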
@@ -307,7 +166,6 @@ static int tenxpress_init(struct efx_nic *efx)
307static int tenxpress_phy_probe(struct efx_nic *efx) 166static int tenxpress_phy_probe(struct efx_nic *efx)
308{ 167{
309 struct tenxpress_phy_data *phy_data; 168 struct tenxpress_phy_data *phy_data;
310 int rc;
311 169
312 /* Allocate phy private storage */ 170 /* Allocate phy private storage */
313 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 171 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
@@ -316,42 +174,15 @@ static int tenxpress_phy_probe(struct efx_nic *efx)
316 efx->phy_data = phy_data; 174 efx->phy_data = phy_data;
317 phy_data->phy_mode = efx->phy_mode; 175 phy_data->phy_mode = efx->phy_mode;
318 176
319 /* Create any special files */ 177 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
320 if (efx->phy_type == PHY_TYPE_SFT9001B) { 178 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
321 rc = device_create_file(&efx->pci_dev->dev,
322 &dev_attr_phy_short_reach);
323 if (rc)
324 goto fail;
325 }
326
327 if (efx->phy_type == PHY_TYPE_SFX7101) {
328 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
329 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
330
331 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
332 179
333 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | 180 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
334 ADVERTISED_10000baseT_Full);
335 } else {
336 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
337 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
338 181
339 efx->loopback_modes = (SFT9001_LOOPBACKS | 182 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
340 FALCON_XMAC_LOOPBACKS | 183 ADVERTISED_10000baseT_Full);
341 FALCON_GMAC_LOOPBACKS);
342
343 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
344 ADVERTISED_10000baseT_Full |
345 ADVERTISED_1000baseT_Full |
346 ADVERTISED_100baseT_Full);
347 }
348 184
349 return 0; 185 return 0;
350
351fail:
352 kfree(efx->phy_data);
353 efx->phy_data = NULL;
354 return rc;
355} 186}
356 187
357static int tenxpress_phy_init(struct efx_nic *efx) 188static int tenxpress_phy_init(struct efx_nic *efx)
@@ -361,21 +192,11 @@ static int tenxpress_phy_init(struct efx_nic *efx)
361 falcon_board(efx)->type->init_phy(efx); 192 falcon_board(efx)->type->init_phy(efx);
362 193
363 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { 194 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
364 if (efx->phy_type == PHY_TYPE_SFT9001A) {
365 int reg;
366 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
367 PMA_PMD_XCONTROL_REG);
368 reg |= (1 << PMA_PMD_EXT_SSR_LBN);
369 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
370 PMA_PMD_XCONTROL_REG, reg);
371 mdelay(200);
372 }
373
374 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); 195 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
375 if (rc < 0) 196 if (rc < 0)
376 return rc; 197 return rc;
377 198
378 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 199 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
379 if (rc < 0) 200 if (rc < 0)
380 return rc; 201 return rc;
381 } 202 }
@@ -403,7 +224,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
403{ 224{
404 int rc, reg; 225 int rc, reg;
405 226
406 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 227 /* The XGMAC clock is driven from the SFX7101 312MHz clock, so
407 * a special software reset can glitch the XGMAC sufficiently for stats 228 * a special software reset can glitch the XGMAC sufficiently for stats
408 * requests to fail. */ 229 * requests to fail. */
409 falcon_stop_nic_stats(efx); 230 falcon_stop_nic_stats(efx);
@@ -484,53 +305,18 @@ static bool sfx7101_link_ok(struct efx_nic *efx)
484 MDIO_DEVS_PHYXS); 305 MDIO_DEVS_PHYXS);
485} 306}
486 307
487static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
488{
489 u32 reg;
490
491 if (efx_phy_mode_disabled(efx->phy_mode))
492 return false;
493 else if (efx->loopback_mode == LOOPBACK_GPHY)
494 return true;
495 else if (efx->loopback_mode)
496 return efx_mdio_links_ok(efx,
497 MDIO_DEVS_PMAPMD |
498 MDIO_DEVS_PHYXS);
499
500 /* We must use the same definition of link state as LASI,
501 * otherwise we can miss a link state transition
502 */
503 if (ecmd->speed == 10000) {
504 reg = efx_mdio_read(efx, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
505 return reg & MDIO_PCS_10GBRT_STAT1_BLKLK;
506 } else {
507 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_STATUS_REG);
508 return reg & (1 << C22EXT_STATUS_LINK_LBN);
509 }
510}
511
512static void tenxpress_ext_loopback(struct efx_nic *efx) 308static void tenxpress_ext_loopback(struct efx_nic *efx)
513{ 309{
514 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1, 310 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
515 1 << LOOPBACK_NEAR_LBN, 311 1 << LOOPBACK_NEAR_LBN,
516 efx->loopback_mode == LOOPBACK_PHYXS); 312 efx->loopback_mode == LOOPBACK_PHYXS);
517 if (efx->phy_type != PHY_TYPE_SFX7101)
518 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, GPHY_XCONTROL_REG,
519 1 << GPHY_LOOPBACK_NEAR_LBN,
520 efx->loopback_mode == LOOPBACK_GPHY);
521} 313}
522 314
523static void tenxpress_low_power(struct efx_nic *efx) 315static void tenxpress_low_power(struct efx_nic *efx)
524{ 316{
525 if (efx->phy_type == PHY_TYPE_SFX7101) 317 efx_mdio_set_mmds_lpower(
526 efx_mdio_set_mmds_lpower( 318 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
527 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER), 319 TENXPRESS_REQUIRED_DEVS);
528 TENXPRESS_REQUIRED_DEVS);
529 else
530 efx_mdio_set_flag(
531 efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG,
532 1 << PMA_PMD_EXT_LPOWER_LBN,
533 !!(efx->phy_mode & PHY_MODE_LOW_POWER));
534} 320}
535 321
536static int tenxpress_phy_reconfigure(struct efx_nic *efx) 322static int tenxpress_phy_reconfigure(struct efx_nic *efx)
@@ -550,12 +336,7 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
550 336
551 if (loop_reset || phy_mode_change) { 337 if (loop_reset || phy_mode_change) {
552 tenxpress_special_reset(efx); 338 tenxpress_special_reset(efx);
553 339 falcon_reset_xaui(efx);
554 /* Reset XAUI if we were in 10G, and are staying
555 * in 10G. If we're moving into and out of 10G
556 * then xaui will be reset anyway */
557 if (EFX_IS10G(efx))
558 falcon_reset_xaui(efx);
559 } 340 }
560 341
561 tenxpress_low_power(efx); 342 tenxpress_low_power(efx);
@@ -578,29 +359,12 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
578{ 359{
579 struct efx_link_state old_state = efx->link_state; 360 struct efx_link_state old_state = efx->link_state;
580 361
581 if (efx->phy_type == PHY_TYPE_SFX7101) { 362 efx->link_state.up = sfx7101_link_ok(efx);
582 efx->link_state.up = sfx7101_link_ok(efx); 363 efx->link_state.speed = 10000;
583 efx->link_state.speed = 10000; 364 efx->link_state.fd = true;
584 efx->link_state.fd = true; 365 efx->link_state.fc = efx_mdio_get_pause(efx);
585 efx->link_state.fc = efx_mdio_get_pause(efx);
586
587 sfx7101_check_bad_lp(efx, efx->link_state.up);
588 } else {
589 struct ethtool_cmd ecmd;
590
591 /* Check the LASI alarm first */
592 if (efx->loopback_mode == LOOPBACK_NONE &&
593 !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
594 MDIO_PMA_LASI_LSALARM))
595 return false;
596 366
597 tenxpress_get_settings(efx, &ecmd); 367 sfx7101_check_bad_lp(efx, efx->link_state.up);
598
599 efx->link_state.up = sft9001_link_ok(efx, &ecmd);
600 efx->link_state.speed = ecmd.speed;
601 efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
602 efx->link_state.fc = efx_mdio_get_pause(efx);
603 }
604 368
605 return !efx_link_state_equal(&efx->link_state, &old_state); 369 return !efx_link_state_equal(&efx->link_state, &old_state);
606} 370}
@@ -621,10 +385,6 @@ static void sfx7101_phy_fini(struct efx_nic *efx)
621 385
622static void tenxpress_phy_remove(struct efx_nic *efx) 386static void tenxpress_phy_remove(struct efx_nic *efx)
623{ 387{
624 if (efx->phy_type == PHY_TYPE_SFT9001B)
625 device_remove_file(&efx->pci_dev->dev,
626 &dev_attr_phy_short_reach);
627
628 kfree(efx->phy_data); 388 kfree(efx->phy_data);
629 efx->phy_data = NULL; 389 efx->phy_data = NULL;
630} 390}
@@ -647,10 +407,7 @@ void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
647 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN); 407 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
648 break; 408 break;
649 default: 409 default:
650 if (efx->phy_type == PHY_TYPE_SFX7101) 410 reg = SFX7101_PMA_PMD_LED_DEFAULT;
651 reg = SFX7101_PMA_PMD_LED_DEFAULT;
652 else
653 reg = SFT9001_PMA_PMD_LED_DEFAULT;
654 break; 411 break;
655 } 412 }
656 413
@@ -685,102 +442,12 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
685 return rc; 442 return rc;
686} 443}
687 444
688static const char *const sft9001_test_names[] = {
689 "bist",
690 "cable.pairA.status",
691 "cable.pairB.status",
692 "cable.pairC.status",
693 "cable.pairD.status",
694 "cable.pairA.length",
695 "cable.pairB.length",
696 "cable.pairC.length",
697 "cable.pairD.length",
698};
699
700static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index)
701{
702 if (index < ARRAY_SIZE(sft9001_test_names))
703 return sft9001_test_names[index];
704 return NULL;
705}
706
707static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
708{
709 int rc = 0, rc2, i, ctrl_reg, res_reg;
710
711 /* Initialise cable diagnostic results to unknown failure */
712 for (i = 1; i < 9; ++i)
713 results[i] = -1;
714
715 /* Run cable diagnostics; wait up to 5 seconds for them to complete.
716 * A cable fault is not a self-test failure, but a timeout is. */
717 ctrl_reg = ((1 << CDIAG_CTRL_IMMED_LBN) |
718 (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
719 if (flags & ETH_TEST_FL_OFFLINE) {
720 /* Break the link in order to run full diagnostics. We
721 * must reset the PHY to resume normal service. */
722 ctrl_reg |= (1 << CDIAG_CTRL_BRK_LINK_LBN);
723 }
724 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG,
725 ctrl_reg);
726 i = 0;
727 while (efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG) &
728 (1 << CDIAG_CTRL_IN_PROG_LBN)) {
729 if (++i == 50) {
730 rc = -ETIMEDOUT;
731 goto out;
732 }
733 msleep(100);
734 }
735 res_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_RES_REG);
736 for (i = 0; i < 4; i++) {
737 int pair_res =
738 (res_reg >> (CDIAG_RES_A_LBN - i * CDIAG_RES_WIDTH))
739 & ((1 << CDIAG_RES_WIDTH) - 1);
740 int len_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
741 PMA_PMD_CDIAG_LEN_REG + i);
742 if (pair_res == CDIAG_RES_OK)
743 results[1 + i] = 1;
744 else if (pair_res == CDIAG_RES_INVALID)
745 results[1 + i] = -1;
746 else
747 results[1 + i] = -pair_res;
748 if (pair_res != CDIAG_RES_INVALID &&
749 pair_res != CDIAG_RES_OPEN &&
750 len_reg != 0xffff)
751 results[5 + i] = len_reg;
752 }
753
754out:
755 if (flags & ETH_TEST_FL_OFFLINE) {
756 /* Reset, running the BIST and then resuming normal service. */
757 rc2 = tenxpress_special_reset(efx);
758 results[0] = rc2 ? -1 : 1;
759 if (!rc)
760 rc = rc2;
761
762 efx_mdio_an_reconfigure(efx);
763 }
764
765 return rc;
766}
767
768static void 445static void
769tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 446tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
770{ 447{
771 u32 adv = 0, lpa = 0; 448 u32 adv = 0, lpa = 0;
772 int reg; 449 int reg;
773 450
774 if (efx->phy_type != PHY_TYPE_SFX7101) {
775 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL);
776 if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
777 adv |= ADVERTISED_1000baseT_Full;
778 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_STATUS);
779 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
780 lpa |= ADVERTISED_1000baseT_Half;
781 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
782 lpa |= ADVERTISED_1000baseT_Full;
783 }
784 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL); 451 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
785 if (reg & MDIO_AN_10GBT_CTRL_ADV10G) 452 if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
786 adv |= ADVERTISED_10000baseT_Full; 453 adv |= ADVERTISED_10000baseT_Full;
@@ -790,24 +457,10 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
790 457
791 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 458 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
792 459
793 if (efx->phy_type != PHY_TYPE_SFX7101) {
794 ecmd->supported |= (SUPPORTED_100baseT_Full |
795 SUPPORTED_1000baseT_Full);
796 if (ecmd->speed != SPEED_10000) {
797 ecmd->eth_tp_mdix =
798 (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
799 PMA_PMD_XSTATUS_REG) &
800 (1 << PMA_PMD_XSTAT_MDIX_LBN))
801 ? ETH_TP_MDI_X : ETH_TP_MDI;
802 }
803 }
804
805 /* In loopback, the PHY automatically brings up the correct interface, 460 /* In loopback, the PHY automatically brings up the correct interface,
806 * but doesn't advertise the correct speed. So override it */ 461 * but doesn't advertise the correct speed. So override it */
807 if (efx->loopback_mode == LOOPBACK_GPHY) 462 if (LOOPBACK_EXTERNAL(efx))
808 ecmd->speed = SPEED_1000; 463 ethtool_cmd_speed_set(ecmd, SPEED_10000);
809 else if (LOOPBACK_EXTERNAL(efx))
810 ecmd->speed = SPEED_10000;
811} 464}
812 465
813static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 466static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
@@ -825,17 +478,7 @@ static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
825 advertising & ADVERTISED_10000baseT_Full); 478 advertising & ADVERTISED_10000baseT_Full);
826} 479}
827 480
828static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising) 481const struct efx_phy_operations falcon_sfx7101_phy_ops = {
829{
830 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL,
831 1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
832 advertising & ADVERTISED_1000baseT_Full);
833 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
834 MDIO_AN_10GBT_CTRL_ADV10G,
835 advertising & ADVERTISED_10000baseT_Full);
836}
837
838struct efx_phy_operations falcon_sfx7101_phy_ops = {
839 .probe = tenxpress_phy_probe, 482 .probe = tenxpress_phy_probe,
840 .init = tenxpress_phy_init, 483 .init = tenxpress_phy_init,
841 .reconfigure = tenxpress_phy_reconfigure, 484 .reconfigure = tenxpress_phy_reconfigure,
@@ -849,18 +492,3 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
849 .test_name = sfx7101_test_name, 492 .test_name = sfx7101_test_name,
850 .run_tests = sfx7101_run_tests, 493 .run_tests = sfx7101_run_tests,
851}; 494};
852
853struct efx_phy_operations falcon_sft9001_phy_ops = {
854 .probe = tenxpress_phy_probe,
855 .init = tenxpress_phy_init,
856 .reconfigure = tenxpress_phy_reconfigure,
857 .poll = tenxpress_phy_poll,
858 .fini = efx_port_dummy_op_void,
859 .remove = tenxpress_phy_remove,
860 .get_settings = tenxpress_get_settings,
861 .set_settings = tenxpress_set_settings,
862 .set_npage_adv = sft9001_set_npage_adv,
863 .test_alive = efx_mdio_test_alive,
864 .test_name = sft9001_test_name,
865 .run_tests = sft9001_run_tests,
866};
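
The SFX7101 paths kept above drive everything through efx_mdio_set_flag() single-bit updates: loopback via 1 << LOOPBACK_NEAR_LBN, LED modes, low power. A minimal userspace model of that read-modify-write helper, with a plain array standing in for the MDIO bus (the MMD number, register address, and bit position below are illustrative, not the driver's values):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for 16-bit MDIO registers, indexed regs[mmd][addr]. */
static uint16_t regs[32][0x10000];

static uint16_t mdio_read(int mmd, int addr) { return regs[mmd][addr]; }
static void mdio_write(int mmd, int addr, uint16_t v) { regs[mmd][addr] = v; }

/* Read-modify-write one bit mask, in the shape of efx_mdio_set_flag(). */
static void mdio_set_flag(int mmd, int addr, uint16_t mask, int state)
{
	uint16_t v = mdio_read(mmd, addr);

	if (state)
		v |= mask;
	else
		v &= ~mask;
	mdio_write(mmd, addr, v);
}

int main(void)
{
	enum { DEMO_LOOPBACK_LBN = 8 };		/* illustrative bit position */

	mdio_set_flag(4, 0xc000, 1 << DEMO_LOOPBACK_LBN, 1);
	printf("reg = %#x\n", regs[4][0xc000]);	/* 0x100 */
	mdio_set_flag(4, 0xc000, 1 << DEMO_LOOPBACK_LBN, 0);
	printf("reg = %#x\n", regs[4][0xc000]);	/* 0 */
	return 0;
}
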
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index c6942da2c99a..84eb99e0f8d2 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -28,51 +28,7 @@
28 * The tx_queue descriptor ring fill-level must fall below this value 28 * The tx_queue descriptor ring fill-level must fall below this value
29 * before we restart the netif queue 29 * before we restart the netif queue
30 */ 30 */
31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u) 31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32
33/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34 * because of the 2 hardware queues associated with each core queue,
35 * but also so that we can inhibit TX for reasons other than a full
36 * hardware queue. */
37void efx_stop_queue(struct efx_channel *channel)
38{
39 struct efx_nic *efx = channel->efx;
40
41 if (!channel->tx_queue)
42 return;
43
44 spin_lock_bh(&channel->tx_stop_lock);
45 netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
46
47 atomic_inc(&channel->tx_stop_count);
48 netif_tx_stop_queue(
49 netdev_get_tx_queue(
50 efx->net_dev,
51 channel->tx_queue->queue / EFX_TXQ_TYPES));
52
53 spin_unlock_bh(&channel->tx_stop_lock);
54}
55
56/* Decrement core TX queue stop count and wake it if the count is 0 */
57void efx_wake_queue(struct efx_channel *channel)
58{
59 struct efx_nic *efx = channel->efx;
60
61 if (!channel->tx_queue)
62 return;
63
64 local_bh_disable();
65 if (atomic_dec_and_lock(&channel->tx_stop_count,
66 &channel->tx_stop_lock)) {
67 netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
68 netif_tx_wake_queue(
69 netdev_get_tx_queue(
70 efx->net_dev,
71 channel->tx_queue->queue / EFX_TXQ_TYPES));
72 spin_unlock(&channel->tx_stop_lock);
73 }
74 local_bh_enable();
75}
76 32
77static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 33static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
78 struct efx_tx_buffer *buffer) 34 struct efx_tx_buffer *buffer)
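
With efx_stop_queue()/efx_wake_queue() gone, flow control rests entirely on the fill-level arithmetic visible in the hunks below: insert_count and read_count are free-running unsigned counters, and their difference stays correct even across 32-bit wraparound. A compile-and-run check of that property, with values chosen to straddle the wrap:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int insert_count = 0xfffffffeu;	/* about to wrap */
	unsigned int read_count   = 0xfffffff0u;
	unsigned int entries = 1024;

	/* 14 descriptors in flight just before the wrap... */
	assert(insert_count - read_count == 14);

	insert_count += 8;	/* wraps past zero */
	read_count += 4;

	/* ...and the difference is still exact just after it. */
	unsigned int fill_level = insert_count - read_count;
	assert(fill_level == 18);

	unsigned int q_space = entries - 1 - fill_level;
	printf("fill_level=%u q_space=%u\n", fill_level, q_space);
	return 0;
}
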
@@ -207,7 +163,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
207 } 163 }
208 164
209 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 165 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
210 q_space = EFX_TXQ_MASK - 1 - fill_level; 166 q_space = efx->txq_entries - 1 - fill_level;
211 167
212 /* Map for DMA. Use pci_map_single rather than pci_map_page 168 /* Map for DMA. Use pci_map_single rather than pci_map_page
213 * since this is more efficient on machines with sparse 169 * since this is more efficient on machines with sparse
@@ -234,24 +190,27 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
234 * checked. Update the xmit path's 190 * checked. Update the xmit path's
235 * copy of read_count. 191 * copy of read_count.
236 */ 192 */
237 ++tx_queue->stopped; 193 netif_tx_stop_queue(tx_queue->core_txq);
238 /* This memory barrier protects the 194 /* This memory barrier protects the
239 * change of stopped from the access 195 * change of queue state from the access
240 * of read_count. */ 196 * of read_count. */
241 smp_mb(); 197 smp_mb();
242 tx_queue->old_read_count = 198 tx_queue->old_read_count =
243 *(volatile unsigned *) 199 ACCESS_ONCE(tx_queue->read_count);
244 &tx_queue->read_count;
245 fill_level = (tx_queue->insert_count 200 fill_level = (tx_queue->insert_count
246 - tx_queue->old_read_count); 201 - tx_queue->old_read_count);
247 q_space = EFX_TXQ_MASK - 1 - fill_level; 202 q_space = efx->txq_entries - 1 - fill_level;
248 if (unlikely(q_space-- <= 0)) 203 if (unlikely(q_space-- <= 0)) {
249 goto stop; 204 rc = NETDEV_TX_BUSY;
205 goto unwind;
206 }
250 smp_mb(); 207 smp_mb();
251 --tx_queue->stopped; 208 if (likely(!efx->loopback_selftest))
209 netif_tx_start_queue(
210 tx_queue->core_txq);
252 } 211 }
253 212
254 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; 213 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
255 buffer = &tx_queue->buffer[insert_ptr]; 214 buffer = &tx_queue->buffer[insert_ptr];
256 efx_tsoh_free(tx_queue, buffer); 215 efx_tsoh_free(tx_queue, buffer);
257 EFX_BUG_ON_PARANOID(buffer->tsoh); 216 EFX_BUG_ON_PARANOID(buffer->tsoh);
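
The stop/re-check sequence above is the classic lockless handshake between the xmit and completion paths: stop the queue first, issue a barrier, then re-read read_count, so a completion landing in the window cannot leave the queue stopped forever. A single-threaded sketch of the producer side (smp_mb() is modelled by a compiler barrier, and the flow is deliberately simplified relative to the driver's):

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory") /* stands in for smp_mb() */

struct txq {
	unsigned int insert_count, read_count, old_read_count;
	unsigned int entries;
	int stopped;
};

/* Returns 0 if a descriptor may be used, 1 for the NETDEV_TX_BUSY path. */
static int tx_maybe_stop(struct txq *q)
{
	unsigned int fill = q->insert_count - q->old_read_count;

	if (q->entries - 1 - fill > 0)
		return 0;

	q->stopped = 1;			/* netif_tx_stop_queue() */
	barrier();			/* order the stop before the re-read */
	q->old_read_count = q->read_count;	/* ACCESS_ONCE() in the driver */
	fill = q->insert_count - q->old_read_count;
	if (q->entries - 1 - fill > 0) {
		q->stopped = 0;		/* netif_tx_start_queue() */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct txq q = { .insert_count = 1023, .read_count = 100,
			 .old_read_count = 0, .entries = 1024 };

	/* A stale old_read_count says "full"; the post-barrier re-check
	 * sees the completions and restarts the queue. */
	printf("busy=%d stopped=%d\n", tx_maybe_stop(&q), q.stopped);
	return 0;
}
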
@@ -308,19 +267,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
308 267
309 /* Mark the packet as transmitted, and free the SKB ourselves */ 268 /* Mark the packet as transmitted, and free the SKB ourselves */
310 dev_kfree_skb_any(skb); 269 dev_kfree_skb_any(skb);
311 goto unwind;
312
313 stop:
314 rc = NETDEV_TX_BUSY;
315
316 if (tx_queue->stopped == 1)
317 efx_stop_queue(tx_queue->channel);
318 270
319 unwind: 271 unwind:
320 /* Work backwards until we hit the original insert pointer value */ 272 /* Work backwards until we hit the original insert pointer value */
321 while (tx_queue->insert_count != tx_queue->write_count) { 273 while (tx_queue->insert_count != tx_queue->write_count) {
322 --tx_queue->insert_count; 274 --tx_queue->insert_count;
323 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; 275 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
324 buffer = &tx_queue->buffer[insert_ptr]; 276 buffer = &tx_queue->buffer[insert_ptr];
325 efx_dequeue_buffer(tx_queue, buffer); 277 efx_dequeue_buffer(tx_queue, buffer);
326 buffer->len = 0; 278 buffer->len = 0;
@@ -350,8 +302,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
350 struct efx_nic *efx = tx_queue->efx; 302 struct efx_nic *efx = tx_queue->efx;
351 unsigned int stop_index, read_ptr; 303 unsigned int stop_index, read_ptr;
352 304
353 stop_index = (index + 1) & EFX_TXQ_MASK; 305 stop_index = (index + 1) & tx_queue->ptr_mask;
354 read_ptr = tx_queue->read_count & EFX_TXQ_MASK; 306 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
355 307
356 while (read_ptr != stop_index) { 308 while (read_ptr != stop_index) {
357 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 309 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -368,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
368 buffer->len = 0; 320 buffer->len = 0;
369 321
370 ++tx_queue->read_count; 322 ++tx_queue->read_count;
371 read_ptr = tx_queue->read_count & EFX_TXQ_MASK; 323 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
372 } 324 }
373} 325}
374 326
@@ -386,43 +338,120 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
386{ 338{
387 struct efx_nic *efx = netdev_priv(net_dev); 339 struct efx_nic *efx = netdev_priv(net_dev);
388 struct efx_tx_queue *tx_queue; 340 struct efx_tx_queue *tx_queue;
341 unsigned index, type;
389 342
390 if (unlikely(efx->port_inhibited)) 343 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
391 return NETDEV_TX_BUSY;
392 344
393 tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)]; 345 index = skb_get_queue_mapping(skb);
394 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) 346 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
395 tx_queue += EFX_TXQ_TYPE_OFFLOAD; 347 if (index >= efx->n_tx_channels) {
348 index -= efx->n_tx_channels;
349 type |= EFX_TXQ_TYPE_HIGHPRI;
350 }
351 tx_queue = efx_get_tx_queue(efx, index, type);
396 352
397 return efx_enqueue_skb(tx_queue, skb); 353 return efx_enqueue_skb(tx_queue, skb);
398} 354}
399 355
356void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
357{
358 struct efx_nic *efx = tx_queue->efx;
359
360 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
361 tx_queue->core_txq =
362 netdev_get_tx_queue(efx->net_dev,
363 tx_queue->queue / EFX_TXQ_TYPES +
364 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
365 efx->n_tx_channels : 0));
366}
367
368int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
369{
370 struct efx_nic *efx = netdev_priv(net_dev);
371 struct efx_channel *channel;
372 struct efx_tx_queue *tx_queue;
373 unsigned tc;
374 int rc;
375
376 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
377 return -EINVAL;
378
379 if (num_tc == net_dev->num_tc)
380 return 0;
381
382 for (tc = 0; tc < num_tc; tc++) {
383 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
384 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
385 }
386
387 if (num_tc > net_dev->num_tc) {
388 /* Initialise high-priority queues as necessary */
389 efx_for_each_channel(channel, efx) {
390 efx_for_each_possible_channel_tx_queue(tx_queue,
391 channel) {
392 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
393 continue;
394 if (!tx_queue->buffer) {
395 rc = efx_probe_tx_queue(tx_queue);
396 if (rc)
397 return rc;
398 }
399 if (!tx_queue->initialised)
400 efx_init_tx_queue(tx_queue);
401 efx_init_tx_queue_core_txq(tx_queue);
402 }
403 }
404 } else {
405 /* Reduce number of classes before number of queues */
406 net_dev->num_tc = num_tc;
407 }
408
409 rc = netif_set_real_num_tx_queues(net_dev,
410 max_t(int, num_tc, 1) *
411 efx->n_tx_channels);
412 if (rc)
413 return rc;
414
415 /* Do not destroy high-priority queues when they become
416 * unused. We would have to flush them first, and it is
417 * fairly difficult to flush a subset of TX queues. Leave
418 * it to efx_fini_channels().
419 */
420
421 net_dev->num_tc = num_tc;
422 return 0;
423}
424
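
efx_hard_start_xmit() folds a core queue index plus checksum flag into a flat hardware queue number, and efx_init_tx_queue_core_txq() must invert it exactly, as its comment insists. A quick round-trip check of the two mappings (the EFX_TXQ_* values are mirrored from net_driver.h; n_tx_channels is an arbitrary example):

#include <assert.h>
#include <stdio.h>

enum {
	EFX_TXQ_TYPE_OFFLOAD = 1,
	EFX_TXQ_TYPE_HIGHPRI = 2,
	EFX_TXQ_TYPES	     = 4,
};

static const unsigned int n_tx_channels = 4;

/* efx_hard_start_xmit(): core queue index + csum flag -> hw queue */
static unsigned int hw_queue(unsigned int core, int csum_offload)
{
	unsigned int type = csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0;

	if (core >= n_tx_channels) {
		core -= n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	return core * EFX_TXQ_TYPES + type;
}

/* efx_init_tx_queue_core_txq(): hw queue -> core queue index */
static unsigned int core_index(unsigned int queue)
{
	return queue / EFX_TXQ_TYPES +
	       ((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
	for (unsigned int c = 0; c < 2 * n_tx_channels; c++)
		for (int csum = 0; csum <= 1; csum++)
			assert(core_index(hw_queue(c, csum)) == c);
	puts("queue mapping round-trips");
	return 0;
}
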
400void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 425void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401{ 426{
402 unsigned fill_level; 427 unsigned fill_level;
403 struct efx_nic *efx = tx_queue->efx; 428 struct efx_nic *efx = tx_queue->efx;
404 429
405 EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK); 430 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
406 431
407 efx_dequeue_buffers(tx_queue, index); 432 efx_dequeue_buffers(tx_queue, index);
408 433
409 /* See if we need to restart the netif queue. This barrier 434 /* See if we need to restart the netif queue. This barrier
410 * separates the update of read_count from the test of 435 * separates the update of read_count from the test of the
411 * stopped. */ 436 * queue state. */
412 smp_mb(); 437 smp_mb();
413 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 438 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
439 likely(efx->port_enabled) &&
440 likely(netif_device_present(efx->net_dev))) {
414 fill_level = tx_queue->insert_count - tx_queue->read_count; 441 fill_level = tx_queue->insert_count - tx_queue->read_count;
415 if (fill_level < EFX_TXQ_THRESHOLD) { 442 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
416 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 443 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
444 netif_tx_wake_queue(tx_queue->core_txq);
445 }
446 }
417 447
418 /* Do this under netif_tx_lock(), to avoid racing 448 /* Check whether the hardware queue is now empty */
419 * with efx_xmit(). */ 449 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
420 netif_tx_lock(efx->net_dev); 450 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
421 if (tx_queue->stopped) { 451 if (tx_queue->read_count == tx_queue->old_write_count) {
422 tx_queue->stopped = 0; 452 smp_mb();
423 efx_wake_queue(tx_queue->channel); 453 tx_queue->empty_read_count =
424 } 454 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
425 netif_tx_unlock(efx->net_dev);
426 } 455 }
427 } 456 }
428} 457}
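
The empty_read_count logic added to efx_xmit_done() lets the xmit path learn "hardware queue empty" from a single load: the read_count snapshot is published with a validity flag folded into the top bit. A sketch of that encoding, taking EFX_EMPTY_COUNT_VALID to be the top bit as its use here implies:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EFX_EMPTY_COUNT_VALID 0x80000000u	/* top bit = snapshot valid */

int main(void)
{
	uint32_t read_count = 0x1234;
	uint32_t empty_read_count;

	/* Completion path: queue fully drained, publish a valid snapshot. */
	empty_read_count = read_count | EFX_EMPTY_COUNT_VALID;

	/* Producer path: one load answers both "valid?" and "where?". */
	assert(empty_read_count & EFX_EMPTY_COUNT_VALID);
	assert((empty_read_count & ~EFX_EMPTY_COUNT_VALID) == read_count);

	/* Invalidation is a plain store of zero. */
	empty_read_count = 0;
	assert(!(empty_read_count & EFX_EMPTY_COUNT_VALID));
	puts("empty-queue snapshot encoding ok");
	return 0;
}
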
@@ -430,18 +459,24 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
430int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 459int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
431{ 460{
432 struct efx_nic *efx = tx_queue->efx; 461 struct efx_nic *efx = tx_queue->efx;
433 unsigned int txq_size; 462 unsigned int entries;
434 int i, rc; 463 int i, rc;
435 464
436 netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n", 465 /* Create the smallest power-of-two aligned ring */
437 tx_queue->queue); 466 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
467 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
468 tx_queue->ptr_mask = entries - 1;
469
470 netif_dbg(efx, probe, efx->net_dev,
471 "creating TX queue %d size %#x mask %#x\n",
472 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
438 473
439 /* Allocate software ring */ 474 /* Allocate software ring */
440 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); 475 tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
441 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 476 GFP_KERNEL);
442 if (!tx_queue->buffer) 477 if (!tx_queue->buffer)
443 return -ENOMEM; 478 return -ENOMEM;
444 for (i = 0; i <= EFX_TXQ_MASK; ++i) 479 for (i = 0; i <= tx_queue->ptr_mask; ++i)
445 tx_queue->buffer[i].continuation = true; 480 tx_queue->buffer[i].continuation = true;
446 481
447 /* Allocate hardware ring */ 482 /* Allocate hardware ring */
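
Rounding the ring up to a power of two and keeping ptr_mask = entries - 1 turns every ring-index modulo in the hot path into a single AND; the identity only holds for power-of-two sizes, which is exactly what the roundup guarantees. A self-checking sketch (the roundup here is a portable stand-in for the kernel's roundup_pow_of_two()):

#include <assert.h>
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int entries = roundup_pow_of_two(1000);	/* -> 1024 */
	unsigned int ptr_mask = entries - 1;

	assert(entries == 1024);
	for (unsigned int count = 0; count < 5 * entries; count += 7)
		assert((count & ptr_mask) == count % entries);
	printf("entries=%u ptr_mask=%#x\n", entries, ptr_mask);
	return 0;
}
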
@@ -464,12 +499,15 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
464 499
465 tx_queue->insert_count = 0; 500 tx_queue->insert_count = 0;
466 tx_queue->write_count = 0; 501 tx_queue->write_count = 0;
502 tx_queue->old_write_count = 0;
467 tx_queue->read_count = 0; 503 tx_queue->read_count = 0;
468 tx_queue->old_read_count = 0; 504 tx_queue->old_read_count = 0;
469 BUG_ON(tx_queue->stopped); 505 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
470 506
471 /* Set up TX descriptor ring */ 507 /* Set up TX descriptor ring */
472 efx_nic_init_tx(tx_queue); 508 efx_nic_init_tx(tx_queue);
509
510 tx_queue->initialised = true;
473} 511}
474 512
475void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 513void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -481,7 +519,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
481 519
482 /* Free any buffers left in the ring */ 520 /* Free any buffers left in the ring */
483 while (tx_queue->read_count != tx_queue->write_count) { 521 while (tx_queue->read_count != tx_queue->write_count) {
484 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK]; 522 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
485 efx_dequeue_buffer(tx_queue, buffer); 523 efx_dequeue_buffer(tx_queue, buffer);
486 buffer->continuation = true; 524 buffer->continuation = true;
487 buffer->len = 0; 525 buffer->len = 0;
@@ -492,9 +530,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
492 530
493void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 531void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
494{ 532{
533 if (!tx_queue->initialised)
534 return;
535
495 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 536 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
496 "shutting down TX queue %d\n", tx_queue->queue); 537 "shutting down TX queue %d\n", tx_queue->queue);
497 538
539 tx_queue->initialised = false;
540
498 /* Flush TX queue, remove descriptor ring */ 541 /* Flush TX queue, remove descriptor ring */
499 efx_nic_fini_tx(tx_queue); 542 efx_nic_fini_tx(tx_queue);
500 543
@@ -502,16 +545,13 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
502 545
503 /* Free up TSO header cache */ 546 /* Free up TSO header cache */
504 efx_fini_tso(tx_queue); 547 efx_fini_tso(tx_queue);
505
506 /* Release queue's stop on port, if any */
507 if (tx_queue->stopped) {
508 tx_queue->stopped = 0;
509 efx_wake_queue(tx_queue->channel);
510 }
511} 548}
512 549
513void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 550void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
514{ 551{
552 if (!tx_queue->buffer)
553 return;
554
515 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 555 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
516 "destroying TX queue %d\n", tx_queue->queue); 556 "destroying TX queue %d\n", tx_queue->queue);
517 efx_nic_remove_tx(tx_queue); 557 efx_nic_remove_tx(tx_queue);
@@ -741,7 +781,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
741 781
742 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 782 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
743 /* -1 as there is no way to represent all descriptors used */ 783 /* -1 as there is no way to represent all descriptors used */
744 q_space = EFX_TXQ_MASK - 1 - fill_level; 784 q_space = efx->txq_entries - 1 - fill_level;
745 785
746 while (1) { 786 while (1) {
747 if (unlikely(q_space-- <= 0)) { 787 if (unlikely(q_space-- <= 0)) {
@@ -749,30 +789,30 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
749 * since the xmit path last checked. Update 789 * since the xmit path last checked. Update
750 * the xmit path's copy of read_count. 790 * the xmit path's copy of read_count.
751 */ 791 */
752 ++tx_queue->stopped; 792 netif_tx_stop_queue(tx_queue->core_txq);
753 /* This memory barrier protects the change of 793 /* This memory barrier protects the change of
754 * stopped from the access of read_count. */ 794 * queue state from the access of read_count. */
755 smp_mb(); 795 smp_mb();
756 tx_queue->old_read_count = 796 tx_queue->old_read_count =
757 *(volatile unsigned *)&tx_queue->read_count; 797 ACCESS_ONCE(tx_queue->read_count);
758 fill_level = (tx_queue->insert_count 798 fill_level = (tx_queue->insert_count
759 - tx_queue->old_read_count); 799 - tx_queue->old_read_count);
760 q_space = EFX_TXQ_MASK - 1 - fill_level; 800 q_space = efx->txq_entries - 1 - fill_level;
761 if (unlikely(q_space-- <= 0)) { 801 if (unlikely(q_space-- <= 0)) {
762 *final_buffer = NULL; 802 *final_buffer = NULL;
763 return 1; 803 return 1;
764 } 804 }
765 smp_mb(); 805 smp_mb();
766 --tx_queue->stopped; 806 netif_tx_start_queue(tx_queue->core_txq);
767 } 807 }
768 808
769 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; 809 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
770 buffer = &tx_queue->buffer[insert_ptr]; 810 buffer = &tx_queue->buffer[insert_ptr];
771 ++tx_queue->insert_count; 811 ++tx_queue->insert_count;
772 812
773 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 813 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
774 tx_queue->read_count > 814 tx_queue->read_count >=
775 EFX_TXQ_MASK); 815 efx->txq_entries);
776 816
777 efx_tsoh_free(tx_queue, buffer); 817 efx_tsoh_free(tx_queue, buffer);
778 EFX_BUG_ON_PARANOID(buffer->len); 818 EFX_BUG_ON_PARANOID(buffer->len);
@@ -813,7 +853,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
813{ 853{
814 struct efx_tx_buffer *buffer; 854 struct efx_tx_buffer *buffer;
815 855
816 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK]; 856 buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
817 efx_tsoh_free(tx_queue, buffer); 857 efx_tsoh_free(tx_queue, buffer);
818 EFX_BUG_ON_PARANOID(buffer->len); 858 EFX_BUG_ON_PARANOID(buffer->len);
819 EFX_BUG_ON_PARANOID(buffer->unmap_len); 859 EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -838,7 +878,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
838 while (tx_queue->insert_count != tx_queue->write_count) { 878 while (tx_queue->insert_count != tx_queue->write_count) {
839 --tx_queue->insert_count; 879 --tx_queue->insert_count;
840 buffer = &tx_queue->buffer[tx_queue->insert_count & 880 buffer = &tx_queue->buffer[tx_queue->insert_count &
841 EFX_TXQ_MASK]; 881 tx_queue->ptr_mask];
842 efx_tsoh_free(tx_queue, buffer); 882 efx_tsoh_free(tx_queue, buffer);
843 EFX_BUG_ON_PARANOID(buffer->skb); 883 EFX_BUG_ON_PARANOID(buffer->skb);
844 if (buffer->unmap_len) { 884 if (buffer->unmap_len) {
@@ -1103,8 +1143,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1103 1143
1104 while (1) { 1144 while (1) {
1105 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1145 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1106 if (unlikely(rc)) 1146 if (unlikely(rc)) {
1107 goto stop; 1147 rc2 = NETDEV_TX_BUSY;
1148 goto unwind;
1149 }
1108 1150
1109 /* Move onto the next fragment? */ 1151 /* Move onto the next fragment? */
1110 if (state.in_len == 0) { 1152 if (state.in_len == 0) {
@@ -1133,14 +1175,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1133 netif_err(efx, tx_err, efx->net_dev, 1175 netif_err(efx, tx_err, efx->net_dev,
1134 "Out of memory for TSO headers, or PCI mapping error\n"); 1176 "Out of memory for TSO headers, or PCI mapping error\n");
1135 dev_kfree_skb_any(skb); 1177 dev_kfree_skb_any(skb);
1136 goto unwind;
1137
1138 stop:
1139 rc2 = NETDEV_TX_BUSY;
1140
1141 /* Stop the queue if it wasn't stopped before. */
1142 if (tx_queue->stopped == 1)
1143 efx_stop_queue(tx_queue->channel);
1144 1178
1145 unwind: 1179 unwind:
1146 /* Free the DMA mapping we were in the process of writing out */ 1180 /* Free the DMA mapping we were in the process of writing out */
@@ -1168,7 +1202,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1168 unsigned i; 1202 unsigned i;
1169 1203
1170 if (tx_queue->buffer) { 1204 if (tx_queue->buffer) {
1171 for (i = 0; i <= EFX_TXQ_MASK; ++i) 1205 for (i = 0; i <= tx_queue->ptr_mask; ++i)
1172 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1206 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1173 } 1207 }
1174 1208
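
The EFX_TXQ_THRESHOLD(_efx) macro introduced at the top of this file gives the reworked stop/wake machinery its hysteresis: a queue stops when effectively full but is only woken once it drains below half, so it cannot flap on every completion. A toy run of that policy:

#include <stdio.h>

int main(void)
{
	unsigned int entries = 1024;
	unsigned int threshold = entries / 2;	/* EFX_TXQ_THRESHOLD */
	unsigned int fill = entries - 2;	/* stopped while nearly full */
	int stopped = 1;

	/* Completions retire 64 descriptors at a time. */
	while (stopped) {
		fill -= 64;
		if (fill < threshold) {
			stopped = 0;	/* netif_tx_wake_queue() */
			printf("woken at fill=%u (threshold=%u)\n",
			       fill, threshold);
		}
	}
	return 0;
}
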
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
new file mode 100644
index 000000000000..7c21b334a75b
--- /dev/null
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -0,0 +1,560 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*
11 * Driver for Transwitch/Mysticom CX4 retimer
12 * see www.transwitch.com, part is TXC-43128
13 */
14
15#include <linux/delay.h>
16#include <linux/slab.h>
17#include "efx.h"
18#include "mdio_10g.h"
19#include "phy.h"
20#include "nic.h"
21
22/* We expect these MMDs to be in the package */
23#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
24 MDIO_DEVS_PMAPMD | \
25 MDIO_DEVS_PHYXS)
26
27#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
28 (1 << LOOPBACK_PMAPMD) | \
29 (1 << LOOPBACK_PHYXS_WS))
30
31/**************************************************************************
32 *
33 * Compile-time config
34 *
35 **************************************************************************
36 */
37#define TXCNAME "TXC43128"
38/* Total length of time we'll wait for the PHY to come out of reset (ms) */
39#define TXC_MAX_RESET_TIME 500
40/* Interval between checks (ms) */
41#define TXC_RESET_WAIT 10
42/* How long to run BIST (us) */
43#define TXC_BIST_DURATION 50
44
45/**************************************************************************
46 *
47 * Register definitions
48 *
49 **************************************************************************
50 */
51
52/* Command register */
53#define TXC_GLRGS_GLCMD 0xc004
54/* Useful bits in command register */
55/* Lane power-down */
56#define TXC_GLCMD_L01PD_LBN 5
57#define TXC_GLCMD_L23PD_LBN 6
58/* Limited SW reset: preserves configuration but
59 * initiates a logic reset. Self-clearing */
60#define TXC_GLCMD_LMTSWRST_LBN 14
61
62/* Signal Quality Control */
63#define TXC_GLRGS_GSGQLCTL 0xc01a
64/* Enable bit */
65#define TXC_GSGQLCT_SGQLEN_LBN 15
66/* Lane selection */
67#define TXC_GSGQLCT_LNSL_LBN 13
68#define TXC_GSGQLCT_LNSL_WIDTH 2
69
70/* Analog TX control */
71#define TXC_ALRGS_ATXCTL 0xc040
72/* Lane power-down */
73#define TXC_ATXCTL_TXPD3_LBN 15
74#define TXC_ATXCTL_TXPD2_LBN 14
75#define TXC_ATXCTL_TXPD1_LBN 13
76#define TXC_ATXCTL_TXPD0_LBN 12
77
78/* Amplitude on lanes 0, 1 */
79#define TXC_ALRGS_ATXAMP0 0xc041
80/* Amplitude on lanes 2, 3 */
81#define TXC_ALRGS_ATXAMP1 0xc042
82/* Bit position of value for lane 0 (or 2) */
83#define TXC_ATXAMP_LANE02_LBN 3
84/* Bit position of value for lane 1 (or 3) */
85#define TXC_ATXAMP_LANE13_LBN 11
86
87#define TXC_ATXAMP_1280_mV 0
88#define TXC_ATXAMP_1200_mV 8
89#define TXC_ATXAMP_1120_mV 12
90#define TXC_ATXAMP_1060_mV 14
91#define TXC_ATXAMP_0820_mV 25
92#define TXC_ATXAMP_0720_mV 26
93#define TXC_ATXAMP_0580_mV 27
94#define TXC_ATXAMP_0440_mV 28
95
96#define TXC_ATXAMP_0820_BOTH \
97 ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
98 | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
99
100#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
101
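
TXC_ATXAMP_0820_BOTH illustrates the packing convention for these analog registers: one 16-bit register carries two lane codes, the even lane's at bit 3 and the odd lane's at bit 11. A quick decode check (the 5-bit field width is an assumption read off the constant values, not taken from the databook):

#include <assert.h>
#include <stdio.h>

#define TXC_ATXAMP_LANE02_LBN	3
#define TXC_ATXAMP_LANE13_LBN	11
#define TXC_ATXAMP_0820_mV	25

#define TXC_ATXAMP_0820_BOTH \
	((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) | \
	 (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))

int main(void)
{
	unsigned int reg = TXC_ATXAMP_0820_BOTH;

	/* Both lane fields decode back to the 820 mV code. */
	assert(((reg >> TXC_ATXAMP_LANE02_LBN) & 0x1f) == TXC_ATXAMP_0820_mV);
	assert(((reg >> TXC_ATXAMP_LANE13_LBN) & 0x1f) == TXC_ATXAMP_0820_mV);
	printf("ATXAMP both-lanes value = %#x\n", reg);	/* 0xc8c8 */
	return 0;
}
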
102/* Preemphasis on lanes 0, 1 */
103#define TXC_ALRGS_ATXPRE0 0xc043
104/* Preemphasis on lanes 2, 3 */
105#define TXC_ALRGS_ATXPRE1 0xc044
106
107#define TXC_ATXPRE_NONE 0
108#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
109
110#define TXC_ALRGS_ARXCTL 0xc045
111/* Lane power-down */
112#define TXC_ARXCTL_RXPD3_LBN 15
113#define TXC_ARXCTL_RXPD2_LBN 14
114#define TXC_ARXCTL_RXPD1_LBN 13
115#define TXC_ARXCTL_RXPD0_LBN 12
116
117/* Main control */
118#define TXC_MRGS_CTL 0xc340
119/* Bits in main control */
120#define TXC_MCTL_RESET_LBN 15 /* Self clear */
121#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
122#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
123
124/* GPIO output */
125#define TXC_GPIO_OUTPUT 0xc346
126#define TXC_GPIO_DIR 0xc348
127
128/* Vendor-specific BIST registers */
129#define TXC_BIST_CTL 0xc280
130#define TXC_BIST_TXFRMCNT 0xc281
131#define TXC_BIST_RX0FRMCNT 0xc282
132#define TXC_BIST_RX1FRMCNT 0xc283
133#define TXC_BIST_RX2FRMCNT 0xc284
134#define TXC_BIST_RX3FRMCNT 0xc285
135#define TXC_BIST_RX0ERRCNT 0xc286
136#define TXC_BIST_RX1ERRCNT 0xc287
137#define TXC_BIST_RX2ERRCNT 0xc288
138#define TXC_BIST_RX3ERRCNT 0xc289
139
 140/* BIST type (controls bit pattern in test) */
141#define TXC_BIST_CTRL_TYPE_LBN 10
142#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
143#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
144#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
145#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
146/* Set this to 1 for 10 bit and 0 for 8 bit */
147#define TXC_BIST_CTRL_B10EN_LBN 12
148/* Enable BIST (write 0 to disable) */
149#define TXC_BIST_CTRL_ENAB_LBN 13
150/* Stop BIST (self-clears when stop complete) */
151#define TXC_BIST_CTRL_STOP_LBN 14
152/* Start BIST (cleared by writing 1 to STOP) */
153#define TXC_BIST_CTRL_STRT_LBN 15
154
155/* Mt. Diablo test configuration */
156#define TXC_MTDIABLO_CTRL 0xc34f
157#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
158
159struct txc43128_data {
160 unsigned long bug10934_timer;
161 enum efx_phy_mode phy_mode;
162 enum efx_loopback_mode loopback_mode;
163};
164
165/* The PHY sometimes needs a reset to bring the link back up. So long as
166 * it reports link down, we reset it every 5 seconds.
167 */
168#define BUG10934_RESET_INTERVAL (5 * HZ)
169
170/* Perform a reset that doesn't clear configuration changes */
171static void txc_reset_logic(struct efx_nic *efx);
172
173/* Set the output value of a gpio */
174void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
175{
176 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
177}
178
179/* Set up the GPIO direction register */
180void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
181{
182 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
183}
184
185/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
186 * global reset (it's less clear what reset of other MMDs does).*/
187static int txc_reset_phy(struct efx_nic *efx)
188{
189 int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
190 TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
191 TXC_RESET_WAIT);
192 if (rc < 0)
193 goto fail;
194
195 /* Check that all the MMDs we expect are present and responding. */
196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
197 if (rc < 0)
198 goto fail;
199
200 return 0;
201
202fail:
203 netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
204 return rc;
205}
206
207/* Run a single BIST on one MMD */
208static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
209{
210 int ctrl, bctl;
211 int lane;
212 int rc = 0;
213
214 /* Set PMA to test into loopback using Mt Diablo reg as per app note */
215 ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
216 ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
217 efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
218
219 /* The BIST app. note lists these as 3 distinct steps. */
220 /* Set the BIST type */
221 bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
222 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
223
224 /* Set the BSTEN bit in the BIST Control register to enable */
225 bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
226 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
227
228 /* Set the BSTRT bit in the BIST Control register */
229 efx_mdio_write(efx, mmd, TXC_BIST_CTL,
230 bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
231
232 /* Wait. */
233 udelay(TXC_BIST_DURATION);
234
235 /* Set the BSTOP bit in the BIST Control register */
236 bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
237 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
238
239 /* The STOP bit should go off when things have stopped */
240 while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
241 bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
242
 243	/* Check that all the error counts are 0 and all the frame counts are
 244	 * non-zero */
245 for (lane = 0; lane < 4; lane++) {
246 int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
247 if (count != 0) {
248 netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
249 "Lane %d had %d errs\n", lane, count);
250 rc = -EIO;
251 }
252 count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
253 if (count == 0) {
254 netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
255 "Lane %d got 0 frames\n", lane);
256 rc = -EIO;
257 }
258 }
259
260 if (rc == 0)
261 netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
262
263 /* Disable BIST */
264 efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
265
266 /* Turn off loopback */
267 ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
268 efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
269
270 return rc;
271}
272
273static int txc_bist(struct efx_nic *efx)
274{
275 return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
276}
277
278/* Push the non-configurable defaults into the PHY. This must be
279 * done after every full reset */
280static void txc_apply_defaults(struct efx_nic *efx)
281{
282 int mctrl;
283
284 /* Turn amplitude down and preemphasis off on the host side
285 * (PHY<->MAC) as this is believed less likely to upset Falcon
286 * and no adverse effects have been noted. It probably also
287 * saves a picowatt or two */
288
289 /* Turn off preemphasis */
290 efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
291 efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
292
293 /* Turn down the amplitude */
294 efx_mdio_write(efx, MDIO_MMD_PHYXS,
295 TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
296 efx_mdio_write(efx, MDIO_MMD_PHYXS,
297 TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
298
299 /* Set the line side amplitude and preemphasis to the databook
300 * defaults as an erratum causes them to be 0 on at least some
 301	 * PHY revisions */
302 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
303 TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
304 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
305 TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
306 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
307 TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
308 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
309 TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
310
311 /* Set up the LEDs */
312 mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
313
314 /* Set the Green and Red LEDs to their default modes */
315 mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
316 efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
317
318 /* Databook recommends doing this after configuration changes */
319 txc_reset_logic(efx);
320
321 falcon_board(efx)->type->init_phy(efx);
322}
323
324static int txc43128_phy_probe(struct efx_nic *efx)
325{
326 struct txc43128_data *phy_data;
327
328 /* Allocate phy private storage */
329 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
330 if (!phy_data)
331 return -ENOMEM;
332 efx->phy_data = phy_data;
333 phy_data->phy_mode = efx->phy_mode;
334
335 efx->mdio.mmds = TXC_REQUIRED_DEVS;
336 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
337
338 efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
339
340 return 0;
341}
342
343/* Initialisation entry point for this PHY driver */
344static int txc43128_phy_init(struct efx_nic *efx)
345{
346 int rc;
347
348 rc = txc_reset_phy(efx);
349 if (rc < 0)
350 return rc;
351
352 rc = txc_bist(efx);
353 if (rc < 0)
354 return rc;
355
356 txc_apply_defaults(efx);
357
358 return 0;
359}
360
361/* Set the lane power down state in the global registers */
362static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
363{
364 int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
365 int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
366
367 if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
368 ctl &= ~pd;
369 else
370 ctl |= pd;
371
372 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
373}
374
375/* Set the lane power down state in the analog control registers */
376static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
377{
378 int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
379 | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
380 int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
381 | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
382 int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
383 int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
384
385 if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
386 txctl &= ~txpd;
387 rxctl &= ~rxpd;
388 } else {
389 txctl |= txpd;
390 rxctl |= rxpd;
391 }
392
393 efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
394 efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
395}
396
397static void txc_set_power(struct efx_nic *efx)
398{
399 /* According to the data book, all the MMDs can do low power */
400 efx_mdio_set_mmds_lpower(efx,
401 !!(efx->phy_mode & PHY_MODE_LOW_POWER),
402 TXC_REQUIRED_DEVS);
403
404 /* Global register bank is in PCS, PHY XS. These control the host
405 * side and line side settings respectively. */
406 txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
407 txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
408
409 /* Analog register bank in PMA/PMD, PHY XS */
410 txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
411 txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
412}
413
414static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
415{
416 int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
417 int tries = 50;
418
419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
421 while (tries--) {
422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
424 break;
425 udelay(1);
426 }
 427	if (tries < 0)
428 netif_info(efx, hw, efx->net_dev,
429 TXCNAME " Logic reset timed out!\n");
430}
431
432/* Perform a logic reset. This preserves the configuration registers
433 * and is needed for some configuration changes to take effect */
434static void txc_reset_logic(struct efx_nic *efx)
435{
436 /* The data sheet claims we can do the logic reset on either the
437 * PCS or the PHYXS and the result is a reset of both host- and
438 * line-side logic. */
439 txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
440}
441
442static bool txc43128_phy_read_link(struct efx_nic *efx)
443{
444 return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
445}
446
447static int txc43128_phy_reconfigure(struct efx_nic *efx)
448{
449 struct txc43128_data *phy_data = efx->phy_data;
450 enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
451 bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
452
453 if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
454 txc_reset_phy(efx);
455 txc_apply_defaults(efx);
456 falcon_reset_xaui(efx);
457 mode_change &= ~PHY_MODE_TX_DISABLED;
458 }
459
460 efx_mdio_transmit_disable(efx);
461 efx_mdio_phy_reconfigure(efx);
462 if (mode_change & PHY_MODE_LOW_POWER)
463 txc_set_power(efx);
464
465 /* The data sheet claims this is required after every reconfiguration
466 * (note at end of 7.1), but we mustn't do it when nothing changes as
467 * it glitches the link, and reconfigure gets called on link change,
468 * so we get an IRQ storm on link up. */
469 if (loop_change || mode_change)
470 txc_reset_logic(efx);
471
472 phy_data->phy_mode = efx->phy_mode;
473 phy_data->loopback_mode = efx->loopback_mode;
474
475 return 0;
476}
477
478static void txc43128_phy_fini(struct efx_nic *efx)
479{
480 /* Disable link events */
481 efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
482}
483
484static void txc43128_phy_remove(struct efx_nic *efx)
485{
486 kfree(efx->phy_data);
487 efx->phy_data = NULL;
488}
489
490/* Periodic callback: this exists mainly to poll link status as we
491 * don't use LASI interrupts */
492static bool txc43128_phy_poll(struct efx_nic *efx)
493{
494 struct txc43128_data *data = efx->phy_data;
495 bool was_up = efx->link_state.up;
496
497 efx->link_state.up = txc43128_phy_read_link(efx);
498 efx->link_state.speed = 10000;
499 efx->link_state.fd = true;
500 efx->link_state.fc = efx->wanted_fc;
501
502 if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
503 data->bug10934_timer = jiffies;
504 } else {
505 if (time_after_eq(jiffies, (data->bug10934_timer +
506 BUG10934_RESET_INTERVAL))) {
507 data->bug10934_timer = jiffies;
508 txc_reset_logic(efx);
509 }
510 }
511
512 return efx->link_state.up != was_up;
513}
514
 515static const char *const txc43128_test_names[] = {
516 "bist"
517};
518
519static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
520{
521 if (index < ARRAY_SIZE(txc43128_test_names))
522 return txc43128_test_names[index];
523 return NULL;
524}
525
526static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
527{
528 int rc;
529
530 if (!(flags & ETH_TEST_FL_OFFLINE))
531 return 0;
532
533 rc = txc_reset_phy(efx);
534 if (rc < 0)
535 return rc;
536
537 rc = txc_bist(efx);
538 txc_apply_defaults(efx);
539 results[0] = rc ? -1 : 1;
540 return rc;
541}
542
543static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
544{
545 mdio45_ethtool_gset(&efx->mdio, ecmd);
546}
547
548const struct efx_phy_operations falcon_txc_phy_ops = {
549 .probe = txc43128_phy_probe,
550 .init = txc43128_phy_init,
551 .reconfigure = txc43128_phy_reconfigure,
552 .poll = txc43128_phy_poll,
553 .fini = txc43128_phy_fini,
554 .remove = txc43128_phy_remove,
555 .get_settings = txc43128_get_settings,
556 .set_settings = efx_mdio_set_settings,
557 .test_alive = efx_mdio_test_alive,
558 .run_tests = txc43128_run_tests,
559 .test_name = txc43128_test_name,
560};
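
A const efx_phy_operations table like the one above is the whole interface between this PHY and the core driver: falcon.c selects a table by phy_type at probe time and every later call is an indirect one. A stripped-down model of that dispatch (the function names and call sequence here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct efx_nic;		/* opaque to the PHY-agnostic core */

struct efx_phy_operations {
	int  (*probe)(struct efx_nic *efx);
	int  (*init)(struct efx_nic *efx);
	int  (*reconfigure)(struct efx_nic *efx);
	bool (*poll)(struct efx_nic *efx);
};

static int demo_probe(struct efx_nic *efx) { (void)efx; puts("probe"); return 0; }
static int demo_init(struct efx_nic *efx) { (void)efx; puts("init"); return 0; }
static int demo_reconf(struct efx_nic *efx) { (void)efx; puts("reconfigure"); return 0; }
static bool demo_poll(struct efx_nic *efx) { (void)efx; return false; }

static const struct efx_phy_operations demo_phy_ops = {
	.probe		= demo_probe,
	.init		= demo_init,
	.reconfigure	= demo_reconf,
	.poll		= demo_poll,
};

int main(void)
{
	const struct efx_phy_operations *ops = &demo_phy_ops;

	/* Core sequence: probe, init, then reconfigure on mode changes. */
	if (!ops->probe(NULL) && !ops->init(NULL))
		ops->reconfigure(NULL);
	return 0;
}
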
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 782e45a613d6..99ff11400cef 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -19,9 +19,7 @@
19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) 20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) 21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
22#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) 22#define EFX_WORKAROUND_10G(efx) 1
23#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
24 (efx)->phy_type == PHY_TYPE_SFT9001B)
25 23
26/* XAUI resets if link not detected */ 24/* XAUI resets if link not detected */
27#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -40,6 +38,8 @@
40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
41/* Legacy interrupt storm when interrupt fifo fills */ 39/* Legacy interrupt storm when interrupt fifo fills */
42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41/* Write combining and sriov=enabled are incompatible */
42#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
43 43
44/* Spurious parity errors in TSORT buffers */ 44/* Spurious parity errors in TSORT buffers */
45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
@@ -58,9 +58,4 @@
58/* Leak overlength packets rather than free */ 58/* Leak overlength packets rather than free */
59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
60 60
61/* Need to send XNP pages for 100BaseT */
62#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
63/* Don't restart AN in near-side loopback */
64#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
65
66#endif /* EFX_WORKAROUNDS_H */ 61#endif /* EFX_WORKAROUNDS_H */
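
Each EFX_WORKAROUND_* name expands to a predicate on the NIC, so call sites read as self-documenting bug numbers rather than revision tests. A reduced model of the idiom (the revision enum and call site are illustrative; the macro shapes follow this header):

#include <stdio.h>

enum efx_rev { EFX_REV_FALCON_A1, EFX_REV_FALCON_B0, EFX_REV_SIENA_A0 };
struct efx_nic { enum efx_rev rev; };

#define efx_nic_rev(efx)	((efx)->rev)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)

/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213	EFX_WORKAROUND_SIENA

int main(void)
{
	struct efx_nic falcon = { EFX_REV_FALCON_B0 };
	struct efx_nic siena  = { EFX_REV_SIENA_A0 };

	/* Gating is by bug number, not silicon revision, at the call site. */
	printf("apply 17213 on falcon? %d\n", EFX_WORKAROUND_17213(&falcon));
	printf("apply 17213 on siena?  %d\n", EFX_WORKAROUND_17213(&siena));
	return 0;
}
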