aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/sfc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--drivers/net/ethernet/sfc/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h22
-rw-r--r--drivers/net/ethernet/sfc/efx.c868
-rw-r--r--drivers/net/ethernet/sfc/efx.h11
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c200
-rw-r--r--drivers/net/ethernet/sfc/falcon.c54
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c15
-rw-r--r--drivers/net/ethernet/sfc/filter.c255
-rw-r--r--drivers/net/ethernet/sfc/filter.h20
-rw-r--r--drivers/net/ethernet/sfc/mac.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c149
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h36
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c65
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c415
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3542
-rw-r--r--drivers/net/ethernet/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c2
-rw-r--r--drivers/net/ethernet/sfc/mtd.c15
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h321
-rw-r--r--drivers/net/ethernet/sfc/nic.c593
-rw-r--r--drivers/net/ethernet/sfc/nic.h122
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c6
-rw-r--r--drivers/net/ethernet/sfc/regs.h20
-rw-r--r--drivers/net/ethernet/sfc/rx.c126
-rw-r--r--drivers/net/ethernet/sfc/selftest.c110
-rw-r--r--drivers/net/ethernet/sfc/selftest.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c47
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c1643
-rw-r--r--drivers/net/ethernet/sfc/spi.h2
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/sfc/vfdi.h255
35 files changed, 6283 insertions, 2737 deletions
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5d18841f0f3d..fb3cbc27063c 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -16,6 +16,21 @@ config SFC_MTD
16 depends on SFC && MTD && !(SFC=y && MTD=m) 16 depends on SFC && MTD && !(SFC=y && MTD=m)
17 default y 17 default y
18 ---help--- 18 ---help---
19 This exposes the on-board flash memory as MTD devices (e.g. 19 This exposes the on-board flash and/or EEPROM as MTD devices
20 /dev/mtd1). This makes it possible to upload new firmware 20 (e.g. /dev/mtd1). This is required to update the firmware or
21 to the NIC. 21 the boot configuration under Linux.
22config SFC_MCDI_MON
23 bool "Solarflare SFC9000-family hwmon support"
24 depends on SFC && HWMON && !(SFC=y && HWMON=m)
25 default y
26 ---help---
27 This exposes the on-board firmware-managed sensors as a
28 hardware monitor device.
29config SFC_SRIOV
30 bool "Solarflare SFC9000-family SR-IOV support"
31 depends on SFC && PCI_IOV
32 default y
33 ---help---
34 This enables support for the SFC9000 I/O Virtualization
35 features, allowing accelerated network performance in
36 virtualized environments.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ab31c7124db1..ea1f8db57318 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,7 +2,8 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
2 falcon_xmac.o mcdi_mac.o \ 2 falcon_xmac.o mcdi_mac.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o txc43128_phy.o falcon_boards.o \ 4 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o 5 mcdi.o mcdi_phy.o mcdi_mon.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
7 8
8obj-$(CONFIG_SFC) += sfc.o 9obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 098ac2ad757d..b26a954c27fc 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -448,40 +448,40 @@ typedef union efx_oword {
448 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low)) 448 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
449 449
450#define EFX_SET_OWORD64(oword, low, high, value) do { \ 450#define EFX_SET_OWORD64(oword, low, high, value) do { \
451 (oword).u64[0] = (((oword).u64[0] \ 451 (oword).u64[0] = (((oword).u64[0] \
452 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ 452 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
453 | EFX_INSERT64(0, 63, low, high, value)); \ 453 | EFX_INSERT64(0, 63, low, high, value)); \
454 (oword).u64[1] = (((oword).u64[1] \ 454 (oword).u64[1] = (((oword).u64[1] \
455 & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ 455 & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
456 | EFX_INSERT64(64, 127, low, high, value)); \ 456 | EFX_INSERT64(64, 127, low, high, value)); \
457 } while (0) 457 } while (0)
458 458
459#define EFX_SET_QWORD64(qword, low, high, value) do { \ 459#define EFX_SET_QWORD64(qword, low, high, value) do { \
460 (qword).u64[0] = (((qword).u64[0] \ 460 (qword).u64[0] = (((qword).u64[0] \
461 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ 461 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
462 | EFX_INSERT64(0, 63, low, high, value)); \ 462 | EFX_INSERT64(0, 63, low, high, value)); \
463 } while (0) 463 } while (0)
464 464
465#define EFX_SET_OWORD32(oword, low, high, value) do { \ 465#define EFX_SET_OWORD32(oword, low, high, value) do { \
466 (oword).u32[0] = (((oword).u32[0] \ 466 (oword).u32[0] = (((oword).u32[0] \
467 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ 467 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
468 | EFX_INSERT32(0, 31, low, high, value)); \ 468 | EFX_INSERT32(0, 31, low, high, value)); \
469 (oword).u32[1] = (((oword).u32[1] \ 469 (oword).u32[1] = (((oword).u32[1] \
470 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ 470 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
471 | EFX_INSERT32(32, 63, low, high, value)); \ 471 | EFX_INSERT32(32, 63, low, high, value)); \
472 (oword).u32[2] = (((oword).u32[2] \ 472 (oword).u32[2] = (((oword).u32[2] \
473 & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ 473 & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
474 | EFX_INSERT32(64, 95, low, high, value)); \ 474 | EFX_INSERT32(64, 95, low, high, value)); \
475 (oword).u32[3] = (((oword).u32[3] \ 475 (oword).u32[3] = (((oword).u32[3] \
476 & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ 476 & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
477 | EFX_INSERT32(96, 127, low, high, value)); \ 477 | EFX_INSERT32(96, 127, low, high, value)); \
478 } while (0) 478 } while (0)
479 479
480#define EFX_SET_QWORD32(qword, low, high, value) do { \ 480#define EFX_SET_QWORD32(qword, low, high, value) do { \
481 (qword).u32[0] = (((qword).u32[0] \ 481 (qword).u32[0] = (((qword).u32[0] \
482 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ 482 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
483 | EFX_INSERT32(0, 31, low, high, value)); \ 483 | EFX_INSERT32(0, 31, low, high, value)); \
484 (qword).u32[1] = (((qword).u32[1] \ 484 (qword).u32[1] = (((qword).u32[1] \
485 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ 485 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
486 | EFX_INSERT32(32, 63, low, high, value)); \ 486 | EFX_INSERT32(32, 63, low, high, value)); \
487 } while (0) 487 } while (0)
@@ -531,8 +531,8 @@ typedef union efx_oword {
531 531
532 532
533/* Static initialiser */ 533/* Static initialiser */
534#define EFX_OWORD32(a, b, c, d) \ 534#define EFX_OWORD32(a, b, c, d) \
535 { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ 535 { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
536 cpu_to_le32(c), cpu_to_le32(d) } } 536 cpu_to_le32(c), cpu_to_le32(d) } }
537 537
538#endif /* EFX_BITFIELD_H */ 538#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e43702f33b62..1908ba7ca7e6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -38,15 +38,15 @@
38 38
39/* Loopback mode names (see LOOPBACK_MODE()) */ 39/* Loopback mode names (see LOOPBACK_MODE()) */
40const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; 40const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
41const char *efx_loopback_mode_names[] = { 41const char *const efx_loopback_mode_names[] = {
42 [LOOPBACK_NONE] = "NONE", 42 [LOOPBACK_NONE] = "NONE",
43 [LOOPBACK_DATA] = "DATAPATH", 43 [LOOPBACK_DATA] = "DATAPATH",
44 [LOOPBACK_GMAC] = "GMAC", 44 [LOOPBACK_GMAC] = "GMAC",
45 [LOOPBACK_XGMII] = "XGMII", 45 [LOOPBACK_XGMII] = "XGMII",
46 [LOOPBACK_XGXS] = "XGXS", 46 [LOOPBACK_XGXS] = "XGXS",
47 [LOOPBACK_XAUI] = "XAUI", 47 [LOOPBACK_XAUI] = "XAUI",
48 [LOOPBACK_GMII] = "GMII", 48 [LOOPBACK_GMII] = "GMII",
49 [LOOPBACK_SGMII] = "SGMII", 49 [LOOPBACK_SGMII] = "SGMII",
50 [LOOPBACK_XGBR] = "XGBR", 50 [LOOPBACK_XGBR] = "XGBR",
51 [LOOPBACK_XFI] = "XFI", 51 [LOOPBACK_XFI] = "XFI",
52 [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 52 [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
@@ -55,21 +55,21 @@ const char *efx_loopback_mode_names[] = {
55 [LOOPBACK_XFI_FAR] = "XFI_FAR", 55 [LOOPBACK_XFI_FAR] = "XFI_FAR",
56 [LOOPBACK_GPHY] = "GPHY", 56 [LOOPBACK_GPHY] = "GPHY",
57 [LOOPBACK_PHYXS] = "PHYXS", 57 [LOOPBACK_PHYXS] = "PHYXS",
58 [LOOPBACK_PCS] = "PCS", 58 [LOOPBACK_PCS] = "PCS",
59 [LOOPBACK_PMAPMD] = "PMA/PMD", 59 [LOOPBACK_PMAPMD] = "PMA/PMD",
60 [LOOPBACK_XPORT] = "XPORT", 60 [LOOPBACK_XPORT] = "XPORT",
61 [LOOPBACK_XGMII_WS] = "XGMII_WS", 61 [LOOPBACK_XGMII_WS] = "XGMII_WS",
62 [LOOPBACK_XAUI_WS] = "XAUI_WS", 62 [LOOPBACK_XAUI_WS] = "XAUI_WS",
63 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 63 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
64 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 64 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
65 [LOOPBACK_GMII_WS] = "GMII_WS", 65 [LOOPBACK_GMII_WS] = "GMII_WS",
66 [LOOPBACK_XFI_WS] = "XFI_WS", 66 [LOOPBACK_XFI_WS] = "XFI_WS",
67 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 67 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
68 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 68 [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
69}; 69};
70 70
71const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 71const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
72const char *efx_reset_type_names[] = { 72const char *const efx_reset_type_names[] = {
73 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 73 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
74 [RESET_TYPE_ALL] = "ALL", 74 [RESET_TYPE_ALL] = "ALL",
75 [RESET_TYPE_WORLD] = "WORLD", 75 [RESET_TYPE_WORLD] = "WORLD",
@@ -122,15 +122,6 @@ static int napi_weight = 64;
122 */ 122 */
123static unsigned int efx_monitor_interval = 1 * HZ; 123static unsigned int efx_monitor_interval = 1 * HZ;
124 124
125/* This controls whether or not the driver will initialise devices
126 * with invalid MAC addresses stored in the EEPROM or flash. If true,
127 * such devices will be initialised with a random locally-generated
128 * MAC address. This allows for loading the sfc_mtd driver to
129 * reprogram the flash, even if the flash contents (including the MAC
130 * address) have previously been erased.
131 */
132static unsigned int allow_bad_hwaddr;
133
134/* Initial interrupt moderation settings. They can be modified after 125/* Initial interrupt moderation settings. They can be modified after
135 * module load with ethtool. 126 * module load with ethtool.
136 * 127 *
@@ -162,7 +153,7 @@ static unsigned int interrupt_mode;
162 * interrupt handling. 153 * interrupt handling.
163 * 154 *
164 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 155 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
165 * The default (0) means to assign an interrupt to each package (level II cache) 156 * The default (0) means to assign an interrupt to each core.
166 */ 157 */
167static unsigned int rss_cpus; 158static unsigned int rss_cpus;
168module_param(rss_cpus, uint, 0444); 159module_param(rss_cpus, uint, 0444);
@@ -195,9 +186,13 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
195 * 186 *
196 *************************************************************************/ 187 *************************************************************************/
197 188
189static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
190static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
191static void efx_remove_channel(struct efx_channel *channel);
198static void efx_remove_channels(struct efx_nic *efx); 192static void efx_remove_channels(struct efx_nic *efx);
193static const struct efx_channel_type efx_default_channel_type;
199static void efx_remove_port(struct efx_nic *efx); 194static void efx_remove_port(struct efx_nic *efx);
200static void efx_init_napi(struct efx_nic *efx); 195static void efx_init_napi_channel(struct efx_channel *channel);
201static void efx_fini_napi(struct efx_nic *efx); 196static void efx_fini_napi(struct efx_nic *efx);
202static void efx_fini_napi_channel(struct efx_channel *channel); 197static void efx_fini_napi_channel(struct efx_channel *channel);
203static void efx_fini_struct(struct efx_nic *efx); 198static void efx_fini_struct(struct efx_nic *efx);
@@ -226,27 +221,27 @@ static void efx_stop_all(struct efx_nic *efx);
226 */ 221 */
227static int efx_process_channel(struct efx_channel *channel, int budget) 222static int efx_process_channel(struct efx_channel *channel, int budget)
228{ 223{
229 struct efx_nic *efx = channel->efx;
230 int spent; 224 int spent;
231 225
232 if (unlikely(efx->reset_pending || !channel->enabled)) 226 if (unlikely(!channel->enabled))
233 return 0; 227 return 0;
234 228
235 spent = efx_nic_process_eventq(channel, budget); 229 spent = efx_nic_process_eventq(channel, budget);
236 if (spent == 0) 230 if (spent && efx_channel_has_rx_queue(channel)) {
237 return 0; 231 struct efx_rx_queue *rx_queue =
238 232 efx_channel_get_rx_queue(channel);
239 /* Deliver last RX packet. */ 233
240 if (channel->rx_pkt) { 234 /* Deliver last RX packet. */
241 __efx_rx_packet(channel, channel->rx_pkt, 235 if (channel->rx_pkt) {
242 channel->rx_pkt_csummed); 236 __efx_rx_packet(channel, channel->rx_pkt);
243 channel->rx_pkt = NULL; 237 channel->rx_pkt = NULL;
238 }
239 if (rx_queue->enabled) {
240 efx_rx_strategy(channel);
241 efx_fast_push_rx_descriptors(rx_queue);
242 }
244 } 243 }
245 244
246 efx_rx_strategy(channel);
247
248 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
249
250 return spent; 245 return spent;
251} 246}
252 247
@@ -286,7 +281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
286 spent = efx_process_channel(channel, budget); 281 spent = efx_process_channel(channel, budget);
287 282
288 if (spent < budget) { 283 if (spent < budget) {
289 if (channel->channel < efx->n_rx_channels && 284 if (efx_channel_has_rx_queue(channel) &&
290 efx->irq_rx_adaptive && 285 efx->irq_rx_adaptive &&
291 unlikely(++channel->irq_count == 1000)) { 286 unlikely(++channel->irq_count == 1000)) {
292 if (unlikely(channel->irq_mod_score < 287 if (unlikely(channel->irq_mod_score <
@@ -373,7 +368,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
373 struct efx_nic *efx = channel->efx; 368 struct efx_nic *efx = channel->efx;
374 unsigned long entries; 369 unsigned long entries;
375 370
376 netif_dbg(channel->efx, probe, channel->efx->net_dev, 371 netif_dbg(efx, probe, efx->net_dev,
377 "chan %d create event queue\n", channel->channel); 372 "chan %d create event queue\n", channel->channel);
378 373
379 /* Build an event queue with room for one event per tx and rx buffer, 374 /* Build an event queue with room for one event per tx and rx buffer,
@@ -396,6 +391,34 @@ static void efx_init_eventq(struct efx_channel *channel)
396 efx_nic_init_eventq(channel); 391 efx_nic_init_eventq(channel);
397} 392}
398 393
394/* Enable event queue processing and NAPI */
395static void efx_start_eventq(struct efx_channel *channel)
396{
397 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
398 "chan %d start event queue\n", channel->channel);
399
400 /* The interrupt handler for this channel may set work_pending
401 * as soon as we enable it. Make sure it's cleared before
402 * then. Similarly, make sure it sees the enabled flag set.
403 */
404 channel->work_pending = false;
405 channel->enabled = true;
406 smp_wmb();
407
408 napi_enable(&channel->napi_str);
409 efx_nic_eventq_read_ack(channel);
410}
411
412/* Disable event queue processing and NAPI */
413static void efx_stop_eventq(struct efx_channel *channel)
414{
415 if (!channel->enabled)
416 return;
417
418 napi_disable(&channel->napi_str);
419 channel->enabled = false;
420}
421
399static void efx_fini_eventq(struct efx_channel *channel) 422static void efx_fini_eventq(struct efx_channel *channel)
400{ 423{
401 netif_dbg(channel->efx, drv, channel->efx->net_dev, 424 netif_dbg(channel->efx, drv, channel->efx->net_dev,
@@ -418,8 +441,7 @@ static void efx_remove_eventq(struct efx_channel *channel)
418 * 441 *
419 *************************************************************************/ 442 *************************************************************************/
420 443
421/* Allocate and initialise a channel structure, optionally copying 444/* Allocate and initialise a channel structure. */
422 * parameters (but not resources) from an old channel structure. */
423static struct efx_channel * 445static struct efx_channel *
424efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) 446efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
425{ 447{
@@ -428,45 +450,60 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
428 struct efx_tx_queue *tx_queue; 450 struct efx_tx_queue *tx_queue;
429 int j; 451 int j;
430 452
431 if (old_channel) { 453 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
432 channel = kmalloc(sizeof(*channel), GFP_KERNEL); 454 if (!channel)
433 if (!channel) 455 return NULL;
434 return NULL;
435 456
436 *channel = *old_channel; 457 channel->efx = efx;
458 channel->channel = i;
459 channel->type = &efx_default_channel_type;
437 460
438 channel->napi_dev = NULL; 461 for (j = 0; j < EFX_TXQ_TYPES; j++) {
439 memset(&channel->eventq, 0, sizeof(channel->eventq)); 462 tx_queue = &channel->tx_queue[j];
463 tx_queue->efx = efx;
464 tx_queue->queue = i * EFX_TXQ_TYPES + j;
465 tx_queue->channel = channel;
466 }
440 467
441 rx_queue = &channel->rx_queue; 468 rx_queue = &channel->rx_queue;
442 rx_queue->buffer = NULL; 469 rx_queue->efx = efx;
443 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); 470 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
471 (unsigned long)rx_queue);
444 472
445 for (j = 0; j < EFX_TXQ_TYPES; j++) { 473 return channel;
446 tx_queue = &channel->tx_queue[j]; 474}
447 if (tx_queue->channel) 475
448 tx_queue->channel = channel; 476/* Allocate and initialise a channel structure, copying parameters
449 tx_queue->buffer = NULL; 477 * (but not resources) from an old channel structure.
450 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); 478 */
451 } 479static struct efx_channel *
452 } else { 480efx_copy_channel(const struct efx_channel *old_channel)
453 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 481{
454 if (!channel) 482 struct efx_channel *channel;
455 return NULL; 483 struct efx_rx_queue *rx_queue;
484 struct efx_tx_queue *tx_queue;
485 int j;
486
487 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
488 if (!channel)
489 return NULL;
456 490
457 channel->efx = efx; 491 *channel = *old_channel;
458 channel->channel = i;
459 492
460 for (j = 0; j < EFX_TXQ_TYPES; j++) { 493 channel->napi_dev = NULL;
461 tx_queue = &channel->tx_queue[j]; 494 memset(&channel->eventq, 0, sizeof(channel->eventq));
462 tx_queue->efx = efx; 495
463 tx_queue->queue = i * EFX_TXQ_TYPES + j; 496 for (j = 0; j < EFX_TXQ_TYPES; j++) {
497 tx_queue = &channel->tx_queue[j];
498 if (tx_queue->channel)
464 tx_queue->channel = channel; 499 tx_queue->channel = channel;
465 } 500 tx_queue->buffer = NULL;
501 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
466 } 502 }
467 503
468 rx_queue = &channel->rx_queue; 504 rx_queue = &channel->rx_queue;
469 rx_queue->efx = efx; 505 rx_queue->buffer = NULL;
506 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
470 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, 507 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
471 (unsigned long)rx_queue); 508 (unsigned long)rx_queue);
472 509
@@ -482,57 +519,62 @@ static int efx_probe_channel(struct efx_channel *channel)
482 netif_dbg(channel->efx, probe, channel->efx->net_dev, 519 netif_dbg(channel->efx, probe, channel->efx->net_dev,
483 "creating channel %d\n", channel->channel); 520 "creating channel %d\n", channel->channel);
484 521
522 rc = channel->type->pre_probe(channel);
523 if (rc)
524 goto fail;
525
485 rc = efx_probe_eventq(channel); 526 rc = efx_probe_eventq(channel);
486 if (rc) 527 if (rc)
487 goto fail1; 528 goto fail;
488 529
489 efx_for_each_channel_tx_queue(tx_queue, channel) { 530 efx_for_each_channel_tx_queue(tx_queue, channel) {
490 rc = efx_probe_tx_queue(tx_queue); 531 rc = efx_probe_tx_queue(tx_queue);
491 if (rc) 532 if (rc)
492 goto fail2; 533 goto fail;
493 } 534 }
494 535
495 efx_for_each_channel_rx_queue(rx_queue, channel) { 536 efx_for_each_channel_rx_queue(rx_queue, channel) {
496 rc = efx_probe_rx_queue(rx_queue); 537 rc = efx_probe_rx_queue(rx_queue);
497 if (rc) 538 if (rc)
498 goto fail3; 539 goto fail;
499 } 540 }
500 541
501 channel->n_rx_frm_trunc = 0; 542 channel->n_rx_frm_trunc = 0;
502 543
503 return 0; 544 return 0;
504 545
505 fail3: 546fail:
506 efx_for_each_channel_rx_queue(rx_queue, channel) 547 efx_remove_channel(channel);
507 efx_remove_rx_queue(rx_queue);
508 fail2:
509 efx_for_each_channel_tx_queue(tx_queue, channel)
510 efx_remove_tx_queue(tx_queue);
511 fail1:
512 return rc; 548 return rc;
513} 549}
514 550
551static void
552efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
553{
554 struct efx_nic *efx = channel->efx;
555 const char *type;
556 int number;
557
558 number = channel->channel;
559 if (efx->tx_channel_offset == 0) {
560 type = "";
561 } else if (channel->channel < efx->tx_channel_offset) {
562 type = "-rx";
563 } else {
564 type = "-tx";
565 number -= efx->tx_channel_offset;
566 }
567 snprintf(buf, len, "%s%s-%d", efx->name, type, number);
568}
515 569
516static void efx_set_channel_names(struct efx_nic *efx) 570static void efx_set_channel_names(struct efx_nic *efx)
517{ 571{
518 struct efx_channel *channel; 572 struct efx_channel *channel;
519 const char *type = "";
520 int number;
521 573
522 efx_for_each_channel(channel, efx) { 574 efx_for_each_channel(channel, efx)
523 number = channel->channel; 575 channel->type->get_name(channel,
524 if (efx->n_channels > efx->n_rx_channels) { 576 efx->channel_name[channel->channel],
525 if (channel->channel < efx->n_rx_channels) { 577 sizeof(efx->channel_name[0]));
526 type = "-rx";
527 } else {
528 type = "-tx";
529 number -= efx->n_rx_channels;
530 }
531 }
532 snprintf(efx->channel_name[channel->channel],
533 sizeof(efx->channel_name[0]),
534 "%s%s-%d", efx->name, type, number);
535 }
536} 578}
537 579
538static int efx_probe_channels(struct efx_nic *efx) 580static int efx_probe_channels(struct efx_nic *efx)
@@ -543,7 +585,12 @@ static int efx_probe_channels(struct efx_nic *efx)
543 /* Restart special buffer allocation */ 585 /* Restart special buffer allocation */
544 efx->next_buffer_table = 0; 586 efx->next_buffer_table = 0;
545 587
546 efx_for_each_channel(channel, efx) { 588 /* Probe channels in reverse, so that any 'extra' channels
589 * use the start of the buffer table. This allows the traffic
590 * channels to be resized without moving them or wasting the
591 * entries before them.
592 */
593 efx_for_each_channel_rev(channel, efx) {
547 rc = efx_probe_channel(channel); 594 rc = efx_probe_channel(channel);
548 if (rc) { 595 if (rc) {
549 netif_err(efx, probe, efx->net_dev, 596 netif_err(efx, probe, efx->net_dev,
@@ -565,7 +612,7 @@ fail:
565 * to propagate configuration changes (mtu, checksum offload), or 612 * to propagate configuration changes (mtu, checksum offload), or
566 * to clear hardware error conditions 613 * to clear hardware error conditions
567 */ 614 */
568static void efx_init_channels(struct efx_nic *efx) 615static void efx_start_datapath(struct efx_nic *efx)
569{ 616{
570 struct efx_tx_queue *tx_queue; 617 struct efx_tx_queue *tx_queue;
571 struct efx_rx_queue *rx_queue; 618 struct efx_rx_queue *rx_queue;
@@ -584,68 +631,26 @@ static void efx_init_channels(struct efx_nic *efx)
584 631
585 /* Initialise the channels */ 632 /* Initialise the channels */
586 efx_for_each_channel(channel, efx) { 633 efx_for_each_channel(channel, efx) {
587 netif_dbg(channel->efx, drv, channel->efx->net_dev,
588 "init chan %d\n", channel->channel);
589
590 efx_init_eventq(channel);
591
592 efx_for_each_channel_tx_queue(tx_queue, channel) 634 efx_for_each_channel_tx_queue(tx_queue, channel)
593 efx_init_tx_queue(tx_queue); 635 efx_init_tx_queue(tx_queue);
594 636
595 /* The rx buffer allocation strategy is MTU dependent */ 637 /* The rx buffer allocation strategy is MTU dependent */
596 efx_rx_strategy(channel); 638 efx_rx_strategy(channel);
597 639
598 efx_for_each_channel_rx_queue(rx_queue, channel) 640 efx_for_each_channel_rx_queue(rx_queue, channel) {
599 efx_init_rx_queue(rx_queue); 641 efx_init_rx_queue(rx_queue);
642 efx_nic_generate_fill_event(rx_queue);
643 }
600 644
601 WARN_ON(channel->rx_pkt != NULL); 645 WARN_ON(channel->rx_pkt != NULL);
602 efx_rx_strategy(channel); 646 efx_rx_strategy(channel);
603 } 647 }
604}
605
606/* This enables event queue processing and packet transmission.
607 *
608 * Note that this function is not allowed to fail, since that would
609 * introduce too much complexity into the suspend/resume path.
610 */
611static void efx_start_channel(struct efx_channel *channel)
612{
613 struct efx_rx_queue *rx_queue;
614 648
615 netif_dbg(channel->efx, ifup, channel->efx->net_dev, 649 if (netif_device_present(efx->net_dev))
616 "starting chan %d\n", channel->channel); 650 netif_tx_wake_all_queues(efx->net_dev);
617
618 /* The interrupt handler for this channel may set work_pending
619 * as soon as we enable it. Make sure it's cleared before
620 * then. Similarly, make sure it sees the enabled flag set. */
621 channel->work_pending = false;
622 channel->enabled = true;
623 smp_wmb();
624
625 /* Fill the queues before enabling NAPI */
626 efx_for_each_channel_rx_queue(rx_queue, channel)
627 efx_fast_push_rx_descriptors(rx_queue);
628
629 napi_enable(&channel->napi_str);
630}
631
632/* This disables event queue processing and packet transmission.
633 * This function does not guarantee that all queue processing
634 * (e.g. RX refill) is complete.
635 */
636static void efx_stop_channel(struct efx_channel *channel)
637{
638 if (!channel->enabled)
639 return;
640
641 netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
642 "stop chan %d\n", channel->channel);
643
644 channel->enabled = false;
645 napi_disable(&channel->napi_str);
646} 651}
647 652
648static void efx_fini_channels(struct efx_nic *efx) 653static void efx_stop_datapath(struct efx_nic *efx)
649{ 654{
650 struct efx_channel *channel; 655 struct efx_channel *channel;
651 struct efx_tx_queue *tx_queue; 656 struct efx_tx_queue *tx_queue;
@@ -672,14 +677,21 @@ static void efx_fini_channels(struct efx_nic *efx)
672 } 677 }
673 678
674 efx_for_each_channel(channel, efx) { 679 efx_for_each_channel(channel, efx) {
675 netif_dbg(channel->efx, drv, channel->efx->net_dev, 680 /* RX packet processing is pipelined, so wait for the
676 "shut down chan %d\n", channel->channel); 681 * NAPI handler to complete. At least event queue 0
682 * might be kept active by non-data events, so don't
683 * use napi_synchronize() but actually disable NAPI
684 * temporarily.
685 */
686 if (efx_channel_has_rx_queue(channel)) {
687 efx_stop_eventq(channel);
688 efx_start_eventq(channel);
689 }
677 690
678 efx_for_each_channel_rx_queue(rx_queue, channel) 691 efx_for_each_channel_rx_queue(rx_queue, channel)
679 efx_fini_rx_queue(rx_queue); 692 efx_fini_rx_queue(rx_queue);
680 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 693 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
681 efx_fini_tx_queue(tx_queue); 694 efx_fini_tx_queue(tx_queue);
682 efx_fini_eventq(channel);
683 } 695 }
684} 696}
685 697
@@ -711,16 +723,40 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
711{ 723{
712 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 724 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
713 u32 old_rxq_entries, old_txq_entries; 725 u32 old_rxq_entries, old_txq_entries;
714 unsigned i; 726 unsigned i, next_buffer_table = 0;
715 int rc; 727 int rc = 0;
728
729 /* Not all channels should be reallocated. We must avoid
730 * reallocating their buffer table entries.
731 */
732 efx_for_each_channel(channel, efx) {
733 struct efx_rx_queue *rx_queue;
734 struct efx_tx_queue *tx_queue;
735
736 if (channel->type->copy)
737 continue;
738 next_buffer_table = max(next_buffer_table,
739 channel->eventq.index +
740 channel->eventq.entries);
741 efx_for_each_channel_rx_queue(rx_queue, channel)
742 next_buffer_table = max(next_buffer_table,
743 rx_queue->rxd.index +
744 rx_queue->rxd.entries);
745 efx_for_each_channel_tx_queue(tx_queue, channel)
746 next_buffer_table = max(next_buffer_table,
747 tx_queue->txd.index +
748 tx_queue->txd.entries);
749 }
716 750
717 efx_stop_all(efx); 751 efx_stop_all(efx);
718 efx_fini_channels(efx); 752 efx_stop_interrupts(efx, true);
719 753
720 /* Clone channels */ 754 /* Clone channels (where possible) */
721 memset(other_channel, 0, sizeof(other_channel)); 755 memset(other_channel, 0, sizeof(other_channel));
722 for (i = 0; i < efx->n_channels; i++) { 756 for (i = 0; i < efx->n_channels; i++) {
723 channel = efx_alloc_channel(efx, i, efx->channel[i]); 757 channel = efx->channel[i];
758 if (channel->type->copy)
759 channel = channel->type->copy(channel);
724 if (!channel) { 760 if (!channel) {
725 rc = -ENOMEM; 761 rc = -ENOMEM;
726 goto out; 762 goto out;
@@ -739,23 +775,31 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
739 other_channel[i] = channel; 775 other_channel[i] = channel;
740 } 776 }
741 777
742 rc = efx_probe_channels(efx); 778 /* Restart buffer table allocation */
743 if (rc) 779 efx->next_buffer_table = next_buffer_table;
744 goto rollback;
745 780
746 efx_init_napi(efx);
747
748 /* Destroy old channels */
749 for (i = 0; i < efx->n_channels; i++) { 781 for (i = 0; i < efx->n_channels; i++) {
750 efx_fini_napi_channel(other_channel[i]); 782 channel = efx->channel[i];
751 efx_remove_channel(other_channel[i]); 783 if (!channel->type->copy)
784 continue;
785 rc = efx_probe_channel(channel);
786 if (rc)
787 goto rollback;
788 efx_init_napi_channel(efx->channel[i]);
752 } 789 }
790
753out: 791out:
754 /* Free unused channel structures */ 792 /* Destroy unused channel structures */
755 for (i = 0; i < efx->n_channels; i++) 793 for (i = 0; i < efx->n_channels; i++) {
756 kfree(other_channel[i]); 794 channel = other_channel[i];
795 if (channel && channel->type->copy) {
796 efx_fini_napi_channel(channel);
797 efx_remove_channel(channel);
798 kfree(channel);
799 }
800 }
757 801
758 efx_init_channels(efx); 802 efx_start_interrupts(efx, true);
759 efx_start_all(efx); 803 efx_start_all(efx);
760 return rc; 804 return rc;
761 805
@@ -776,6 +820,18 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
776 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); 820 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
777} 821}
778 822
823static const struct efx_channel_type efx_default_channel_type = {
824 .pre_probe = efx_channel_dummy_op_int,
825 .get_name = efx_get_channel_name,
826 .copy = efx_copy_channel,
827 .keep_eventq = false,
828};
829
830int efx_channel_dummy_op_int(struct efx_channel *channel)
831{
832 return 0;
833}
834
779/************************************************************************** 835/**************************************************************************
780 * 836 *
781 * Port handling 837 * Port handling
@@ -807,16 +863,14 @@ void efx_link_status_changed(struct efx_nic *efx)
807 } 863 }
808 864
809 /* Status message for kernel log */ 865 /* Status message for kernel log */
810 if (link_state->up) { 866 if (link_state->up)
811 netif_info(efx, link, efx->net_dev, 867 netif_info(efx, link, efx->net_dev,
812 "link up at %uMbps %s-duplex (MTU %d)%s\n", 868 "link up at %uMbps %s-duplex (MTU %d)%s\n",
813 link_state->speed, link_state->fd ? "full" : "half", 869 link_state->speed, link_state->fd ? "full" : "half",
814 efx->net_dev->mtu, 870 efx->net_dev->mtu,
815 (efx->promiscuous ? " [PROMISC]" : "")); 871 (efx->promiscuous ? " [PROMISC]" : ""));
816 } else { 872 else
817 netif_info(efx, link, efx->net_dev, "link down\n"); 873 netif_info(efx, link, efx->net_dev, "link down\n");
818 }
819
820} 874}
821 875
822void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) 876void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
@@ -863,11 +917,9 @@ int __efx_reconfigure_port(struct efx_nic *efx)
863 917
864 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 918 WARN_ON(!mutex_is_locked(&efx->mac_lock));
865 919
866 /* Serialise the promiscuous flag with efx_set_multicast_list. */ 920 /* Serialise the promiscuous flag with efx_set_rx_mode. */
867 if (efx_dev_registered(efx)) { 921 netif_addr_lock_bh(efx->net_dev);
868 netif_addr_lock_bh(efx->net_dev); 922 netif_addr_unlock_bh(efx->net_dev);
869 netif_addr_unlock_bh(efx->net_dev);
870 }
871 923
872 /* Disable PHY transmit in mac level loopbacks */ 924 /* Disable PHY transmit in mac level loopbacks */
873 phy_mode = efx->phy_mode; 925 phy_mode = efx->phy_mode;
@@ -907,16 +959,13 @@ static void efx_mac_work(struct work_struct *data)
907 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 959 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
908 960
909 mutex_lock(&efx->mac_lock); 961 mutex_lock(&efx->mac_lock);
910 if (efx->port_enabled) { 962 if (efx->port_enabled)
911 efx->type->push_multicast_hash(efx); 963 efx->type->reconfigure_mac(efx);
912 efx->mac_op->reconfigure(efx);
913 }
914 mutex_unlock(&efx->mac_lock); 964 mutex_unlock(&efx->mac_lock);
915} 965}
916 966
917static int efx_probe_port(struct efx_nic *efx) 967static int efx_probe_port(struct efx_nic *efx)
918{ 968{
919 unsigned char *perm_addr;
920 int rc; 969 int rc;
921 970
922 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 971 netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -929,28 +978,10 @@ static int efx_probe_port(struct efx_nic *efx)
929 if (rc) 978 if (rc)
930 return rc; 979 return rc;
931 980
932 /* Sanity check MAC address */ 981 /* Initialise MAC address to permanent address */
933 perm_addr = efx->net_dev->perm_addr; 982 memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
934 if (is_valid_ether_addr(perm_addr)) {
935 memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
936 } else {
937 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
938 perm_addr);
939 if (!allow_bad_hwaddr) {
940 rc = -EINVAL;
941 goto err;
942 }
943 random_ether_addr(efx->net_dev->dev_addr);
944 netif_info(efx, probe, efx->net_dev,
945 "using locally-generated MAC %pM\n",
946 efx->net_dev->dev_addr);
947 }
948 983
949 return 0; 984 return 0;
950
951 err:
952 efx->type->remove_port(efx);
953 return rc;
954} 985}
955 986
956static int efx_init_port(struct efx_nic *efx) 987static int efx_init_port(struct efx_nic *efx)
@@ -969,7 +1000,7 @@ static int efx_init_port(struct efx_nic *efx)
969 1000
970 /* Reconfigure the MAC before creating dma queues (required for 1001 /* Reconfigure the MAC before creating dma queues (required for
971 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 1002 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
972 efx->mac_op->reconfigure(efx); 1003 efx->type->reconfigure_mac(efx);
973 1004
974 /* Ensure the PHY advertises the correct flow control settings */ 1005 /* Ensure the PHY advertises the correct flow control settings */
975 rc = efx->phy_op->reconfigure(efx); 1006 rc = efx->phy_op->reconfigure(efx);
@@ -996,8 +1027,7 @@ static void efx_start_port(struct efx_nic *efx)
996 1027
997 /* efx_mac_work() might have been scheduled after efx_stop_port(), 1028 /* efx_mac_work() might have been scheduled after efx_stop_port(),
998 * and then cancelled by efx_flush_all() */ 1029 * and then cancelled by efx_flush_all() */
999 efx->type->push_multicast_hash(efx); 1030 efx->type->reconfigure_mac(efx);
1000 efx->mac_op->reconfigure(efx);
1001 1031
1002 mutex_unlock(&efx->mac_lock); 1032 mutex_unlock(&efx->mac_lock);
1003} 1033}
@@ -1012,10 +1042,8 @@ static void efx_stop_port(struct efx_nic *efx)
1012 mutex_unlock(&efx->mac_lock); 1042 mutex_unlock(&efx->mac_lock);
1013 1043
1014 /* Serialise against efx_set_multicast_list() */ 1044 /* Serialise against efx_set_multicast_list() */
1015 if (efx_dev_registered(efx)) { 1045 netif_addr_lock_bh(efx->net_dev);
1016 netif_addr_lock_bh(efx->net_dev); 1046 netif_addr_unlock_bh(efx->net_dev);
1017 netif_addr_unlock_bh(efx->net_dev);
1018 }
1019} 1047}
1020 1048
1021static void efx_fini_port(struct efx_nic *efx) 1049static void efx_fini_port(struct efx_nic *efx)
@@ -1069,9 +1097,11 @@ static int efx_init_io(struct efx_nic *efx)
1069 * masks event though they reject 46 bit masks. 1097 * masks event though they reject 46 bit masks.
1070 */ 1098 */
1071 while (dma_mask > 0x7fffffffUL) { 1099 while (dma_mask > 0x7fffffffUL) {
1072 if (pci_dma_supported(pci_dev, dma_mask) && 1100 if (pci_dma_supported(pci_dev, dma_mask)) {
1073 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) 1101 rc = pci_set_dma_mask(pci_dev, dma_mask);
1074 break; 1102 if (rc == 0)
1103 break;
1104 }
1075 dma_mask >>= 1; 1105 dma_mask >>= 1;
1076 } 1106 }
1077 if (rc) { 1107 if (rc) {
@@ -1144,33 +1174,46 @@ static void efx_fini_io(struct efx_nic *efx)
1144 pci_disable_device(efx->pci_dev); 1174 pci_disable_device(efx->pci_dev);
1145} 1175}
1146 1176
1147/* Get number of channels wanted. Each channel will have its own IRQ, 1177static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1148 * 1 RX queue and/or 2 TX queues. */
1149static int efx_wanted_channels(void)
1150{ 1178{
1151 cpumask_var_t core_mask; 1179 cpumask_var_t thread_mask;
1152 int count; 1180 unsigned int count;
1153 int cpu; 1181 int cpu;
1154 1182
1155 if (rss_cpus) 1183 if (rss_cpus) {
1156 return rss_cpus; 1184 count = rss_cpus;
1185 } else {
1186 if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1187 netif_warn(efx, probe, efx->net_dev,
1188 "RSS disabled due to allocation failure\n");
1189 return 1;
1190 }
1191
1192 count = 0;
1193 for_each_online_cpu(cpu) {
1194 if (!cpumask_test_cpu(cpu, thread_mask)) {
1195 ++count;
1196 cpumask_or(thread_mask, thread_mask,
1197 topology_thread_cpumask(cpu));
1198 }
1199 }
1157 1200
1158 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { 1201 free_cpumask_var(thread_mask);
1159 printk(KERN_WARNING
1160 "sfc: RSS disabled due to allocation failure\n");
1161 return 1;
1162 } 1202 }
1163 1203
1164 count = 0; 1204 /* If RSS is requested for the PF *and* VFs then we can't write RSS
1165 for_each_online_cpu(cpu) { 1205 * table entries that are inaccessible to VFs
1166 if (!cpumask_test_cpu(cpu, core_mask)) { 1206 */
1167 ++count; 1207 if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
1168 cpumask_or(core_mask, core_mask, 1208 count > efx_vf_size(efx)) {
1169 topology_core_cpumask(cpu)); 1209 netif_warn(efx, probe, efx->net_dev,
1170 } 1210 "Reducing number of RSS channels from %u to %u for "
1211 "VF support. Increase vf-msix-limit to use more "
1212 "channels on the PF.\n",
1213 count, efx_vf_size(efx));
1214 count = efx_vf_size(efx);
1171 } 1215 }
1172 1216
1173 free_cpumask_var(core_mask);
1174 return count; 1217 return count;
1175} 1218}
1176 1219
@@ -1178,7 +1221,8 @@ static int
1178efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) 1221efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1179{ 1222{
1180#ifdef CONFIG_RFS_ACCEL 1223#ifdef CONFIG_RFS_ACCEL
1181 int i, rc; 1224 unsigned int i;
1225 int rc;
1182 1226
1183 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); 1227 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
1184 if (!efx->net_dev->rx_cpu_rmap) 1228 if (!efx->net_dev->rx_cpu_rmap)
@@ -1201,17 +1245,24 @@ efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1201 */ 1245 */
1202static int efx_probe_interrupts(struct efx_nic *efx) 1246static int efx_probe_interrupts(struct efx_nic *efx)
1203{ 1247{
1204 int max_channels = 1248 unsigned int max_channels =
1205 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); 1249 min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
1206 int rc, i; 1250 unsigned int extra_channels = 0;
1251 unsigned int i, j;
1252 int rc;
1253
1254 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
1255 if (efx->extra_channel_type[i])
1256 ++extra_channels;
1207 1257
1208 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 1258 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1209 struct msix_entry xentries[EFX_MAX_CHANNELS]; 1259 struct msix_entry xentries[EFX_MAX_CHANNELS];
1210 int n_channels; 1260 unsigned int n_channels;
1211 1261
1212 n_channels = efx_wanted_channels(); 1262 n_channels = efx_wanted_parallelism(efx);
1213 if (separate_tx_channels) 1263 if (separate_tx_channels)
1214 n_channels *= 2; 1264 n_channels *= 2;
1265 n_channels += extra_channels;
1215 n_channels = min(n_channels, max_channels); 1266 n_channels = min(n_channels, max_channels);
1216 1267
1217 for (i = 0; i < n_channels; i++) 1268 for (i = 0; i < n_channels; i++)
@@ -1220,7 +1271,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1220 if (rc > 0) { 1271 if (rc > 0) {
1221 netif_err(efx, drv, efx->net_dev, 1272 netif_err(efx, drv, efx->net_dev,
1222 "WARNING: Insufficient MSI-X vectors" 1273 "WARNING: Insufficient MSI-X vectors"
1223 " available (%d < %d).\n", rc, n_channels); 1274 " available (%d < %u).\n", rc, n_channels);
1224 netif_err(efx, drv, efx->net_dev, 1275 netif_err(efx, drv, efx->net_dev,
1225 "WARNING: Performance may be reduced.\n"); 1276 "WARNING: Performance may be reduced.\n");
1226 EFX_BUG_ON_PARANOID(rc >= n_channels); 1277 EFX_BUG_ON_PARANOID(rc >= n_channels);
@@ -1231,22 +1282,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1231 1282
1232 if (rc == 0) { 1283 if (rc == 0) {
1233 efx->n_channels = n_channels; 1284 efx->n_channels = n_channels;
1285 if (n_channels > extra_channels)
1286 n_channels -= extra_channels;
1234 if (separate_tx_channels) { 1287 if (separate_tx_channels) {
1235 efx->n_tx_channels = 1288 efx->n_tx_channels = max(n_channels / 2, 1U);
1236 max(efx->n_channels / 2, 1U); 1289 efx->n_rx_channels = max(n_channels -
1237 efx->n_rx_channels = 1290 efx->n_tx_channels,
1238 max(efx->n_channels - 1291 1U);
1239 efx->n_tx_channels, 1U);
1240 } else { 1292 } else {
1241 efx->n_tx_channels = efx->n_channels; 1293 efx->n_tx_channels = n_channels;
1242 efx->n_rx_channels = efx->n_channels; 1294 efx->n_rx_channels = n_channels;
1243 } 1295 }
1244 rc = efx_init_rx_cpu_rmap(efx, xentries); 1296 rc = efx_init_rx_cpu_rmap(efx, xentries);
1245 if (rc) { 1297 if (rc) {
1246 pci_disable_msix(efx->pci_dev); 1298 pci_disable_msix(efx->pci_dev);
1247 return rc; 1299 return rc;
1248 } 1300 }
1249 for (i = 0; i < n_channels; i++) 1301 for (i = 0; i < efx->n_channels; i++)
1250 efx_get_channel(efx, i)->irq = 1302 efx_get_channel(efx, i)->irq =
1251 xentries[i].vector; 1303 xentries[i].vector;
1252 } else { 1304 } else {
@@ -1280,9 +1332,68 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1280 efx->legacy_irq = efx->pci_dev->irq; 1332 efx->legacy_irq = efx->pci_dev->irq;
1281 } 1333 }
1282 1334
1335 /* Assign extra channels if possible */
1336 j = efx->n_channels;
1337 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
1338 if (!efx->extra_channel_type[i])
1339 continue;
1340 if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
1341 efx->n_channels <= extra_channels) {
1342 efx->extra_channel_type[i]->handle_no_channel(efx);
1343 } else {
1344 --j;
1345 efx_get_channel(efx, j)->type =
1346 efx->extra_channel_type[i];
1347 }
1348 }
1349
1350 /* RSS might be usable on VFs even if it is disabled on the PF */
1351 efx->rss_spread = (efx->n_rx_channels > 1 ?
1352 efx->n_rx_channels : efx_vf_size(efx));
1353
1283 return 0; 1354 return 0;
1284} 1355}
1285 1356
1357/* Enable interrupts, then probe and start the event queues */
1358static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1359{
1360 struct efx_channel *channel;
1361
1362 if (efx->legacy_irq)
1363 efx->legacy_irq_enabled = true;
1364 efx_nic_enable_interrupts(efx);
1365
1366 efx_for_each_channel(channel, efx) {
1367 if (!channel->type->keep_eventq || !may_keep_eventq)
1368 efx_init_eventq(channel);
1369 efx_start_eventq(channel);
1370 }
1371
1372 efx_mcdi_mode_event(efx);
1373}
1374
1375static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1376{
1377 struct efx_channel *channel;
1378
1379 efx_mcdi_mode_poll(efx);
1380
1381 efx_nic_disable_interrupts(efx);
1382 if (efx->legacy_irq) {
1383 synchronize_irq(efx->legacy_irq);
1384 efx->legacy_irq_enabled = false;
1385 }
1386
1387 efx_for_each_channel(channel, efx) {
1388 if (channel->irq)
1389 synchronize_irq(channel->irq);
1390
1391 efx_stop_eventq(channel);
1392 if (!channel->type->keep_eventq || !may_keep_eventq)
1393 efx_fini_eventq(channel);
1394 }
1395}
1396
1286static void efx_remove_interrupts(struct efx_nic *efx) 1397static void efx_remove_interrupts(struct efx_nic *efx)
1287{ 1398{
1288 struct efx_channel *channel; 1399 struct efx_channel *channel;
@@ -1333,11 +1444,13 @@ static int efx_probe_nic(struct efx_nic *efx)
1333 if (rc) 1444 if (rc)
1334 goto fail; 1445 goto fail;
1335 1446
1447 efx->type->dimension_resources(efx);
1448
1336 if (efx->n_channels > 1) 1449 if (efx->n_channels > 1)
1337 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1450 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
1338 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) 1451 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1339 efx->rx_indir_table[i] = 1452 efx->rx_indir_table[i] =
1340 ethtool_rxfh_indir_default(i, efx->n_rx_channels); 1453 ethtool_rxfh_indir_default(i, efx->rss_spread);
1341 1454
1342 efx_set_channels(efx); 1455 efx_set_channels(efx);
1343 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 1456 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1385,21 +1498,22 @@ static int efx_probe_all(struct efx_nic *efx)
1385 } 1498 }
1386 1499
1387 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; 1500 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1388 rc = efx_probe_channels(efx);
1389 if (rc)
1390 goto fail3;
1391 1501
1392 rc = efx_probe_filters(efx); 1502 rc = efx_probe_filters(efx);
1393 if (rc) { 1503 if (rc) {
1394 netif_err(efx, probe, efx->net_dev, 1504 netif_err(efx, probe, efx->net_dev,
1395 "failed to create filter tables\n"); 1505 "failed to create filter tables\n");
1396 goto fail4; 1506 goto fail3;
1397 } 1507 }
1398 1508
1509 rc = efx_probe_channels(efx);
1510 if (rc)
1511 goto fail4;
1512
1399 return 0; 1513 return 0;
1400 1514
1401 fail4: 1515 fail4:
1402 efx_remove_channels(efx); 1516 efx_remove_filters(efx);
1403 fail3: 1517 fail3:
1404 efx_remove_port(efx); 1518 efx_remove_port(efx);
1405 fail2: 1519 fail2:
@@ -1408,15 +1522,13 @@ static int efx_probe_all(struct efx_nic *efx)
1408 return rc; 1522 return rc;
1409} 1523}
1410 1524
1411/* Called after previous invocation(s) of efx_stop_all, restarts the 1525/* Called after previous invocation(s) of efx_stop_all, restarts the port,
1412 * port, kernel transmit queue, NAPI processing and hardware interrupts, 1526 * kernel transmit queues and NAPI processing, and ensures that the port is
1413 * and ensures that the port is scheduled to be reconfigured. 1527 * scheduled to be reconfigured. This function is safe to call multiple
1414 * This function is safe to call multiple times when the NIC is in any 1528 * times when the NIC is in any state.
1415 * state. */ 1529 */
1416static void efx_start_all(struct efx_nic *efx) 1530static void efx_start_all(struct efx_nic *efx)
1417{ 1531{
1418 struct efx_channel *channel;
1419
1420 EFX_ASSERT_RESET_SERIALISED(efx); 1532 EFX_ASSERT_RESET_SERIALISED(efx);
1421 1533
1422 /* Check that it is appropriate to restart the interface. All 1534 /* Check that it is appropriate to restart the interface. All
@@ -1425,31 +1537,11 @@ static void efx_start_all(struct efx_nic *efx)
1425 return; 1537 return;
1426 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1538 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1427 return; 1539 return;
1428 if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) 1540 if (!netif_running(efx->net_dev))
1429 return; 1541 return;
1430 1542
1431 /* Mark the port as enabled so port reconfigurations can start, then
1432 * restart the transmit interface early so the watchdog timer stops */
1433 efx_start_port(efx); 1543 efx_start_port(efx);
1434 1544 efx_start_datapath(efx);
1435 if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
1436 netif_tx_wake_all_queues(efx->net_dev);
1437
1438 efx_for_each_channel(channel, efx)
1439 efx_start_channel(channel);
1440
1441 if (efx->legacy_irq)
1442 efx->legacy_irq_enabled = true;
1443 efx_nic_enable_interrupts(efx);
1444
1445 /* Switch to event based MCDI completions after enabling interrupts.
1446 * If a reset has been scheduled, then we need to stay in polled mode.
1447 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
1448 * reset_pending [modified from an atomic context], we instead guarantee
1449 * that efx_mcdi_mode_poll() isn't reverted erroneously */
1450 efx_mcdi_mode_event(efx);
1451 if (efx->reset_pending)
1452 efx_mcdi_mode_poll(efx);
1453 1545
1454 /* Start the hardware monitor if there is one. Otherwise (we're link 1546 /* Start the hardware monitor if there is one. Otherwise (we're link
1455 * event driven), we have to poll the PHY because after an event queue 1547 * event driven), we have to poll the PHY because after an event queue
@@ -1485,8 +1577,6 @@ static void efx_flush_all(struct efx_nic *efx)
1485 * taking locks. */ 1577 * taking locks. */
1486static void efx_stop_all(struct efx_nic *efx) 1578static void efx_stop_all(struct efx_nic *efx)
1487{ 1579{
1488 struct efx_channel *channel;
1489
1490 EFX_ASSERT_RESET_SERIALISED(efx); 1580 EFX_ASSERT_RESET_SERIALISED(efx);
1491 1581
1492 /* port_enabled can be read safely under the rtnl lock */ 1582 /* port_enabled can be read safely under the rtnl lock */
@@ -1494,28 +1584,6 @@ static void efx_stop_all(struct efx_nic *efx)
1494 return; 1584 return;
1495 1585
1496 efx->type->stop_stats(efx); 1586 efx->type->stop_stats(efx);
1497
1498 /* Switch to MCDI polling on Siena before disabling interrupts */
1499 efx_mcdi_mode_poll(efx);
1500
1501 /* Disable interrupts and wait for ISR to complete */
1502 efx_nic_disable_interrupts(efx);
1503 if (efx->legacy_irq) {
1504 synchronize_irq(efx->legacy_irq);
1505 efx->legacy_irq_enabled = false;
1506 }
1507 efx_for_each_channel(channel, efx) {
1508 if (channel->irq)
1509 synchronize_irq(channel->irq);
1510 }
1511
1512 /* Stop all NAPI processing and synchronous rx refills */
1513 efx_for_each_channel(channel, efx)
1514 efx_stop_channel(channel);
1515
1516 /* Stop all asynchronous port reconfigurations. Since all
1517 * event processing has already been stopped, there is no
1518 * window to loose phy events */
1519 efx_stop_port(efx); 1587 efx_stop_port(efx);
1520 1588
1521 /* Flush efx_mac_work(), refill_workqueue, monitor_work */ 1589 /* Flush efx_mac_work(), refill_workqueue, monitor_work */
@@ -1523,17 +1591,15 @@ static void efx_stop_all(struct efx_nic *efx)
1523 1591
1524 /* Stop the kernel transmit interface late, so the watchdog 1592 /* Stop the kernel transmit interface late, so the watchdog
1525 * timer isn't ticking over the flush */ 1593 * timer isn't ticking over the flush */
1526 if (efx_dev_registered(efx)) { 1594 netif_tx_disable(efx->net_dev);
1527 netif_tx_stop_all_queues(efx->net_dev); 1595
1528 netif_tx_lock_bh(efx->net_dev); 1596 efx_stop_datapath(efx);
1529 netif_tx_unlock_bh(efx->net_dev);
1530 }
1531} 1597}
1532 1598
1533static void efx_remove_all(struct efx_nic *efx) 1599static void efx_remove_all(struct efx_nic *efx)
1534{ 1600{
1535 efx_remove_filters(efx);
1536 efx_remove_channels(efx); 1601 efx_remove_channels(efx);
1602 efx_remove_filters(efx);
1537 efx_remove_port(efx); 1603 efx_remove_port(efx);
1538 efx_remove_nic(efx); 1604 efx_remove_nic(efx);
1539} 1605}
@@ -1544,13 +1610,13 @@ static void efx_remove_all(struct efx_nic *efx)
1544 * 1610 *
1545 **************************************************************************/ 1611 **************************************************************************/
1546 1612
1547static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution) 1613static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
1548{ 1614{
1549 if (usecs == 0) 1615 if (usecs == 0)
1550 return 0; 1616 return 0;
1551 if (usecs < resolution) 1617 if (usecs * 1000 < quantum_ns)
1552 return 1; /* never round down to 0 */ 1618 return 1; /* never round down to 0 */
1553 return usecs / resolution; 1619 return usecs * 1000 / quantum_ns;
1554} 1620}
1555 1621
1556/* Set interrupt moderation parameters */ 1622/* Set interrupt moderation parameters */
@@ -1559,14 +1625,20 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
1559 bool rx_may_override_tx) 1625 bool rx_may_override_tx)
1560{ 1626{
1561 struct efx_channel *channel; 1627 struct efx_channel *channel;
1562 unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); 1628 unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
1563 unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); 1629 efx->timer_quantum_ns,
1630 1000);
1631 unsigned int tx_ticks;
1632 unsigned int rx_ticks;
1564 1633
1565 EFX_ASSERT_RESET_SERIALISED(efx); 1634 EFX_ASSERT_RESET_SERIALISED(efx);
1566 1635
1567 if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX) 1636 if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
1568 return -EINVAL; 1637 return -EINVAL;
1569 1638
1639 tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
1640 rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
1641
1570 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && 1642 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
1571 !rx_may_override_tx) { 1643 !rx_may_override_tx) {
1572 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 1644 netif_err(efx, drv, efx->net_dev, "Channels are shared. "
@@ -1589,8 +1661,14 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
1589void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 1661void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
1590 unsigned int *rx_usecs, bool *rx_adaptive) 1662 unsigned int *rx_usecs, bool *rx_adaptive)
1591{ 1663{
1664 /* We must round up when converting ticks to microseconds
1665 * because we round down when converting the other way.
1666 */
1667
1592 *rx_adaptive = efx->irq_rx_adaptive; 1668 *rx_adaptive = efx->irq_rx_adaptive;
1593 *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION; 1669 *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
1670 efx->timer_quantum_ns,
1671 1000);
1594 1672
1595 /* If channels are shared between RX and TX, so is IRQ 1673 /* If channels are shared between RX and TX, so is IRQ
1596 * moderation. Otherwise, IRQ moderation is the same for all 1674 * moderation. Otherwise, IRQ moderation is the same for all
@@ -1599,9 +1677,10 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
1599 if (efx->tx_channel_offset == 0) 1677 if (efx->tx_channel_offset == 0)
1600 *tx_usecs = *rx_usecs; 1678 *tx_usecs = *rx_usecs;
1601 else 1679 else
1602 *tx_usecs = 1680 *tx_usecs = DIV_ROUND_UP(
1603 efx->channel[efx->tx_channel_offset]->irq_moderation * 1681 efx->channel[efx->tx_channel_offset]->irq_moderation *
1604 EFX_IRQ_MOD_RESOLUTION; 1682 efx->timer_quantum_ns,
1683 1000);
1605} 1684}
1606 1685
1607/************************************************************************** 1686/**************************************************************************
@@ -1664,15 +1743,21 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1664 * 1743 *
1665 **************************************************************************/ 1744 **************************************************************************/
1666 1745
1746static void efx_init_napi_channel(struct efx_channel *channel)
1747{
1748 struct efx_nic *efx = channel->efx;
1749
1750 channel->napi_dev = efx->net_dev;
1751 netif_napi_add(channel->napi_dev, &channel->napi_str,
1752 efx_poll, napi_weight);
1753}
1754
1667static void efx_init_napi(struct efx_nic *efx) 1755static void efx_init_napi(struct efx_nic *efx)
1668{ 1756{
1669 struct efx_channel *channel; 1757 struct efx_channel *channel;
1670 1758
1671 efx_for_each_channel(channel, efx) { 1759 efx_for_each_channel(channel, efx)
1672 channel->napi_dev = efx->net_dev; 1760 efx_init_napi_channel(channel);
1673 netif_napi_add(channel->napi_dev, &channel->napi_str,
1674 efx_poll, napi_weight);
1675 }
1676} 1761}
1677 1762
1678static void efx_fini_napi_channel(struct efx_channel *channel) 1763static void efx_fini_napi_channel(struct efx_channel *channel)
@@ -1757,22 +1842,21 @@ static int efx_net_stop(struct net_device *net_dev)
1757 if (efx->state != STATE_DISABLED) { 1842 if (efx->state != STATE_DISABLED) {
1758 /* Stop the device and flush all the channels */ 1843 /* Stop the device and flush all the channels */
1759 efx_stop_all(efx); 1844 efx_stop_all(efx);
1760 efx_fini_channels(efx);
1761 efx_init_channels(efx);
1762 } 1845 }
1763 1846
1764 return 0; 1847 return 0;
1765} 1848}
1766 1849
1767/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1850/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1768static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) 1851static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
1852 struct rtnl_link_stats64 *stats)
1769{ 1853{
1770 struct efx_nic *efx = netdev_priv(net_dev); 1854 struct efx_nic *efx = netdev_priv(net_dev);
1771 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1855 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1772 1856
1773 spin_lock_bh(&efx->stats_lock); 1857 spin_lock_bh(&efx->stats_lock);
1858
1774 efx->type->update_stats(efx); 1859 efx->type->update_stats(efx);
1775 spin_unlock_bh(&efx->stats_lock);
1776 1860
1777 stats->rx_packets = mac_stats->rx_packets; 1861 stats->rx_packets = mac_stats->rx_packets;
1778 stats->tx_packets = mac_stats->tx_packets; 1862 stats->tx_packets = mac_stats->tx_packets;
@@ -1796,6 +1880,8 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
1796 stats->tx_errors = (stats->tx_window_errors + 1880 stats->tx_errors = (stats->tx_window_errors +
1797 mac_stats->tx_bad); 1881 mac_stats->tx_bad);
1798 1882
1883 spin_unlock_bh(&efx->stats_lock);
1884
1799 return stats; 1885 return stats;
1800} 1886}
1801 1887
@@ -1816,7 +1902,6 @@ static void efx_watchdog(struct net_device *net_dev)
1816static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1902static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1817{ 1903{
1818 struct efx_nic *efx = netdev_priv(net_dev); 1904 struct efx_nic *efx = netdev_priv(net_dev);
1819 int rc = 0;
1820 1905
1821 EFX_ASSERT_RESET_SERIALISED(efx); 1906 EFX_ASSERT_RESET_SERIALISED(efx);
1822 1907
@@ -1827,19 +1912,15 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1827 1912
1828 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 1913 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1829 1914
1830 efx_fini_channels(efx);
1831
1832 mutex_lock(&efx->mac_lock); 1915 mutex_lock(&efx->mac_lock);
1833 /* Reconfigure the MAC before enabling the dma queues so that 1916 /* Reconfigure the MAC before enabling the dma queues so that
1834 * the RX buffers don't overflow */ 1917 * the RX buffers don't overflow */
1835 net_dev->mtu = new_mtu; 1918 net_dev->mtu = new_mtu;
1836 efx->mac_op->reconfigure(efx); 1919 efx->type->reconfigure_mac(efx);
1837 mutex_unlock(&efx->mac_lock); 1920 mutex_unlock(&efx->mac_lock);
1838 1921
1839 efx_init_channels(efx);
1840
1841 efx_start_all(efx); 1922 efx_start_all(efx);
1842 return rc; 1923 return 0;
1843} 1924}
1844 1925
1845static int efx_set_mac_address(struct net_device *net_dev, void *data) 1926static int efx_set_mac_address(struct net_device *net_dev, void *data)
@@ -1854,21 +1935,22 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1854 netif_err(efx, drv, efx->net_dev, 1935 netif_err(efx, drv, efx->net_dev,
1855 "invalid ethernet MAC address requested: %pM\n", 1936 "invalid ethernet MAC address requested: %pM\n",
1856 new_addr); 1937 new_addr);
1857 return -EINVAL; 1938 return -EADDRNOTAVAIL;
1858 } 1939 }
1859 1940
1860 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); 1941 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1942 efx_sriov_mac_address_changed(efx);
1861 1943
1862 /* Reconfigure the MAC */ 1944 /* Reconfigure the MAC */
1863 mutex_lock(&efx->mac_lock); 1945 mutex_lock(&efx->mac_lock);
1864 efx->mac_op->reconfigure(efx); 1946 efx->type->reconfigure_mac(efx);
1865 mutex_unlock(&efx->mac_lock); 1947 mutex_unlock(&efx->mac_lock);
1866 1948
1867 return 0; 1949 return 0;
1868} 1950}
1869 1951
1870/* Context: netif_addr_lock held, BHs disabled. */ 1952/* Context: netif_addr_lock held, BHs disabled. */
1871static void efx_set_multicast_list(struct net_device *net_dev) 1953static void efx_set_rx_mode(struct net_device *net_dev)
1872{ 1954{
1873 struct efx_nic *efx = netdev_priv(net_dev); 1955 struct efx_nic *efx = netdev_priv(net_dev);
1874 struct netdev_hw_addr *ha; 1956 struct netdev_hw_addr *ha;
@@ -1922,8 +2004,14 @@ static const struct net_device_ops efx_netdev_ops = {
1922 .ndo_do_ioctl = efx_ioctl, 2004 .ndo_do_ioctl = efx_ioctl,
1923 .ndo_change_mtu = efx_change_mtu, 2005 .ndo_change_mtu = efx_change_mtu,
1924 .ndo_set_mac_address = efx_set_mac_address, 2006 .ndo_set_mac_address = efx_set_mac_address,
1925 .ndo_set_rx_mode = efx_set_multicast_list, 2007 .ndo_set_rx_mode = efx_set_rx_mode,
1926 .ndo_set_features = efx_set_features, 2008 .ndo_set_features = efx_set_features,
2009#ifdef CONFIG_SFC_SRIOV
2010 .ndo_set_vf_mac = efx_sriov_set_vf_mac,
2011 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
2012 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
2013 .ndo_get_vf_config = efx_sriov_get_vf_config,
2014#endif
1927#ifdef CONFIG_NET_POLL_CONTROLLER 2015#ifdef CONFIG_NET_POLL_CONTROLLER
1928 .ndo_poll_controller = efx_netpoll, 2016 .ndo_poll_controller = efx_netpoll,
1929#endif 2017#endif
@@ -1975,10 +2063,6 @@ static int efx_register_netdev(struct efx_nic *efx)
1975 net_dev->netdev_ops = &efx_netdev_ops; 2063 net_dev->netdev_ops = &efx_netdev_ops;
1976 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2064 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1977 2065
1978 /* Clear MAC statistics */
1979 efx->mac_op->update_stats(efx);
1980 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1981
1982 rtnl_lock(); 2066 rtnl_lock();
1983 2067
1984 rc = dev_alloc_name(net_dev, net_dev->name); 2068 rc = dev_alloc_name(net_dev, net_dev->name);
@@ -1997,7 +2081,7 @@ static int efx_register_netdev(struct efx_nic *efx)
1997 } 2081 }
1998 2082
1999 /* Always start with carrier off; PHY events will detect the link */ 2083 /* Always start with carrier off; PHY events will detect the link */
2000 netif_carrier_off(efx->net_dev); 2084 netif_carrier_off(net_dev);
2001 2085
2002 rtnl_unlock(); 2086 rtnl_unlock();
2003 2087
@@ -2038,11 +2122,9 @@ static void efx_unregister_netdev(struct efx_nic *efx)
2038 efx_release_tx_buffers(tx_queue); 2122 efx_release_tx_buffers(tx_queue);
2039 } 2123 }
2040 2124
2041 if (efx_dev_registered(efx)) { 2125 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2042 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2126 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2043 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2127 unregister_netdev(efx->net_dev);
2044 unregister_netdev(efx->net_dev);
2045 }
2046} 2128}
2047 2129
2048/************************************************************************** 2130/**************************************************************************
@@ -2060,7 +2142,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2060 efx_stop_all(efx); 2142 efx_stop_all(efx);
2061 mutex_lock(&efx->mac_lock); 2143 mutex_lock(&efx->mac_lock);
2062 2144
2063 efx_fini_channels(efx); 2145 efx_stop_interrupts(efx, false);
2064 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2146 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
2065 efx->phy_op->fini(efx); 2147 efx->phy_op->fini(efx);
2066 efx->type->fini(efx); 2148 efx->type->fini(efx);
@@ -2095,10 +2177,11 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2095 "could not restore PHY settings\n"); 2177 "could not restore PHY settings\n");
2096 } 2178 }
2097 2179
2098 efx->mac_op->reconfigure(efx); 2180 efx->type->reconfigure_mac(efx);
2099 2181
2100 efx_init_channels(efx); 2182 efx_start_interrupts(efx, false);
2101 efx_restore_filters(efx); 2183 efx_restore_filters(efx);
2184 efx_sriov_reset(efx);
2102 2185
2103 mutex_unlock(&efx->mac_lock); 2186 mutex_unlock(&efx->mac_lock);
2104 2187
@@ -2300,10 +2383,10 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2300 efx->net_dev = net_dev; 2383 efx->net_dev = net_dev;
2301 spin_lock_init(&efx->stats_lock); 2384 spin_lock_init(&efx->stats_lock);
2302 mutex_init(&efx->mac_lock); 2385 mutex_init(&efx->mac_lock);
2303 efx->mac_op = type->default_mac_ops;
2304 efx->phy_op = &efx_dummy_phy_operations; 2386 efx->phy_op = &efx_dummy_phy_operations;
2305 efx->mdio.dev = net_dev; 2387 efx->mdio.dev = net_dev;
2306 INIT_WORK(&efx->mac_work, efx_mac_work); 2388 INIT_WORK(&efx->mac_work, efx_mac_work);
2389 init_waitqueue_head(&efx->flush_wq);
2307 2390
2308 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2391 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
2309 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 2392 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
@@ -2361,8 +2444,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
2361 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); 2444 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2362 efx->net_dev->rx_cpu_rmap = NULL; 2445 efx->net_dev->rx_cpu_rmap = NULL;
2363#endif 2446#endif
2447 efx_stop_interrupts(efx, false);
2364 efx_nic_fini_interrupt(efx); 2448 efx_nic_fini_interrupt(efx);
2365 efx_fini_channels(efx);
2366 efx_fini_port(efx); 2449 efx_fini_port(efx);
2367 efx->type->fini(efx); 2450 efx->type->fini(efx);
2368 efx_fini_napi(efx); 2451 efx_fini_napi(efx);
@@ -2388,6 +2471,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2388 /* Allow any queued efx_resets() to complete */ 2471 /* Allow any queued efx_resets() to complete */
2389 rtnl_unlock(); 2472 rtnl_unlock();
2390 2473
2474 efx_stop_interrupts(efx, false);
2475 efx_sriov_fini(efx);
2391 efx_unregister_netdev(efx); 2476 efx_unregister_netdev(efx);
2392 2477
2393 efx_mtd_remove(efx); 2478 efx_mtd_remove(efx);
@@ -2436,16 +2521,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2436 goto fail4; 2521 goto fail4;
2437 } 2522 }
2438 2523
2439 efx_init_channels(efx);
2440
2441 rc = efx_nic_init_interrupt(efx); 2524 rc = efx_nic_init_interrupt(efx);
2442 if (rc) 2525 if (rc)
2443 goto fail5; 2526 goto fail5;
2527 efx_start_interrupts(efx, false);
2444 2528
2445 return 0; 2529 return 0;
2446 2530
2447 fail5: 2531 fail5:
2448 efx_fini_channels(efx);
2449 efx_fini_port(efx); 2532 efx_fini_port(efx);
2450 fail4: 2533 fail4:
2451 efx->type->fini(efx); 2534 efx->type->fini(efx);
@@ -2459,7 +2542,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2459/* NIC initialisation 2542/* NIC initialisation
2460 * 2543 *
2461 * This is called at module load (or hotplug insertion, 2544 * This is called at module load (or hotplug insertion,
2462 * theoretically). It sets up PCI mappings, tests and resets the NIC, 2545 * theoretically). It sets up PCI mappings, resets the NIC,
2463 * sets up and registers the network devices with the kernel and hooks 2546 * sets up and registers the network devices with the kernel and hooks
2464 * the interrupt service routine. It does not prepare the device for 2547 * the interrupt service routine. It does not prepare the device for
2465 * transmission; this is left to the first time one of the network 2548 * transmission; this is left to the first time one of the network
@@ -2471,7 +2554,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2471 const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; 2554 const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
2472 struct net_device *net_dev; 2555 struct net_device *net_dev;
2473 struct efx_nic *efx; 2556 struct efx_nic *efx;
2474 int i, rc; 2557 int rc;
2475 2558
2476 /* Allocate and initialise a struct net_device and struct efx_nic */ 2559 /* Allocate and initialise a struct net_device and struct efx_nic */
2477 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, 2560 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
@@ -2504,39 +2587,22 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2504 if (rc) 2587 if (rc)
2505 goto fail2; 2588 goto fail2;
2506 2589
2507 /* No serialisation is required with the reset path because 2590 rc = efx_pci_probe_main(efx);
2508 * we're in STATE_INIT. */
2509 for (i = 0; i < 5; i++) {
2510 rc = efx_pci_probe_main(efx);
2511
2512 /* Serialise against efx_reset(). No more resets will be
2513 * scheduled since efx_stop_all() has been called, and we
2514 * have not and never have been registered with either
2515 * the rtnetlink or driverlink layers. */
2516 cancel_work_sync(&efx->reset_work);
2517 2591
2518 if (rc == 0) { 2592 /* Serialise against efx_reset(). No more resets will be
2519 if (efx->reset_pending) { 2593 * scheduled since efx_stop_all() has been called, and we have
2520 /* If there was a scheduled reset during 2594 * not and never have been registered.
2521 * probe, the NIC is probably hosed anyway */ 2595 */
2522 efx_pci_remove_main(efx); 2596 cancel_work_sync(&efx->reset_work);
2523 rc = -EIO;
2524 } else {
2525 break;
2526 }
2527 }
2528
2529 /* Retry if a recoverably reset event has been scheduled */
2530 if (efx->reset_pending &
2531 ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
2532 !efx->reset_pending)
2533 goto fail3;
2534 2597
2535 efx->reset_pending = 0; 2598 if (rc)
2536 } 2599 goto fail3;
2537 2600
2538 if (rc) { 2601 /* If there was a scheduled reset during probe, the NIC is
2539 netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); 2602 * probably hosed anyway.
2603 */
2604 if (efx->reset_pending) {
2605 rc = -EIO;
2540 goto fail4; 2606 goto fail4;
2541 } 2607 }
2542 2608
@@ -2546,18 +2612,27 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2546 2612
2547 rc = efx_register_netdev(efx); 2613 rc = efx_register_netdev(efx);
2548 if (rc) 2614 if (rc)
2549 goto fail5; 2615 goto fail4;
2616
2617 rc = efx_sriov_init(efx);
2618 if (rc)
2619 netif_err(efx, probe, efx->net_dev,
2620 "SR-IOV can't be enabled rc %d\n", rc);
2550 2621
2551 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 2622 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2552 2623
2624 /* Try to create MTDs, but allow this to fail */
2553 rtnl_lock(); 2625 rtnl_lock();
2554 efx_mtd_probe(efx); /* allowed to fail */ 2626 rc = efx_mtd_probe(efx);
2555 rtnl_unlock(); 2627 rtnl_unlock();
2628 if (rc)
2629 netif_warn(efx, probe, efx->net_dev,
2630 "failed to create MTDs (%d)\n", rc);
2631
2556 return 0; 2632 return 0;
2557 2633
2558 fail5:
2559 efx_pci_remove_main(efx);
2560 fail4: 2634 fail4:
2635 efx_pci_remove_main(efx);
2561 fail3: 2636 fail3:
2562 efx_fini_io(efx); 2637 efx_fini_io(efx);
2563 fail2: 2638 fail2:
@@ -2578,7 +2653,7 @@ static int efx_pm_freeze(struct device *dev)
2578 netif_device_detach(efx->net_dev); 2653 netif_device_detach(efx->net_dev);
2579 2654
2580 efx_stop_all(efx); 2655 efx_stop_all(efx);
2581 efx_fini_channels(efx); 2656 efx_stop_interrupts(efx, false);
2582 2657
2583 return 0; 2658 return 0;
2584} 2659}
@@ -2589,7 +2664,7 @@ static int efx_pm_thaw(struct device *dev)
2589 2664
2590 efx->state = STATE_INIT; 2665 efx->state = STATE_INIT;
2591 2666
2592 efx_init_channels(efx); 2667 efx_start_interrupts(efx, false);
2593 2668
2594 mutex_lock(&efx->mac_lock); 2669 mutex_lock(&efx->mac_lock);
2595 efx->phy_op->reconfigure(efx); 2670 efx->phy_op->reconfigure(efx);
@@ -2658,7 +2733,7 @@ static int efx_pm_suspend(struct device *dev)
2658 return rc; 2733 return rc;
2659} 2734}
2660 2735
2661static struct dev_pm_ops efx_pm_ops = { 2736static const struct dev_pm_ops efx_pm_ops = {
2662 .suspend = efx_pm_suspend, 2737 .suspend = efx_pm_suspend,
2663 .resume = efx_pm_resume, 2738 .resume = efx_pm_resume,
2664 .freeze = efx_pm_freeze, 2739 .freeze = efx_pm_freeze,
@@ -2695,6 +2770,10 @@ static int __init efx_init_module(void)
2695 if (rc) 2770 if (rc)
2696 goto err_notifier; 2771 goto err_notifier;
2697 2772
2773 rc = efx_init_sriov();
2774 if (rc)
2775 goto err_sriov;
2776
2698 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 2777 reset_workqueue = create_singlethread_workqueue("sfc_reset");
2699 if (!reset_workqueue) { 2778 if (!reset_workqueue) {
2700 rc = -ENOMEM; 2779 rc = -ENOMEM;
@@ -2710,6 +2789,8 @@ static int __init efx_init_module(void)
2710 err_pci: 2789 err_pci:
2711 destroy_workqueue(reset_workqueue); 2790 destroy_workqueue(reset_workqueue);
2712 err_reset: 2791 err_reset:
2792 efx_fini_sriov();
2793 err_sriov:
2713 unregister_netdevice_notifier(&efx_netdev_notifier); 2794 unregister_netdevice_notifier(&efx_netdev_notifier);
2714 err_notifier: 2795 err_notifier:
2715 return rc; 2796 return rc;
@@ -2721,6 +2802,7 @@ static void __exit efx_exit_module(void)
2721 2802
2722 pci_unregister_driver(&efx_pci_driver); 2803 pci_unregister_driver(&efx_pci_driver);
2723 destroy_workqueue(reset_workqueue); 2804 destroy_workqueue(reset_workqueue);
2805 efx_fini_sriov();
2724 unregister_netdevice_notifier(&efx_netdev_notifier); 2806 unregister_netdevice_notifier(&efx_netdev_notifier);
2725 2807
2726} 2808}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3541ac6ea01..4debfe07fb88 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -40,9 +40,9 @@ extern void efx_rx_strategy(struct efx_channel *channel);
40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
41extern void efx_rx_slow_fill(unsigned long context); 41extern void efx_rx_slow_fill(unsigned long context);
42extern void __efx_rx_packet(struct efx_channel *channel, 42extern void __efx_rx_packet(struct efx_channel *channel,
43 struct efx_rx_buffer *rx_buf, bool checksummed); 43 struct efx_rx_buffer *rx_buf);
44extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 44extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
45 unsigned int len, bool checksummed, bool discard); 45 unsigned int len, u16 flags);
46extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 46extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
47 47
48#define EFX_MAX_DMAQ_SIZE 4096UL 48#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -95,6 +95,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
95#endif 95#endif
96 96
97/* Channels */ 97/* Channels */
98extern int efx_channel_dummy_op_int(struct efx_channel *channel);
98extern void efx_process_channel_now(struct efx_channel *channel); 99extern void efx_process_channel_now(struct efx_channel *channel);
99extern int 100extern int
100efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); 101efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -145,6 +146,12 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
145 napi_schedule(&channel->napi_str); 146 napi_schedule(&channel->napi_str);
146} 147}
147 148
149static inline void efx_schedule_channel_irq(struct efx_channel *channel)
150{
151 channel->last_irq_cpu = raw_smp_processor_id();
152 efx_schedule_channel(channel);
153}
154
148extern void efx_link_status_changed(struct efx_nic *efx); 155extern void efx_link_status_changed(struct efx_nic *efx);
149extern void efx_link_set_advertising(struct efx_nic *efx, u32); 156extern void efx_link_set_advertising(struct efx_nic *efx, u32);
150extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); 157extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 29b2ebfef19f..f22f45f515a8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -52,11 +52,6 @@ static u64 efx_get_uint_stat(void *field)
52 return *(unsigned int *)field; 52 return *(unsigned int *)field;
53} 53}
54 54
55static u64 efx_get_ulong_stat(void *field)
56{
57 return *(unsigned long *)field;
58}
59
60static u64 efx_get_u64_stat(void *field) 55static u64 efx_get_u64_stat(void *field)
61{ 56{
62 return *(u64 *) field; 57 return *(u64 *) field;
@@ -67,12 +62,8 @@ static u64 efx_get_atomic_stat(void *field)
67 return atomic_read((atomic_t *) field); 62 return atomic_read((atomic_t *) field);
68} 63}
69 64
70#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
71 EFX_ETHTOOL_STAT(field, mac_stats, field, \
72 unsigned long, efx_get_ulong_stat)
73
74#define EFX_ETHTOOL_U64_MAC_STAT(field) \ 65#define EFX_ETHTOOL_U64_MAC_STAT(field) \
75 EFX_ETHTOOL_STAT(field, mac_stats, field, \ 66 EFX_ETHTOOL_STAT(field, mac_stats, field, \
76 u64, efx_get_u64_stat) 67 u64, efx_get_u64_stat)
77 68
78#define EFX_ETHTOOL_UINT_NIC_STAT(name) \ 69#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
@@ -91,36 +82,36 @@ static u64 efx_get_atomic_stat(void *field)
91 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ 82 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
92 unsigned int, efx_get_uint_stat) 83 unsigned int, efx_get_uint_stat)
93 84
94static struct efx_ethtool_stat efx_ethtool_stats[] = { 85static const struct efx_ethtool_stat efx_ethtool_stats[] = {
95 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 86 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
96 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), 87 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
97 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), 88 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
98 EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), 89 EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
99 EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), 90 EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
100 EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), 91 EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
101 EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), 92 EFX_ETHTOOL_U64_MAC_STAT(tx_control),
102 EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), 93 EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
103 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), 94 EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
104 EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), 95 EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
105 EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), 96 EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
106 EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), 97 EFX_ETHTOOL_U64_MAC_STAT(tx_64),
107 EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), 98 EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
108 EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), 99 EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
109 EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), 100 EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
110 EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), 101 EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
111 EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), 102 EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
112 EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), 103 EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
113 EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), 104 EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
114 EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), 105 EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
115 EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), 106 EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), 107 EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), 108 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), 109 EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
119 EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), 110 EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
120 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), 111 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
121 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), 112 EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
122 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), 113 EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
123 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), 114 EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
124 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), 115 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
125 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), 116 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
126 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), 117 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
@@ -128,34 +119,34 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
128 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), 119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
129 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), 120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
130 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), 121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
131 EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), 122 EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
132 EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), 123 EFX_ETHTOOL_U64_MAC_STAT(rx_good),
133 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), 124 EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
134 EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), 125 EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
135 EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), 126 EFX_ETHTOOL_U64_MAC_STAT(rx_control),
136 EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), 127 EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
137 EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), 128 EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
138 EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), 129 EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
139 EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), 130 EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
140 EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), 131 EFX_ETHTOOL_U64_MAC_STAT(rx_64),
141 EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), 132 EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
142 EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), 133 EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
143 EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), 134 EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
144 EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), 135 EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
145 EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), 136 EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
146 EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), 137 EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
147 EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), 138 EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
148 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), 139 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
149 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), 140 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
150 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), 141 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
151 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), 142 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
152 EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), 143 EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
153 EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), 144 EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
154 EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), 145 EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
155 EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), 146 EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
156 EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), 147 EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
157 EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), 148 EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
158 EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), 149 EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
159 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), 150 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
160 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), 151 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
161 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 152 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
@@ -404,10 +395,6 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
404 &tests->eventq_int[channel->channel], 395 &tests->eventq_int[channel->channel],
405 EFX_CHANNEL_NAME(channel), 396 EFX_CHANNEL_NAME(channel),
406 "eventq.int", NULL); 397 "eventq.int", NULL);
407 efx_fill_test(n++, strings, data,
408 &tests->eventq_poll[channel->channel],
409 EFX_CHANNEL_NAME(channel),
410 "eventq.poll", NULL);
411 } 398 }
412 399
413 efx_fill_test(n++, strings, data, &tests->registers, 400 efx_fill_test(n++, strings, data, &tests->registers,
@@ -486,16 +473,17 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
486{ 473{
487 struct efx_nic *efx = netdev_priv(net_dev); 474 struct efx_nic *efx = netdev_priv(net_dev);
488 struct efx_mac_stats *mac_stats = &efx->mac_stats; 475 struct efx_mac_stats *mac_stats = &efx->mac_stats;
489 struct efx_ethtool_stat *stat; 476 const struct efx_ethtool_stat *stat;
490 struct efx_channel *channel; 477 struct efx_channel *channel;
491 struct efx_tx_queue *tx_queue; 478 struct efx_tx_queue *tx_queue;
492 struct rtnl_link_stats64 temp;
493 int i; 479 int i;
494 480
495 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); 481 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
496 482
483 spin_lock_bh(&efx->stats_lock);
484
497 /* Update MAC and NIC statistics */ 485 /* Update MAC and NIC statistics */
498 dev_get_stats(net_dev, &temp); 486 efx->type->update_stats(efx);
499 487
500 /* Fill detailed statistics buffer */ 488 /* Fill detailed statistics buffer */
501 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { 489 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -525,6 +513,8 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
525 break; 513 break;
526 } 514 }
527 } 515 }
516
517 spin_unlock_bh(&efx->stats_lock);
528} 518}
529 519
530static void efx_ethtool_self_test(struct net_device *net_dev, 520static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -747,7 +737,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
747 /* Recover by resetting the EM block */ 737 /* Recover by resetting the EM block */
748 falcon_stop_nic_stats(efx); 738 falcon_stop_nic_stats(efx);
749 falcon_drain_tx_fifo(efx); 739 falcon_drain_tx_fifo(efx);
750 efx->mac_op->reconfigure(efx); 740 falcon_reconfigure_xmac(efx);
751 falcon_start_nic_stats(efx); 741 falcon_start_nic_stats(efx);
752 } else { 742 } else {
753 /* Schedule a reset to recover */ 743 /* Schedule a reset to recover */
@@ -772,7 +762,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
772 /* Reconfigure the MAC. The PHY *may* generate a link state change event 762 /* Reconfigure the MAC. The PHY *may* generate a link state change event
773 * if the user just changed the advertised capabilities, but there's no 763 * if the user just changed the advertised capabilities, but there's no
774 * harm doing this twice */ 764 * harm doing this twice */
775 efx->mac_op->reconfigure(efx); 765 efx->type->reconfigure_mac(efx);
776 766
777out: 767out:
778 mutex_unlock(&efx->mac_lock); 768 mutex_unlock(&efx->mac_lock);
@@ -818,11 +808,16 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
818 return efx_reset(efx, rc); 808 return efx_reset(efx, rc);
819} 809}
820 810
811/* MAC address mask including only MC flag */
812static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
813
821static int efx_ethtool_get_class_rule(struct efx_nic *efx, 814static int efx_ethtool_get_class_rule(struct efx_nic *efx,
822 struct ethtool_rx_flow_spec *rule) 815 struct ethtool_rx_flow_spec *rule)
823{ 816{
824 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; 817 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
825 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; 818 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
819 struct ethhdr *mac_entry = &rule->h_u.ether_spec;
820 struct ethhdr *mac_mask = &rule->m_u.ether_spec;
826 struct efx_filter_spec spec; 821 struct efx_filter_spec spec;
827 u16 vid; 822 u16 vid;
828 u8 proto; 823 u8 proto;
@@ -838,11 +833,18 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
838 else 833 else
839 rule->ring_cookie = spec.dmaq_id; 834 rule->ring_cookie = spec.dmaq_id;
840 835
841 rc = efx_filter_get_eth_local(&spec, &vid, 836 if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
842 rule->h_u.ether_spec.h_dest); 837 rule->flow_type = ETHER_FLOW;
838 memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
839 if (spec.type == EFX_FILTER_MC_DEF)
840 memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
841 return 0;
842 }
843
844 rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
843 if (rc == 0) { 845 if (rc == 0) {
844 rule->flow_type = ETHER_FLOW; 846 rule->flow_type = ETHER_FLOW;
845 memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN); 847 memset(mac_mask->h_dest, ~0, ETH_ALEN);
846 if (vid != EFX_FILTER_VID_UNSPEC) { 848 if (vid != EFX_FILTER_VID_UNSPEC) {
847 rule->flow_type |= FLOW_EXT; 849 rule->flow_type |= FLOW_EXT;
848 rule->h_ext.vlan_tci = htons(vid); 850 rule->h_ext.vlan_tci = htons(vid);
@@ -1011,27 +1013,40 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
1011 } 1013 }
1012 1014
1013 case ETHER_FLOW | FLOW_EXT: 1015 case ETHER_FLOW | FLOW_EXT:
1014 /* Must match all or none of VID */ 1016 case ETHER_FLOW: {
1015 if (rule->m_ext.vlan_tci != htons(0xfff) && 1017 u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
1016 rule->m_ext.vlan_tci != 0) 1018 ntohs(rule->m_ext.vlan_tci) : 0);
1017 return -EINVAL; 1019
1018 case ETHER_FLOW: 1020 /* Must not match on source address or Ethertype */
1019 /* Must match all of destination */
1020 if (!is_broadcast_ether_addr(mac_mask->h_dest))
1021 return -EINVAL;
1022 /* and nothing else */
1023 if (!is_zero_ether_addr(mac_mask->h_source) || 1021 if (!is_zero_ether_addr(mac_mask->h_source) ||
1024 mac_mask->h_proto) 1022 mac_mask->h_proto)
1025 return -EINVAL; 1023 return -EINVAL;
1026 1024
1027 rc = efx_filter_set_eth_local( 1025 /* Is it a default UC or MC filter? */
1028 &spec, 1026 if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) &&
1029 (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ? 1027 vlan_tag_mask == 0) {
1030 ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, 1028 if (is_multicast_ether_addr(mac_entry->h_dest))
1031 mac_entry->h_dest); 1029 rc = efx_filter_set_mc_def(&spec);
1030 else
1031 rc = efx_filter_set_uc_def(&spec);
1032 }
1033 /* Otherwise, it must match all of destination and all
1034 * or none of VID.
1035 */
1036 else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
1037 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
1038 rc = efx_filter_set_eth_local(
1039 &spec,
1040 vlan_tag_mask ?
1041 ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
1042 mac_entry->h_dest);
1043 } else {
1044 rc = -EINVAL;
1045 }
1032 if (rc) 1046 if (rc)
1033 return rc; 1047 return rc;
1034 break; 1048 break;
1049 }
1035 1050
1036 default: 1051 default:
1037 return -EINVAL; 1052 return -EINVAL;
@@ -1070,7 +1085,8 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1070{ 1085{
1071 struct efx_nic *efx = netdev_priv(net_dev); 1086 struct efx_nic *efx = netdev_priv(net_dev);
1072 1087
1073 return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ? 1088 return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
1089 efx->n_rx_channels == 1) ?
1074 0 : ARRAY_SIZE(efx->rx_indir_table)); 1090 0 : ARRAY_SIZE(efx->rx_indir_table));
1075} 1091}
1076 1092
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ae1ebd35397..3a1ca2bd1548 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -19,7 +19,6 @@
19#include "net_driver.h" 19#include "net_driver.h"
20#include "bitfield.h" 20#include "bitfield.h"
21#include "efx.h" 21#include "efx.h"
22#include "mac.h"
23#include "spi.h" 22#include "spi.h"
24#include "nic.h" 23#include "nic.h"
25#include "regs.h" 24#include "regs.h"
@@ -89,7 +88,7 @@ static int falcon_getscl(void *data)
89 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); 88 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
90} 89}
91 90
92static struct i2c_algo_bit_data falcon_i2c_bit_operations = { 91static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
93 .setsda = falcon_setsda, 92 .setsda = falcon_setsda,
94 .setscl = falcon_setscl, 93 .setscl = falcon_setscl,
95 .getsda = falcon_getsda, 94 .getsda = falcon_getsda,
@@ -104,8 +103,6 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
104 efx_dword_t timer_cmd; 103 efx_dword_t timer_cmd;
105 struct efx_nic *efx = channel->efx; 104 struct efx_nic *efx = channel->efx;
106 105
107 BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_AB_TC_TIMER_VAL_WIDTH));
108
109 /* Set timer register */ 106 /* Set timer register */
110 if (channel->irq_moderation) { 107 if (channel->irq_moderation) {
111 EFX_POPULATE_DWORD_2(timer_cmd, 108 EFX_POPULATE_DWORD_2(timer_cmd,
@@ -177,27 +174,24 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
177 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 174 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
178 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 175 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
179 176
177 /* Check to see if we have a serious error condition */
178 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
179 if (unlikely(syserr))
180 return efx_nic_fatal_interrupt(efx);
181
180 /* Determine interrupting queues, clear interrupt status 182 /* Determine interrupting queues, clear interrupt status
181 * register and acknowledge the device interrupt. 183 * register and acknowledge the device interrupt.
182 */ 184 */
183 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); 185 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
184 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); 186 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
185
186 /* Check to see if we have a serious error condition */
187 if (queues & (1U << efx->fatal_irq_level)) {
188 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
189 if (unlikely(syserr))
190 return efx_nic_fatal_interrupt(efx);
191 }
192
193 EFX_ZERO_OWORD(*int_ker); 187 EFX_ZERO_OWORD(*int_ker);
194 wmb(); /* Ensure the vector is cleared before interrupt ack */ 188 wmb(); /* Ensure the vector is cleared before interrupt ack */
195 falcon_irq_ack_a1(efx); 189 falcon_irq_ack_a1(efx);
196 190
197 if (queues & 1) 191 if (queues & 1)
198 efx_schedule_channel(efx_get_channel(efx, 0)); 192 efx_schedule_channel_irq(efx_get_channel(efx, 0));
199 if (queues & 2) 193 if (queues & 2)
200 efx_schedule_channel(efx_get_channel(efx, 1)); 194 efx_schedule_channel_irq(efx_get_channel(efx, 1));
201 return IRQ_HANDLED; 195 return IRQ_HANDLED;
202} 196}
203/************************************************************************** 197/**************************************************************************
@@ -613,7 +607,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
613 nic_data->stats_pending = false; 607 nic_data->stats_pending = false;
614 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { 608 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
615 rmb(); /* read the done flag before the stats */ 609 rmb(); /* read the done flag before the stats */
616 efx->mac_op->update_stats(efx); 610 falcon_update_stats_xmac(efx);
617 } else { 611 } else {
618 netif_err(efx, hw, efx->net_dev, 612 netif_err(efx, hw, efx->net_dev,
619 "timed out waiting for statistics\n"); 613 "timed out waiting for statistics\n");
@@ -670,7 +664,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
670 falcon_reset_macs(efx); 664 falcon_reset_macs(efx);
671 665
672 efx->phy_op->reconfigure(efx); 666 efx->phy_op->reconfigure(efx);
673 rc = efx->mac_op->reconfigure(efx); 667 rc = falcon_reconfigure_xmac(efx);
674 BUG_ON(rc); 668 BUG_ON(rc);
675 669
676 falcon_start_nic_stats(efx); 670 falcon_start_nic_stats(efx);
@@ -1218,7 +1212,7 @@ static void falcon_monitor(struct efx_nic *efx)
1218 falcon_deconfigure_mac_wrapper(efx); 1212 falcon_deconfigure_mac_wrapper(efx);
1219 1213
1220 falcon_reset_macs(efx); 1214 falcon_reset_macs(efx);
1221 rc = efx->mac_op->reconfigure(efx); 1215 rc = falcon_reconfigure_xmac(efx);
1222 BUG_ON(rc); 1216 BUG_ON(rc);
1223 1217
1224 falcon_start_nic_stats(efx); 1218 falcon_start_nic_stats(efx);
@@ -1339,6 +1333,12 @@ out:
1339 return rc; 1333 return rc;
1340} 1334}
1341 1335
1336static void falcon_dimension_resources(struct efx_nic *efx)
1337{
1338 efx->rx_dc_base = 0x20000;
1339 efx->tx_dc_base = 0x26000;
1340}
1341
1342/* Probe all SPI devices on the NIC */ 1342/* Probe all SPI devices on the NIC */
1343static void falcon_probe_spi_devices(struct efx_nic *efx) 1343static void falcon_probe_spi_devices(struct efx_nic *efx)
1344{ 1344{
@@ -1472,6 +1472,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1472 goto fail5; 1472 goto fail5;
1473 } 1473 }
1474 1474
1475 efx->timer_quantum_ns = 4968; /* 621 cycles */
1476
1475 /* Initialise I2C adapter */ 1477 /* Initialise I2C adapter */
1476 board = falcon_board(efx); 1478 board = falcon_board(efx);
1477 board->i2c_adap.owner = THIS_MODULE; 1479 board->i2c_adap.owner = THIS_MODULE;
@@ -1676,7 +1678,7 @@ static void falcon_update_nic_stats(struct efx_nic *efx)
1676 *nic_data->stats_dma_done == FALCON_STATS_DONE) { 1678 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
1677 nic_data->stats_pending = false; 1679 nic_data->stats_pending = false;
1678 rmb(); /* read the done flag before the stats */ 1680 rmb(); /* read the done flag before the stats */
1679 efx->mac_op->update_stats(efx); 1681 falcon_update_stats_xmac(efx);
1680 } 1682 }
1681} 1683}
1682 1684
@@ -1753,6 +1755,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
1753 .probe = falcon_probe_nic, 1755 .probe = falcon_probe_nic,
1754 .remove = falcon_remove_nic, 1756 .remove = falcon_remove_nic,
1755 .init = falcon_init_nic, 1757 .init = falcon_init_nic,
1758 .dimension_resources = falcon_dimension_resources,
1756 .fini = efx_port_dummy_op_void, 1759 .fini = efx_port_dummy_op_void,
1757 .monitor = falcon_monitor, 1760 .monitor = falcon_monitor,
1758 .map_reset_reason = falcon_map_reset_reason, 1761 .map_reset_reason = falcon_map_reset_reason,
@@ -1767,13 +1770,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
1767 .stop_stats = falcon_stop_nic_stats, 1770 .stop_stats = falcon_stop_nic_stats,
1768 .set_id_led = falcon_set_id_led, 1771 .set_id_led = falcon_set_id_led,
1769 .push_irq_moderation = falcon_push_irq_moderation, 1772 .push_irq_moderation = falcon_push_irq_moderation,
1770 .push_multicast_hash = falcon_push_multicast_hash,
1771 .reconfigure_port = falcon_reconfigure_port, 1773 .reconfigure_port = falcon_reconfigure_port,
1774 .reconfigure_mac = falcon_reconfigure_xmac,
1775 .check_mac_fault = falcon_xmac_check_fault,
1772 .get_wol = falcon_get_wol, 1776 .get_wol = falcon_get_wol,
1773 .set_wol = falcon_set_wol, 1777 .set_wol = falcon_set_wol,
1774 .resume_wol = efx_port_dummy_op_void, 1778 .resume_wol = efx_port_dummy_op_void,
1775 .test_nvram = falcon_test_nvram, 1779 .test_nvram = falcon_test_nvram,
1776 .default_mac_ops = &falcon_xmac_operations,
1777 1780
1778 .revision = EFX_REV_FALCON_A1, 1781 .revision = EFX_REV_FALCON_A1,
1779 .mem_map_size = 0x20000, 1782 .mem_map_size = 0x20000,
@@ -1786,8 +1789,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
1786 .rx_buffer_padding = 0x24, 1789 .rx_buffer_padding = 0x24,
1787 .max_interrupt_mode = EFX_INT_MODE_MSI, 1790 .max_interrupt_mode = EFX_INT_MODE_MSI,
1788 .phys_addr_channels = 4, 1791 .phys_addr_channels = 4,
1789 .tx_dc_base = 0x130000, 1792 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
1790 .rx_dc_base = 0x100000,
1791 .offload_features = NETIF_F_IP_CSUM, 1793 .offload_features = NETIF_F_IP_CSUM,
1792}; 1794};
1793 1795
@@ -1795,6 +1797,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
1795 .probe = falcon_probe_nic, 1797 .probe = falcon_probe_nic,
1796 .remove = falcon_remove_nic, 1798 .remove = falcon_remove_nic,
1797 .init = falcon_init_nic, 1799 .init = falcon_init_nic,
1800 .dimension_resources = falcon_dimension_resources,
1798 .fini = efx_port_dummy_op_void, 1801 .fini = efx_port_dummy_op_void,
1799 .monitor = falcon_monitor, 1802 .monitor = falcon_monitor,
1800 .map_reset_reason = falcon_map_reset_reason, 1803 .map_reset_reason = falcon_map_reset_reason,
@@ -1809,14 +1812,14 @@ const struct efx_nic_type falcon_b0_nic_type = {
1809 .stop_stats = falcon_stop_nic_stats, 1812 .stop_stats = falcon_stop_nic_stats,
1810 .set_id_led = falcon_set_id_led, 1813 .set_id_led = falcon_set_id_led,
1811 .push_irq_moderation = falcon_push_irq_moderation, 1814 .push_irq_moderation = falcon_push_irq_moderation,
1812 .push_multicast_hash = falcon_push_multicast_hash,
1813 .reconfigure_port = falcon_reconfigure_port, 1815 .reconfigure_port = falcon_reconfigure_port,
1816 .reconfigure_mac = falcon_reconfigure_xmac,
1817 .check_mac_fault = falcon_xmac_check_fault,
1814 .get_wol = falcon_get_wol, 1818 .get_wol = falcon_get_wol,
1815 .set_wol = falcon_set_wol, 1819 .set_wol = falcon_set_wol,
1816 .resume_wol = efx_port_dummy_op_void, 1820 .resume_wol = efx_port_dummy_op_void,
1817 .test_registers = falcon_b0_test_registers, 1821 .test_registers = falcon_b0_test_registers,
1818 .test_nvram = falcon_test_nvram, 1822 .test_nvram = falcon_test_nvram,
1819 .default_mac_ops = &falcon_xmac_operations,
1820 1823
1821 .revision = EFX_REV_FALCON_B0, 1824 .revision = EFX_REV_FALCON_B0,
1822 /* Map everything up to and including the RSS indirection 1825 /* Map everything up to and including the RSS indirection
@@ -1837,8 +1840,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
1837 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 1840 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
1838 * interrupt handler only supports 32 1841 * interrupt handler only supports 32
1839 * channels */ 1842 * channels */
1840 .tx_dc_base = 0x130000, 1843 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
1841 .rx_dc_base = 0x100000,
1842 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, 1844 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
1843}; 1845};
1844 1846
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 6cc16b8cc6f4..2084cc6ede52 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -87,7 +87,7 @@ static const u8 falcon_lm87_common_regs[] = {
87 0 87 0
88}; 88};
89 89
90static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 90static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
91 const u8 *reg_values) 91 const u8 *reg_values)
92{ 92{
93 struct falcon_board *board = falcon_board(efx); 93 struct falcon_board *board = falcon_board(efx);
@@ -179,7 +179,7 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
179#else /* !CONFIG_SENSORS_LM87 */ 179#else /* !CONFIG_SENSORS_LM87 */
180 180
181static inline int 181static inline int
182efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 182efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
183 const u8 *reg_values) 183 const u8 *reg_values)
184{ 184{
185 return 0; 185 return 0;
@@ -442,7 +442,7 @@ static int sfe4001_check_hw(struct efx_nic *efx)
442 return (status < 0) ? -EIO : -ERANGE; 442 return (status < 0) ? -EIO : -ERANGE;
443} 443}
444 444
445static struct i2c_board_info sfe4001_hwmon_info = { 445static const struct i2c_board_info sfe4001_hwmon_info = {
446 I2C_BOARD_INFO("max6647", 0x4e), 446 I2C_BOARD_INFO("max6647", 0x4e),
447}; 447};
448 448
@@ -522,7 +522,7 @@ static const u8 sfe4002_lm87_regs[] = {
522 0 522 0
523}; 523};
524 524
525static struct i2c_board_info sfe4002_hwmon_info = { 525static const struct i2c_board_info sfe4002_hwmon_info = {
526 I2C_BOARD_INFO("lm87", 0x2e), 526 I2C_BOARD_INFO("lm87", 0x2e),
527 .platform_data = &sfe4002_lm87_channel, 527 .platform_data = &sfe4002_lm87_channel,
528}; 528};
@@ -591,7 +591,7 @@ static const u8 sfn4112f_lm87_regs[] = {
591 0 591 0
592}; 592};
593 593
594static struct i2c_board_info sfn4112f_hwmon_info = { 594static const struct i2c_board_info sfn4112f_hwmon_info = {
595 I2C_BOARD_INFO("lm87", 0x2e), 595 I2C_BOARD_INFO("lm87", 0x2e),
596 .platform_data = &sfn4112f_lm87_channel, 596 .platform_data = &sfn4112f_lm87_channel,
597}; 597};
@@ -653,7 +653,7 @@ static const u8 sfe4003_lm87_regs[] = {
653 0 653 0
654}; 654};
655 655
656static struct i2c_board_info sfe4003_hwmon_info = { 656static const struct i2c_board_info sfe4003_hwmon_info = {
657 I2C_BOARD_INFO("lm87", 0x2e), 657 I2C_BOARD_INFO("lm87", 0x2e),
658 .platform_data = &sfe4003_lm87_channel, 658 .platform_data = &sfe4003_lm87_channel,
659}; 659};
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 9516452c079c..6106ef15dee3 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -14,7 +14,6 @@
14#include "nic.h" 14#include "nic.h"
15#include "regs.h" 15#include "regs.h"
16#include "io.h" 16#include "io.h"
17#include "mac.h"
18#include "mdio_10g.h" 17#include "mdio_10g.h"
19#include "workarounds.h" 18#include "workarounds.h"
20 19
@@ -139,7 +138,7 @@ static bool falcon_xmac_link_ok(struct efx_nic *efx)
139 return (efx->loopback_mode == LOOPBACK_XGMII || 138 return (efx->loopback_mode == LOOPBACK_XGMII ||
140 falcon_xgxs_link_ok(efx)) && 139 falcon_xgxs_link_ok(efx)) &&
141 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || 140 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
142 LOOPBACK_INTERNAL(efx) || 141 LOOPBACK_INTERNAL(efx) ||
143 efx_mdio_phyxgxs_lane_sync(efx)); 142 efx_mdio_phyxgxs_lane_sync(efx));
144} 143}
145 144
@@ -270,12 +269,12 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
270 return mac_up; 269 return mac_up;
271} 270}
272 271
273static bool falcon_xmac_check_fault(struct efx_nic *efx) 272bool falcon_xmac_check_fault(struct efx_nic *efx)
274{ 273{
275 return !falcon_xmac_link_ok_retry(efx, 5); 274 return !falcon_xmac_link_ok_retry(efx, 5);
276} 275}
277 276
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 277int falcon_reconfigure_xmac(struct efx_nic *efx)
279{ 278{
280 struct falcon_nic_data *nic_data = efx->nic_data; 279 struct falcon_nic_data *nic_data = efx->nic_data;
281 280
@@ -290,7 +289,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
290 return 0; 289 return 0;
291} 290}
292 291
293static void falcon_update_stats_xmac(struct efx_nic *efx) 292void falcon_update_stats_xmac(struct efx_nic *efx)
294{ 293{
295 struct efx_mac_stats *mac_stats = &efx->mac_stats; 294 struct efx_mac_stats *mac_stats = &efx->mac_stats;
296 295
@@ -361,9 +360,3 @@ void falcon_poll_xmac(struct efx_nic *efx)
361 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 360 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
362 falcon_ack_status_intr(efx); 361 falcon_ack_status_intr(efx);
363} 362}
364
365const struct efx_mac_operations falcon_xmac_operations = {
366 .reconfigure = falcon_reconfigure_xmac,
367 .update_stats = falcon_update_stats_xmac,
368 .check_fault = falcon_xmac_check_fault,
369};
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 1fbbbee7b1ae..fea7f7300675 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -35,9 +35,17 @@
35enum efx_filter_table_id { 35enum efx_filter_table_id {
36 EFX_FILTER_TABLE_RX_IP = 0, 36 EFX_FILTER_TABLE_RX_IP = 0,
37 EFX_FILTER_TABLE_RX_MAC, 37 EFX_FILTER_TABLE_RX_MAC,
38 EFX_FILTER_TABLE_RX_DEF,
39 EFX_FILTER_TABLE_TX_MAC,
38 EFX_FILTER_TABLE_COUNT, 40 EFX_FILTER_TABLE_COUNT,
39}; 41};
40 42
43enum efx_filter_index {
44 EFX_FILTER_INDEX_UC_DEF,
45 EFX_FILTER_INDEX_MC_DEF,
46 EFX_FILTER_SIZE_RX_DEF,
47};
48
41struct efx_filter_table { 49struct efx_filter_table {
42 enum efx_filter_table_id id; 50 enum efx_filter_table_id id;
43 u32 offset; /* address of table relative to BAR */ 51 u32 offset; /* address of table relative to BAR */
@@ -90,8 +98,9 @@ efx_filter_spec_table_id(const struct efx_filter_spec *spec)
90 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2)); 98 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
91 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2)); 99 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
92 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2)); 100 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
101 BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
93 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC); 102 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
94 return spec->type >> 2; 103 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
95} 104}
96 105
97static struct efx_filter_table * 106static struct efx_filter_table *
@@ -109,7 +118,7 @@ static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
109 memset(table->search_depth, 0, sizeof(table->search_depth)); 118 memset(table->search_depth, 0, sizeof(table->search_depth));
110} 119}
111 120
112static void efx_filter_push_rx_limits(struct efx_nic *efx) 121static void efx_filter_push_rx_config(struct efx_nic *efx)
113{ 122{
114 struct efx_filter_state *state = efx->filter_state; 123 struct efx_filter_state *state = efx->filter_state;
115 struct efx_filter_table *table; 124 struct efx_filter_table *table;
@@ -143,9 +152,58 @@ static void efx_filter_push_rx_limits(struct efx_nic *efx)
143 FILTER_CTL_SRCH_FUDGE_WILD); 152 FILTER_CTL_SRCH_FUDGE_WILD);
144 } 153 }
145 154
155 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
156 if (table->size) {
157 EFX_SET_OWORD_FIELD(
158 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
159 table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
160 EFX_SET_OWORD_FIELD(
161 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
163 EFX_FILTER_FLAG_RX_RSS));
164 EFX_SET_OWORD_FIELD(
165 filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
166 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS));
175 EFX_SET_OWORD_FIELD(
176 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
177 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
178 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
179 }
180
146 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 181 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
147} 182}
148 183
184static void efx_filter_push_tx_limits(struct efx_nic *efx)
185{
186 struct efx_filter_state *state = efx->filter_state;
187 struct efx_filter_table *table;
188 efx_oword_t tx_cfg;
189
190 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
191
192 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
193 if (table->size) {
194 EFX_SET_OWORD_FIELD(
195 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
196 table->search_depth[EFX_FILTER_MAC_FULL] +
197 FILTER_CTL_SRCH_FUDGE_FULL);
198 EFX_SET_OWORD_FIELD(
199 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
200 table->search_depth[EFX_FILTER_MAC_WILD] +
201 FILTER_CTL_SRCH_FUDGE_WILD);
202 }
203
204 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
205}
206
149static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, 207static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
150 __be32 host1, __be16 port1, 208 __be32 host1, __be16 port1,
151 __be32 host2, __be16 port2) 209 __be32 host2, __be16 port2)
@@ -300,7 +358,8 @@ int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
300int efx_filter_set_eth_local(struct efx_filter_spec *spec, 358int efx_filter_set_eth_local(struct efx_filter_spec *spec,
301 u16 vid, const u8 *addr) 359 u16 vid, const u8 *addr)
302{ 360{
303 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); 361 EFX_BUG_ON_PARANOID(!(spec->flags &
362 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
304 363
305 /* This cannot currently be combined with other filtering */ 364 /* This cannot currently be combined with other filtering */
306 if (spec->type != EFX_FILTER_UNSPEC) 365 if (spec->type != EFX_FILTER_UNSPEC)
@@ -319,6 +378,52 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
319 return 0; 378 return 0;
320} 379}
321 380
381/**
382 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
383 * @spec: Specification to initialise
384 */
385int efx_filter_set_uc_def(struct efx_filter_spec *spec)
386{
387 EFX_BUG_ON_PARANOID(!(spec->flags &
388 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
389
390 if (spec->type != EFX_FILTER_UNSPEC)
391 return -EINVAL;
392
393 spec->type = EFX_FILTER_UC_DEF;
394 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
395 return 0;
396}
397
398/**
399 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
400 * @spec: Specification to initialise
401 */
402int efx_filter_set_mc_def(struct efx_filter_spec *spec)
403{
404 EFX_BUG_ON_PARANOID(!(spec->flags &
405 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
406
407 if (spec->type != EFX_FILTER_UNSPEC)
408 return -EINVAL;
409
410 spec->type = EFX_FILTER_MC_DEF;
411 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
412 return 0;
413}
414
415static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
416{
417 struct efx_filter_state *state = efx->filter_state;
418 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
419 struct efx_filter_spec *spec = &table->spec[filter_idx];
420
421 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
422 EFX_FILTER_FLAG_RX_RSS, 0);
423 spec->type = EFX_FILTER_UC_DEF + filter_idx;
424 table->used_bitmap[0] |= 1 << filter_idx;
425}
426
322int efx_filter_get_eth_local(const struct efx_filter_spec *spec, 427int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
323 u16 *vid, u8 *addr) 428 u16 *vid, u8 *addr)
324{ 429{
@@ -366,6 +471,13 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
366 break; 471 break;
367 } 472 }
368 473
474 case EFX_FILTER_TABLE_RX_DEF:
475 /* One filter spec per type */
476 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
477 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
478 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
479 return spec->type - EFX_FILTER_UC_DEF;
480
369 case EFX_FILTER_TABLE_RX_MAC: { 481 case EFX_FILTER_TABLE_RX_MAC: {
370 bool is_wild = spec->type == EFX_FILTER_MAC_WILD; 482 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
371 EFX_POPULATE_OWORD_8( 483 EFX_POPULATE_OWORD_8(
@@ -385,6 +497,18 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
385 break; 497 break;
386 } 498 }
387 499
500 case EFX_FILTER_TABLE_TX_MAC: {
501 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
502 EFX_POPULATE_OWORD_5(*filter,
503 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
504 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
505 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
506 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
507 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
508 data3 = is_wild | spec->dmaq_id << 1;
509 break;
510 }
511
388 default: 512 default:
389 BUG(); 513 BUG();
390 } 514 }
@@ -399,6 +523,10 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
399 memcmp(left->data, right->data, sizeof(left->data))) 523 memcmp(left->data, right->data, sizeof(left->data)))
400 return false; 524 return false;
401 525
526 if (left->flags & EFX_FILTER_FLAG_TX &&
527 left->dmaq_id != right->dmaq_id)
528 return false;
529
402 return true; 530 return true;
403} 531}
404 532
@@ -448,23 +576,40 @@ static int efx_filter_search(struct efx_filter_table *table,
448 * MAC filters without overriding behaviour. 576 * MAC filters without overriding behaviour.
449 */ 577 */
450 578
579#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP 0
580#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP 1
581#define EFX_FILTER_MATCH_PRI_NORMAL_BASE 2
582
451#define EFX_FILTER_INDEX_WIDTH 13 583#define EFX_FILTER_INDEX_WIDTH 13
452#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) 584#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
453 585
454static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id, 586static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
455 unsigned int index, u8 flags) 587 unsigned int index, u8 flags)
456{ 588{
457 return (table_id == EFX_FILTER_TABLE_RX_MAC && 589 unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;
458 flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ? 590
459 index : 591 if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
460 (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index; 592 if (table_id == EFX_FILTER_TABLE_RX_MAC)
593 match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
594 else if (table_id == EFX_FILTER_TABLE_RX_DEF)
595 match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
596 }
597
598 return match_pri << EFX_FILTER_INDEX_WIDTH | index;
461} 599}
462 600
463static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) 601static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
464{ 602{
465 return (id <= EFX_FILTER_INDEX_MASK) ? 603 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
466 EFX_FILTER_TABLE_RX_MAC : 604
467 (id >> EFX_FILTER_INDEX_WIDTH) - 1; 605 switch (match_pri) {
606 case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
607 return EFX_FILTER_TABLE_RX_MAC;
608 case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
609 return EFX_FILTER_TABLE_RX_DEF;
610 default:
611 return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
612 }
468} 613}
469 614
470static inline unsigned int efx_filter_id_index(u32 id) 615static inline unsigned int efx_filter_id_index(u32 id)
@@ -474,23 +619,30 @@ static inline unsigned int efx_filter_id_index(u32 id)
474 619
475static inline u8 efx_filter_id_flags(u32 id) 620static inline u8 efx_filter_id_flags(u32 id)
476{ 621{
477 return (id <= EFX_FILTER_INDEX_MASK) ? 622 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
478 EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP : 623
479 EFX_FILTER_FLAG_RX; 624 if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
625 return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
626 else if (match_pri <=
627 EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
628 return EFX_FILTER_FLAG_RX;
629 else
630 return EFX_FILTER_FLAG_TX;
480} 631}
481 632
482u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) 633u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
483{ 634{
484 struct efx_filter_state *state = efx->filter_state; 635 struct efx_filter_state *state = efx->filter_state;
636 unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;
485 637
486 if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0) 638 do {
487 return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH) 639 if (state->table[table_id].size != 0)
488 + state->table[EFX_FILTER_TABLE_RX_MAC].size; 640 return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
489 else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0) 641 << EFX_FILTER_INDEX_WIDTH) +
490 return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH) 642 state->table[table_id].size;
491 + state->table[EFX_FILTER_TABLE_RX_IP].size; 643 } while (table_id--);
492 else 644
493 return 0; 645 return 0;
494} 646}
495 647
496/** 648/**
@@ -548,12 +700,20 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
548 } 700 }
549 *saved_spec = *spec; 701 *saved_spec = *spec;
550 702
551 if (table->search_depth[spec->type] < depth) { 703 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
552 table->search_depth[spec->type] = depth; 704 efx_filter_push_rx_config(efx);
553 efx_filter_push_rx_limits(efx); 705 } else {
554 } 706 if (table->search_depth[spec->type] < depth) {
707 table->search_depth[spec->type] = depth;
708 if (spec->flags & EFX_FILTER_FLAG_TX)
709 efx_filter_push_tx_limits(efx);
710 else
711 efx_filter_push_rx_config(efx);
712 }
555 713
556 efx_writeo(efx, &filter, table->offset + table->step * filter_idx); 714 efx_writeo(efx, &filter,
715 table->offset + table->step * filter_idx);
716 }
557 717
558 netif_vdbg(efx, hw, efx->net_dev, 718 netif_vdbg(efx, hw, efx->net_dev,
559 "%s: filter type %d index %d rxq %u set", 719 "%s: filter type %d index %d rxq %u set",
@@ -571,7 +731,11 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
571{ 731{
572 static efx_oword_t filter; 732 static efx_oword_t filter;
573 733
574 if (test_bit(filter_idx, table->used_bitmap)) { 734 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
735 /* RX default filters must always exist */
736 efx_filter_reset_rx_def(efx, filter_idx);
737 efx_filter_push_rx_config(efx);
738 } else if (test_bit(filter_idx, table->used_bitmap)) {
575 __clear_bit(filter_idx, table->used_bitmap); 739 __clear_bit(filter_idx, table->used_bitmap);
576 --table->used; 740 --table->used;
577 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); 741 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
@@ -617,7 +781,8 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
617 spin_lock_bh(&state->lock); 781 spin_lock_bh(&state->lock);
618 782
619 if (test_bit(filter_idx, table->used_bitmap) && 783 if (test_bit(filter_idx, table->used_bitmap) &&
620 spec->priority == priority && spec->flags == filter_flags) { 784 spec->priority == priority &&
785 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
621 efx_filter_table_clear_entry(efx, table, filter_idx); 786 efx_filter_table_clear_entry(efx, table, filter_idx);
622 if (table->used == 0) 787 if (table->used == 0)
623 efx_filter_table_reset_search_depth(table); 788 efx_filter_table_reset_search_depth(table);
@@ -668,7 +833,8 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
668 spin_lock_bh(&state->lock); 833 spin_lock_bh(&state->lock);
669 834
670 if (test_bit(filter_idx, table->used_bitmap) && 835 if (test_bit(filter_idx, table->used_bitmap) &&
671 spec->priority == priority && spec->flags == filter_flags) { 836 spec->priority == priority &&
837 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
672 *spec_buf = *spec; 838 *spec_buf = *spec;
673 rc = 0; 839 rc = 0;
674 } else { 840 } else {
@@ -722,7 +888,7 @@ u32 efx_filter_count_rx_used(struct efx_nic *efx,
722 spin_lock_bh(&state->lock); 888 spin_lock_bh(&state->lock);
723 889
724 for (table_id = EFX_FILTER_TABLE_RX_IP; 890 for (table_id = EFX_FILTER_TABLE_RX_IP;
725 table_id <= EFX_FILTER_TABLE_RX_MAC; 891 table_id <= EFX_FILTER_TABLE_RX_DEF;
726 table_id++) { 892 table_id++) {
727 table = &state->table[table_id]; 893 table = &state->table[table_id];
728 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 894 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
@@ -750,7 +916,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
750 spin_lock_bh(&state->lock); 916 spin_lock_bh(&state->lock);
751 917
752 for (table_id = EFX_FILTER_TABLE_RX_IP; 918 for (table_id = EFX_FILTER_TABLE_RX_IP;
753 table_id <= EFX_FILTER_TABLE_RX_MAC; 919 table_id <= EFX_FILTER_TABLE_RX_DEF;
754 table_id++) { 920 table_id++) {
755 table = &state->table[table_id]; 921 table = &state->table[table_id];
756 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 922 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
@@ -785,6 +951,11 @@ void efx_restore_filters(struct efx_nic *efx)
785 951
786 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { 952 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
787 table = &state->table[table_id]; 953 table = &state->table[table_id];
954
955 /* Check whether this is a regular register table */
956 if (table->step == 0)
957 continue;
958
788 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 959 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
789 if (!test_bit(filter_idx, table->used_bitmap)) 960 if (!test_bit(filter_idx, table->used_bitmap))
790 continue; 961 continue;
@@ -794,7 +965,8 @@ void efx_restore_filters(struct efx_nic *efx)
794 } 965 }
795 } 966 }
796 967
797 efx_filter_push_rx_limits(efx); 968 efx_filter_push_rx_config(efx);
969 efx_filter_push_tx_limits(efx);
798 970
799 spin_unlock_bh(&state->lock); 971 spin_unlock_bh(&state->lock);
800} 972}
@@ -833,6 +1005,16 @@ int efx_probe_filters(struct efx_nic *efx)
833 table->offset = FR_CZ_RX_MAC_FILTER_TBL0; 1005 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
834 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; 1006 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
835 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; 1007 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1008
1009 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
1010 table->id = EFX_FILTER_TABLE_RX_DEF;
1011 table->size = EFX_FILTER_SIZE_RX_DEF;
1012
1013 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
1014 table->id = EFX_FILTER_TABLE_TX_MAC;
1015 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1016 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1017 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
836 } 1018 }
837 1019
838 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { 1020 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
@@ -849,6 +1031,15 @@ int efx_probe_filters(struct efx_nic *efx)
849 goto fail; 1031 goto fail;
850 } 1032 }
851 1033
1034 if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
1035 /* RX default filters must always exist */
1036 unsigned i;
1037 for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
1038 efx_filter_reset_rx_def(efx, i);
1039 }
1040
1041 efx_filter_push_rx_config(efx);
1042
852 return 0; 1043 return 0;
853 1044
854fail: 1045fail:
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3d4108cd90ca..3c77802aed6c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -20,6 +20,8 @@
20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port) 20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID 21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address 22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
23 * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
24 * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
23 * @EFX_FILTER_UNSPEC: Match type is unspecified 25 * @EFX_FILTER_UNSPEC: Match type is unspecified
24 * 26 *
25 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types. 27 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
@@ -31,6 +33,8 @@ enum efx_filter_type {
31 EFX_FILTER_UDP_WILD, 33 EFX_FILTER_UDP_WILD,
32 EFX_FILTER_MAC_FULL = 4, 34 EFX_FILTER_MAC_FULL = 4,
33 EFX_FILTER_MAC_WILD, 35 EFX_FILTER_MAC_WILD,
36 EFX_FILTER_UC_DEF = 8,
37 EFX_FILTER_MC_DEF,
34 EFX_FILTER_TYPE_COUNT, /* number of specific types */ 38 EFX_FILTER_TYPE_COUNT, /* number of specific types */
35 EFX_FILTER_UNSPEC = 0xf, 39 EFX_FILTER_UNSPEC = 0xf,
36}; 40};
@@ -39,7 +43,8 @@ enum efx_filter_type {
39 * enum efx_filter_priority - priority of a hardware filter specification 43 * enum efx_filter_priority - priority of a hardware filter specification
40 * @EFX_FILTER_PRI_HINT: Performance hint 44 * @EFX_FILTER_PRI_HINT: Performance hint
41 * @EFX_FILTER_PRI_MANUAL: Manually configured filter 45 * @EFX_FILTER_PRI_MANUAL: Manually configured filter
42 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour 46 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
47 * networking and SR-IOV)
43 */ 48 */
44enum efx_filter_priority { 49enum efx_filter_priority {
45 EFX_FILTER_PRI_HINT = 0, 50 EFX_FILTER_PRI_HINT = 0,
@@ -60,12 +65,14 @@ enum efx_filter_priority {
60 * any IP filter that matches the same packet. By default, IP 65 * any IP filter that matches the same packet. By default, IP
61 * filters take precedence. 66 * filters take precedence.
62 * @EFX_FILTER_FLAG_RX: Filter is for RX 67 * @EFX_FILTER_FLAG_RX: Filter is for RX
68 * @EFX_FILTER_FLAG_TX: Filter is for TX
63 */ 69 */
64enum efx_filter_flags { 70enum efx_filter_flags {
65 EFX_FILTER_FLAG_RX_RSS = 0x01, 71 EFX_FILTER_FLAG_RX_RSS = 0x01,
66 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 72 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
67 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, 73 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
68 EFX_FILTER_FLAG_RX = 0x08, 74 EFX_FILTER_FLAG_RX = 0x08,
75 EFX_FILTER_FLAG_TX = 0x10,
69}; 76};
70 77
71/** 78/**
@@ -103,6 +110,15 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
103 spec->dmaq_id = rxq_id; 110 spec->dmaq_id = rxq_id;
104} 111}
105 112
113static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
114 unsigned txq_id)
115{
116 spec->type = EFX_FILTER_UNSPEC;
117 spec->priority = EFX_FILTER_PRI_REQUIRED;
118 spec->flags = EFX_FILTER_FLAG_TX;
119 spec->dmaq_id = txq_id;
120}
121
106extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, 122extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
107 __be32 host, __be16 port); 123 __be32 host, __be16 port);
108extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, 124extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
@@ -117,6 +133,8 @@ extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
117 u16 vid, const u8 *addr); 133 u16 vid, const u8 *addr);
118extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec, 134extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
119 u16 *vid, u8 *addr); 135 u16 *vid, u8 *addr);
136extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
137extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
120enum { 138enum {
121 EFX_FILTER_VID_UNSPEC = 0xffff, 139 EFX_FILTER_VID_UNSPEC = 0xffff,
122}; 140};
diff --git a/drivers/net/ethernet/sfc/mac.h b/drivers/net/ethernet/sfc/mac.h
deleted file mode 100644
index d6a255d0856b..000000000000
--- a/drivers/net/ethernet/sfc/mac.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_MAC_H
12#define EFX_MAC_H
13
14#include "net_driver.h"
15
16extern const struct efx_mac_operations falcon_xmac_operations;
17extern const struct efx_mac_operations efx_mcdi_mac_operations;
18extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
19 u32 dma_len, int enable, int clear);
20
21#endif
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81a425397468..17b6463e459c 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -22,22 +22,22 @@
22 ************************************************************************** 22 **************************************************************************
23 */ 23 */
24 24
25/* Software-defined structure to the shared-memory */
26#define CMD_NOTIFY_PORT0 0
27#define CMD_NOTIFY_PORT1 4
28#define CMD_PDU_PORT0 0x008
29#define CMD_PDU_PORT1 0x108
30#define REBOOT_FLAG_PORT0 0x3f8
31#define REBOOT_FLAG_PORT1 0x3fc
32
33#define MCDI_RPC_TIMEOUT 10 /*seconds */ 25#define MCDI_RPC_TIMEOUT 10 /*seconds */
34 26
35#define MCDI_PDU(efx) \ 27#define MCDI_PDU(efx) \
36 (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) 28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
37#define MCDI_DOORBELL(efx) \ 29#define MCDI_DOORBELL(efx) \
38 (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) 30 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
39#define MCDI_REBOOT_FLAG(efx) \ 31#define MCDI_STATUS(efx) \
40 (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) 32 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
33
34/* A reboot/assertion causes the MCDI status word to be set after the
35 * command word is set or a REBOOT event is sent. If we notice a reboot
36 * via these mechanisms then wait 10ms for the status word to be set. */
37#define MCDI_STATUS_DELAY_US 100
38#define MCDI_STATUS_DELAY_COUNT 100
39#define MCDI_STATUS_SLEEP_MS \
40 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
41 41
42#define SEQ_MASK \ 42#define SEQ_MASK \
43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
@@ -77,7 +77,7 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
77 u32 xflags, seqno; 77 u32 xflags, seqno;
78 78
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
80 BUG_ON(inlen & 3 || inlen >= 0x100); 80 BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
81 81
82 seqno = mcdi->seqno & SEQ_MASK; 82 seqno = mcdi->seqno & SEQ_MASK;
83 xflags = 0; 83 xflags = 0;
@@ -111,7 +111,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
111 int i; 111 int i;
112 112
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
114 BUG_ON(outlen & 3 || outlen >= 0x100); 114 BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
115 115
116 for (i = 0; i < outlen; i += 4) 116 for (i = 0; i < outlen; i += 4)
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
@@ -210,7 +210,7 @@ out:
210/* Test and clear MC-rebooted flag for this port/function */ 210/* Test and clear MC-rebooted flag for this port/function */
211int efx_mcdi_poll_reboot(struct efx_nic *efx) 211int efx_mcdi_poll_reboot(struct efx_nic *efx)
212{ 212{
213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
214 efx_dword_t reg; 214 efx_dword_t reg;
215 uint32_t value; 215 uint32_t value;
216 216
@@ -384,6 +384,11 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
384 netif_dbg(efx, hw, efx->net_dev, 384 netif_dbg(efx, hw, efx->net_dev,
385 "MC command 0x%x inlen %d failed rc=%d\n", 385 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc); 386 cmd, (int)inlen, -rc);
387
388 if (rc == -EIO || rc == -EINTR) {
389 msleep(MCDI_STATUS_SLEEP_MS);
390 efx_mcdi_poll_reboot(efx);
391 }
387 } 392 }
388 393
389 efx_mcdi_release(mcdi); 394 efx_mcdi_release(mcdi);
@@ -465,10 +470,20 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
465 mcdi->resplen = 0; 470 mcdi->resplen = 0;
466 ++mcdi->credits; 471 ++mcdi->credits;
467 } 472 }
468 } else 473 } else {
474 int count;
475
469 /* Nobody was waiting for an MCDI request, so trigger a reset */ 476 /* Nobody was waiting for an MCDI request, so trigger a reset */
470 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 477 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
471 478
479 /* Consume the status word since efx_mcdi_rpc_finish() won't */
480 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
481 if (efx_mcdi_poll_reboot(efx))
482 break;
483 udelay(MCDI_STATUS_DELAY_US);
484 }
485 }
486
472 spin_unlock(&mcdi->iface_lock); 487 spin_unlock(&mcdi->iface_lock);
473} 488}
474 489
@@ -502,49 +517,6 @@ static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
502 efx_link_status_changed(efx); 517 efx_link_status_changed(efx);
503} 518}
504 519
505static const char *sensor_names[] = {
506 [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
507 [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
508 [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
509 [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
510 [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
511 [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
512 [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
513 [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
514 [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
515 [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
516 [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
517 [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
518 [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
519};
520
521static const char *sensor_status_names[] = {
522 [MC_CMD_SENSOR_STATE_OK] = "OK",
523 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
524 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
525 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
526};
527
528static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
529{
530 unsigned int monitor, state, value;
531 const char *name, *state_txt;
532 monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
533 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
534 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
535 /* Deal gracefully with the board having more drivers than we
536 * know about, but do not expect new sensor states. */
537 name = (monitor >= ARRAY_SIZE(sensor_names))
538 ? "No sensor name available" :
539 sensor_names[monitor];
540 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
541 state_txt = sensor_status_names[state];
542
543 netif_err(efx, hw, efx->net_dev,
544 "Sensor %d (%s) reports condition '%s' for raw value %d\n",
545 monitor, name, state_txt, value);
546}
547
548/* Called from falcon_process_eventq for MCDI events */ 520/* Called from falcon_process_eventq for MCDI events */
549void efx_mcdi_process_event(struct efx_channel *channel, 521void efx_mcdi_process_event(struct efx_channel *channel,
550 efx_qword_t *event) 522 efx_qword_t *event)
@@ -588,6 +560,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
588 case MCDI_EVENT_CODE_MAC_STATS_DMA: 560 case MCDI_EVENT_CODE_MAC_STATS_DMA:
589 /* MAC stats are gather lazily. We can ignore this. */ 561 /* MAC stats are gather lazily. We can ignore this. */
590 break; 562 break;
563 case MCDI_EVENT_CODE_FLR:
564 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
565 break;
591 566
592 default: 567 default:
593 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 568 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -604,7 +579,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
604 579
605void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 580void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
606{ 581{
607 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; 582 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
608 size_t outlength; 583 size_t outlength;
609 const __le16 *ver_words; 584 const __le16 *ver_words;
610 int rc; 585 int rc;
@@ -616,7 +591,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
616 if (rc) 591 if (rc)
617 goto fail; 592 goto fail;
618 593
619 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 594 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
620 rc = -EIO; 595 rc = -EIO;
621 goto fail; 596 goto fail;
622 } 597 }
@@ -663,9 +638,9 @@ fail:
663} 638}
664 639
665int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 640int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
666 u16 *fw_subtype_list) 641 u16 *fw_subtype_list, u32 *capabilities)
667{ 642{
668 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; 643 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
669 size_t outlen; 644 size_t outlen;
670 int port_num = efx_port_num(efx); 645 int port_num = efx_port_num(efx);
671 int offset; 646 int offset;
@@ -678,7 +653,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
678 if (rc) 653 if (rc)
679 goto fail; 654 goto fail;
680 655
681 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { 656 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
682 rc = -EIO; 657 rc = -EIO;
683 goto fail; 658 goto fail;
684 } 659 }
@@ -691,7 +666,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
691 if (fw_subtype_list) 666 if (fw_subtype_list)
692 memcpy(fw_subtype_list, 667 memcpy(fw_subtype_list,
693 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, 668 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
694 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); 669 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
670 sizeof(fw_subtype_list[0]));
671 if (capabilities) {
672 if (port_num)
673 *capabilities = MCDI_DWORD(outbuf,
674 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
675 else
676 *capabilities = MCDI_DWORD(outbuf,
677 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
678 }
695 679
696 return 0; 680 return 0;
697 681
@@ -779,7 +763,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
779 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 763 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
780 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 764 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
781 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & 765 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
782 (1 << MC_CMD_NVRAM_PROTECTED_LBN)); 766 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
783 return 0; 767 return 0;
784 768
785fail: 769fail:
@@ -1060,7 +1044,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1060 1044
1061int efx_mcdi_reset_port(struct efx_nic *efx) 1045int efx_mcdi_reset_port(struct efx_nic *efx)
1062{ 1046{
1063 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); 1047 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
1064 if (rc) 1048 if (rc)
1065 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1049 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1066 __func__, rc); 1050 __func__, rc);
@@ -1173,6 +1157,37 @@ fail:
1173 return rc; 1157 return rc;
1174} 1158}
1175 1159
1160int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1161{
1162 struct efx_channel *channel;
1163 struct efx_rx_queue *rx_queue;
1164 __le32 *qid;
1165 int rc, count;
1166
1167 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
1168 if (qid == NULL)
1169 return -ENOMEM;
1170
1171 count = 0;
1172 efx_for_each_channel(channel, efx) {
1173 efx_for_each_channel_rx_queue(rx_queue, channel) {
1174 if (rx_queue->flush_pending) {
1175 rx_queue->flush_pending = false;
1176 atomic_dec(&efx->rxq_flush_pending);
1177 qid[count++] = cpu_to_le32(
1178 efx_rx_queue_index(rx_queue));
1179 }
1180 }
1181 }
1182
1183 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
1184 count * sizeof(*qid), NULL, 0, NULL);
1185 WARN_ON(rc > 0);
1186
1187 kfree(qid);
1188
1189 return rc;
1190}
1176 1191
1177int efx_mcdi_wol_filter_reset(struct efx_nic *efx) 1192int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1178{ 1193{
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index aced2a7856fc..0bdf3e331832 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -56,6 +56,15 @@ struct efx_mcdi_iface {
56 size_t resplen; 56 size_t resplen;
57}; 57};
58 58
59struct efx_mcdi_mon {
60 struct efx_buffer dma_buf;
61 struct mutex update_lock;
62 unsigned long last_update;
63 struct device *device;
64 struct efx_mcdi_mon_attribute *attrs;
65 unsigned int n_attrs;
66};
67
59extern void efx_mcdi_init(struct efx_nic *efx); 68extern void efx_mcdi_init(struct efx_nic *efx);
60 69
61extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 70extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
@@ -68,6 +77,7 @@ extern void efx_mcdi_mode_event(struct efx_nic *efx);
68 77
69extern void efx_mcdi_process_event(struct efx_channel *channel, 78extern void efx_mcdi_process_event(struct efx_channel *channel,
70 efx_qword_t *event); 79 efx_qword_t *event);
80extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
71 81
72#define MCDI_PTR2(_buf, _ofst) \ 82#define MCDI_PTR2(_buf, _ofst) \
73 (((u8 *)_buf) + _ofst) 83 (((u8 *)_buf) + _ofst)
@@ -83,6 +93,10 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
83 93
84#define MCDI_PTR(_buf, _ofst) \ 94#define MCDI_PTR(_buf, _ofst) \
85 MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) 95 MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
96#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
97 MCDI_PTR2(_buf, \
98 MC_CMD_ ## _field ## _OFST + \
99 (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
86#define MCDI_SET_DWORD(_buf, _ofst, _value) \ 100#define MCDI_SET_DWORD(_buf, _ofst, _value) \
87 MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) 101 MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
88#define MCDI_DWORD(_buf, _ofst) \ 102#define MCDI_DWORD(_buf, _ofst) \
@@ -92,12 +106,18 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
92 106
93#define MCDI_EVENT_FIELD(_ev, _field) \ 107#define MCDI_EVENT_FIELD(_ev, _field) \
94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 108 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
109#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
110 EFX_DWORD_FIELD( \
111 *((efx_dword_t *) \
112 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
113 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
114 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
95 115
96extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 116extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 117extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
98 bool *was_attached_out); 118 bool *was_attached_out);
99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 119extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
100 u16 *fw_subtype_list); 120 u16 *fw_subtype_list, u32 *capabilities);
101extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, 121extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
102 u32 dest_evq); 122 u32 dest_evq);
103extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); 123extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
@@ -126,5 +146,19 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
126extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 146extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
127extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 147extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
128extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); 148extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
149extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
150extern int efx_mcdi_set_mac(struct efx_nic *efx);
151extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
152 u32 dma_len, int enable, int clear);
153extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
154extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
155
156#ifdef CONFIG_SFC_MCDI_MON
157extern int efx_mcdi_mon_probe(struct efx_nic *efx);
158extern void efx_mcdi_mon_remove(struct efx_nic *efx);
159#else
160static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
161static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
162#endif
129 163
130#endif /* EFX_MCDI_H */ 164#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
index 50c20777a564..1003f309cba7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ b/drivers/net/ethernet/sfc/mcdi_mac.c
@@ -9,11 +9,10 @@
9 9
10#include "net_driver.h" 10#include "net_driver.h"
11#include "efx.h" 11#include "efx.h"
12#include "mac.h"
13#include "mcdi.h" 12#include "mcdi.h"
14#include "mcdi_pcol.h" 13#include "mcdi_pcol.h"
15 14
16static int efx_mcdi_set_mac(struct efx_nic *efx) 15int efx_mcdi_set_mac(struct efx_nic *efx)
17{ 16{
18 u32 reject, fcntl; 17 u32 reject, fcntl;
19 u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN]; 18 u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
@@ -45,6 +44,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
45 } 44 }
46 if (efx->wanted_fc & EFX_FC_AUTO) 45 if (efx->wanted_fc & EFX_FC_AUTO)
47 fcntl = MC_CMD_FCNTL_AUTO; 46 fcntl = MC_CMD_FCNTL_AUTO;
47 if (efx->fc_disable)
48 fcntl = MC_CMD_FCNTL_OFF;
48 49
49 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); 50 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
50 51
@@ -52,7 +53,7 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
52 NULL, 0, NULL); 53 NULL, 0, NULL);
53} 54}
54 55
55static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults) 56bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
56{ 57{
57 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 58 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
58 size_t outlength; 59 size_t outlength;
@@ -62,16 +63,13 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
62 63
63 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 64 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
64 outbuf, sizeof(outbuf), &outlength); 65 outbuf, sizeof(outbuf), &outlength);
65 if (rc) 66 if (rc) {
66 goto fail; 67 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
67 68 __func__, rc);
68 *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT); 69 return true;
69 return 0; 70 }
70 71
71fail: 72 return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
72 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
73 __func__, rc);
74 return rc;
75} 73}
76 74
77int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 75int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
@@ -84,7 +82,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
84 u32 addr_hi; 82 u32 addr_hi;
85 u32 addr_lo; 83 u32 addr_lo;
86 84
87 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0); 85 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
88 86
89 addr_lo = ((u64)dma_addr) >> 0; 87 addr_lo = ((u64)dma_addr) >> 0;
90 addr_hi = ((u64)dma_addr) >> 32; 88 addr_hi = ((u64)dma_addr) >> 32;
@@ -93,13 +91,13 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
93 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi); 91 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
94 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); 92 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
95 EFX_POPULATE_DWORD_7(*cmd_ptr, 93 EFX_POPULATE_DWORD_7(*cmd_ptr,
96 MC_CMD_MAC_STATS_CMD_DMA, !!enable, 94 MC_CMD_MAC_STATS_IN_DMA, !!enable,
97 MC_CMD_MAC_STATS_CMD_CLEAR, clear, 95 MC_CMD_MAC_STATS_IN_CLEAR, clear,
98 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1, 96 MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
99 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable, 97 MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
100 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0, 98 MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
101 MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1, 99 MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
102 MC_CMD_MAC_STATS_CMD_PERIOD_MS, period); 100 MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
103 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 101 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
104 102
105 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 103 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
@@ -115,31 +113,18 @@ fail:
115 return rc; 113 return rc;
116} 114}
117 115
118static int efx_mcdi_mac_reconfigure(struct efx_nic *efx) 116int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
119{ 117{
120 int rc; 118 int rc;
121 119
120 WARN_ON(!mutex_is_locked(&efx->mac_lock));
121
122 rc = efx_mcdi_set_mac(efx); 122 rc = efx_mcdi_set_mac(efx);
123 if (rc != 0) 123 if (rc != 0)
124 return rc; 124 return rc;
125 125
126 /* Restore the multicast hash registers. */ 126 return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
127 efx->type->push_multicast_hash(efx); 127 efx->multicast_hash.byte,
128 128 sizeof(efx->multicast_hash),
129 return 0; 129 NULL, 0, NULL);
130}
131
132
133static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
134{
135 u32 faults;
136 int rc = efx_mcdi_get_mac_faults(efx, &faults);
137 return (rc != 0) || (faults != 0);
138} 130}
139
140
141const struct efx_mac_operations efx_mcdi_mac_operations = {
142 .reconfigure = efx_mcdi_mac_reconfigure,
143 .update_stats = efx_port_dummy_op_void,
144 .check_fault = efx_mcdi_mac_check_fault,
145};
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
new file mode 100644
index 000000000000..fb7f65b59eb8
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -0,0 +1,415 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/bitops.h>
11#include <linux/slab.h>
12#include <linux/hwmon.h>
13#include <linux/stat.h>
14
15#include "net_driver.h"
16#include "mcdi.h"
17#include "mcdi_pcol.h"
18#include "nic.h"
19
20enum efx_hwmon_type {
21 EFX_HWMON_UNKNOWN,
22 EFX_HWMON_TEMP, /* temperature */
23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */
24 EFX_HWMON_IN /* input voltage */
25};
26
27static const struct {
28 const char *label;
29 enum efx_hwmon_type hwmon_type;
30 int port;
31} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
32#define SENSOR(name, label, hwmon_type, port) \
33 [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
34 SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
35 SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
36 SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
37 SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
38 SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
39 SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
40 SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
41 SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
42 SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
43 SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
44 SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
45 SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
46 SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
47 SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
48 SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
49#undef SENSOR
50};
51
52static const char *const sensor_status_names[] = {
53 [MC_CMD_SENSOR_STATE_OK] = "OK",
54 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
55 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
56 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
57};
58
59void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
60{
61 unsigned int type, state, value;
62 const char *name = NULL, *state_txt;
63
64 type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
65 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
66 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
67
68 /* Deal gracefully with the board having more drivers than we
69 * know about, but do not expect new sensor states. */
70 if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
71 name = efx_mcdi_sensor_type[type].label;
72 if (!name)
73 name = "No sensor name available";
74 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
75 state_txt = sensor_status_names[state];
76
77 netif_err(efx, hw, efx->net_dev,
78 "Sensor %d (%s) reports condition '%s' for raw value %d\n",
79 type, name, state_txt, value);
80}
81
82#ifdef CONFIG_SFC_MCDI_MON
83
84struct efx_mcdi_mon_attribute {
85 struct device_attribute dev_attr;
86 unsigned int index;
87 unsigned int type;
88 unsigned int limit_value;
89 char name[12];
90};
91
92static int efx_mcdi_mon_update(struct efx_nic *efx)
93{
94 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
95 u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
96 int rc;
97
98 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
99 hwmon->dma_buf.dma_addr & 0xffffffff);
100 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
101 (u64)hwmon->dma_buf.dma_addr >> 32);
102
103 rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
104 inbuf, sizeof(inbuf), NULL, 0, NULL);
105 if (rc == 0)
106 hwmon->last_update = jiffies;
107 return rc;
108}
109
110static ssize_t efx_mcdi_mon_show_name(struct device *dev,
111 struct device_attribute *attr,
112 char *buf)
113{
114 return sprintf(buf, "%s\n", KBUILD_MODNAME);
115}
116
117static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
118 efx_dword_t *entry)
119{
120 struct efx_nic *efx = dev_get_drvdata(dev);
121 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
122 int rc;
123
124 BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
125
126 mutex_lock(&hwmon->update_lock);
127
128 /* Use cached value if last update was < 1 s ago */
129 if (time_before(jiffies, hwmon->last_update + HZ))
130 rc = 0;
131 else
132 rc = efx_mcdi_mon_update(efx);
133
134 /* Copy out the requested entry */
135 *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
136
137 mutex_unlock(&hwmon->update_lock);
138
139 return rc;
140}
141
142static ssize_t efx_mcdi_mon_show_value(struct device *dev,
143 struct device_attribute *attr,
144 char *buf)
145{
146 struct efx_mcdi_mon_attribute *mon_attr =
147 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
148 efx_dword_t entry;
149 unsigned int value;
150 int rc;
151
152 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
153 if (rc)
154 return rc;
155
156 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
157
158 /* Convert temperature from degrees to milli-degrees Celsius */
159 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
160 value *= 1000;
161
162 return sprintf(buf, "%u\n", value);
163}
164
165static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
166 struct device_attribute *attr,
167 char *buf)
168{
169 struct efx_mcdi_mon_attribute *mon_attr =
170 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
171 unsigned int value;
172
173 value = mon_attr->limit_value;
174
175 /* Convert temperature from degrees to milli-degrees Celsius */
176 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
177 value *= 1000;
178
179 return sprintf(buf, "%u\n", value);
180}
181
182static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
183 struct device_attribute *attr,
184 char *buf)
185{
186 struct efx_mcdi_mon_attribute *mon_attr =
187 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
188 efx_dword_t entry;
189 int state;
190 int rc;
191
192 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
193 if (rc)
194 return rc;
195
196 state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
197 return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
198}
199
200static ssize_t efx_mcdi_mon_show_label(struct device *dev,
201 struct device_attribute *attr,
202 char *buf)
203{
204 struct efx_mcdi_mon_attribute *mon_attr =
205 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
206 return sprintf(buf, "%s\n",
207 efx_mcdi_sensor_type[mon_attr->type].label);
208}
209
210static int
211efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
212 ssize_t (*reader)(struct device *,
213 struct device_attribute *, char *),
214 unsigned int index, unsigned int type,
215 unsigned int limit_value)
216{
217 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
218 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
219 int rc;
220
221 strlcpy(attr->name, name, sizeof(attr->name));
222 attr->index = index;
223 attr->type = type;
224 attr->limit_value = limit_value;
225 attr->dev_attr.attr.name = attr->name;
226 attr->dev_attr.attr.mode = S_IRUGO;
227 attr->dev_attr.show = reader;
228 rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
229 if (rc == 0)
230 ++hwmon->n_attrs;
231 return rc;
232}
233
/* Query the MC for its sensor mask and, if any sensors are present,
 * register a hwmon device and create sysfs attributes (input, min,
 * max, crit, alarm, label) for each sensor on this port.
 *
 * Returns 0 on success or when no sensors exist; on error everything
 * allocated so far is torn down via efx_mcdi_mon_remove().  Note the
 * success path returns from *inside* the for loop (when all 32 mask
 * bits have been consumed); the code after the loop is the failure
 * path only.
 */
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
	u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
	size_t outlen;
	char name[12];
	u32 mask;
	int rc, i, type;

	BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
		return -EIO;

	/* Find out which sensors are present.  Don't create a device
	 * if there are none.
	 */
	mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
	if (mask == 0)
		return 0;

	/* Check again for short response: the response must contain one
	 * SENSOR_INFO_ENTRY per bit set in the mask.
	 */
	if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
		return -EIO;

	/* DMA buffer for READ_SENSORS results, one dword per sensor slot */
	rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
				  4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
	if (rc)
		return rc;

	mutex_init(&hwmon->update_lock);
	/* Prime the cache so the first sysfs read has valid data */
	efx_mcdi_mon_update(efx);

	/* Allocate space for the maximum possible number of
	 * attributes for this set of sensors: name of the driver plus
	 * value, min, max, crit, alarm and label for each sensor.
	 */
	n_attrs = 1 + 6 * hweight32(mask);
	hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
	if (!hwmon->attrs) {
		rc = -ENOMEM;
		goto fail;
	}

	hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
	if (IS_ERR(hwmon->device)) {
		/* NOTE(review): hwmon->device is left holding an ERR_PTR
		 * here; efx_mcdi_mon_remove() must not treat it as valid.
		 */
		rc = PTR_ERR(hwmon->device);
		goto fail;
	}

	/* Mandatory hwmon "name" attribute for the device as a whole */
	rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
	if (rc)
		goto fail;

	/* i counts entries in the SENSOR_INFO response; type walks the
	 * corresponding bits of the sensor mask.
	 */
	for (i = 0, type = -1; ; i++) {
		const char *hwmon_prefix;
		unsigned hwmon_index;
		u16 min1, max1, min2, max2;

		/* Find next sensor type or exit if there is none */
		type++;
		while (!(mask & (1 << type))) {
			type++;
			if (type == 32)
				return 0;	/* success: all sensors done */
		}

		/* Skip sensors specific to a different port */
		if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
		    efx_mcdi_sensor_type[type].port >= 0 &&
		    efx_mcdi_sensor_type[type].port != efx_port_num(efx))
			continue;

		switch (efx_mcdi_sensor_type[type].hwmon_type) {
		case EFX_HWMON_TEMP:
			hwmon_prefix = "temp";
			hwmon_index = ++n_temp; /* 1-based */
			break;
		case EFX_HWMON_COOL:
			/* This is likely to be a heatsink, but there
			 * is no convention for representing cooling
			 * devices other than fans.
			 */
			hwmon_prefix = "fan";
			hwmon_index = ++n_cool; /* 1-based */
			break;
		default:
			hwmon_prefix = "in";
			hwmon_index = n_in++; /* 0-based */
			break;
		}

		/* Limit pairs from the SENSOR_INFO entry for this sensor */
		min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, i, MIN1);
		max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, i, MAX1);
		min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, i, MIN2);
		max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, i, MAX2);

		/* min1 == max1 means the sensor has no measurable value
		 * (e.g. a pure alarm source), so skip input/min/max/crit.
		 */
		if (min1 != max1) {
			snprintf(name, sizeof(name), "%s%u_input",
				 hwmon_prefix, hwmon_index);
			rc = efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_value, i, type, 0);
			if (rc)
				goto fail;

			snprintf(name, sizeof(name), "%s%u_min",
				 hwmon_prefix, hwmon_index);
			rc = efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_limit,
				i, type, min1);
			if (rc)
				goto fail;

			snprintf(name, sizeof(name), "%s%u_max",
				 hwmon_prefix, hwmon_index);
			rc = efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_limit,
				i, type, max1);
			if (rc)
				goto fail;

			if (min2 != max2) {
				/* Assume max2 is critical value.
				 * But we have no good way to expose min2.
				 */
				snprintf(name, sizeof(name), "%s%u_crit",
					 hwmon_prefix, hwmon_index);
				rc = efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, max2);
				if (rc)
					goto fail;
			}
		}

		snprintf(name, sizeof(name), "%s%u_alarm",
			 hwmon_prefix, hwmon_index);
		rc = efx_mcdi_mon_add_attr(
			efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
		if (rc)
			goto fail;

		if (efx_mcdi_sensor_type[type].label) {
			snprintf(name, sizeof(name), "%s%u_label",
				 hwmon_prefix, hwmon_index);
			rc = efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_label, i, type, 0);
			if (rc)
				goto fail;
		}
	}

fail:
	/* Unwind everything created above; handles partial state */
	efx_mcdi_mon_remove(efx);
	return rc;
}
399
400void efx_mcdi_mon_remove(struct efx_nic *efx)
401{
402 struct siena_nic_data *nic_data = efx->nic_data;
403 struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
404 unsigned int i;
405
406 for (i = 0; i < hwmon->n_attrs; i++)
407 device_remove_file(&efx->pci_dev->dev,
408 &hwmon->attrs[i].dev_attr);
409 kfree(hwmon->attrs);
410 if (hwmon->device)
411 hwmon_device_unregister(hwmon->device);
412 efx_nic_free_buffer(efx, &hwmon->dma_buf);
413}
414
415#endif /* CONFIG_SFC_MCDI_MON */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 41fe06fa0600..0310b9f08c9b 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -22,6 +22,18 @@
22/* The Scheduler has started. */ 22/* The Scheduler has started. */
23#define MC_FW_STATE_SCHED (8) 23#define MC_FW_STATE_SCHED (8)
24 24
25/* Siena MC shared memmory offsets */
26/* The 'doorbell' addresses are hard-wired to alert the MC when written */
27#define MC_SMEM_P0_DOORBELL_OFST 0x000
28#define MC_SMEM_P1_DOORBELL_OFST 0x004
29/* The rest of these are firmware-defined */
30#define MC_SMEM_P0_PDU_OFST 0x008
31#define MC_SMEM_P1_PDU_OFST 0x108
32#define MC_SMEM_PDU_LEN 0x100
33#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
34#define MC_SMEM_P0_STATUS_OFST 0x7f8
35#define MC_SMEM_P1_STATUS_OFST 0x7fc
36
25/* Values to be written to the per-port status dword in shared 37/* Values to be written to the per-port status dword in shared
26 * memory on reboot and assert */ 38 * memory on reboot and assert */
27#define MC_STATUS_DWORD_REBOOT (0xb007b007) 39#define MC_STATUS_DWORD_REBOOT (0xb007b007)
@@ -34,6 +46,8 @@
34 */ 46 */
35#define MCDI_PCOL_VERSION 1 47#define MCDI_PCOL_VERSION 1
36 48
49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
50
37/** 51/**
38 * MCDI version 1 52 * MCDI version 1
39 * 53 *
@@ -131,53 +145,6 @@
131 */ 145 */
132#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc 146#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
133 147
134#define MCDI_EVENT_DATA_LBN 0
135#define MCDI_EVENT_DATA_WIDTH 32
136#define MCDI_EVENT_CONT_LBN 32
137#define MCDI_EVENT_CONT_WIDTH 1
138#define MCDI_EVENT_LEVEL_LBN 33
139#define MCDI_EVENT_LEVEL_WIDTH 3
140#define MCDI_EVENT_LEVEL_INFO (0)
141#define MCDI_EVENT_LEVEL_WARN (1)
142#define MCDI_EVENT_LEVEL_ERR (2)
143#define MCDI_EVENT_LEVEL_FATAL (3)
144#define MCDI_EVENT_SRC_LBN 36
145#define MCDI_EVENT_SRC_WIDTH 8
146#define MCDI_EVENT_CODE_LBN 44
147#define MCDI_EVENT_CODE_WIDTH 8
148#define MCDI_EVENT_CODE_BADSSERT (1)
149#define MCDI_EVENT_CODE_PMNOTICE (2)
150#define MCDI_EVENT_CODE_CMDDONE (3)
151#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
152#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
153#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
154#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
155#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
156#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
157#define MCDI_EVENT_CODE_LINKCHANGE (4)
158#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
159#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
160#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
161#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
162#define MCDI_EVENT_LINKCHANGE_SPEED_100M 1
163#define MCDI_EVENT_LINKCHANGE_SPEED_1G 2
164#define MCDI_EVENT_LINKCHANGE_SPEED_10G 3
165#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
166#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
167#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
168#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
169#define MCDI_EVENT_CODE_SENSOREVT (5)
170#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
171#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
172#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
173#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
174#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
175#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
176#define MCDI_EVENT_CODE_SCHEDERR (6)
177#define MCDI_EVENT_CODE_REBOOT (7)
178#define MCDI_EVENT_CODE_MAC_STATS_DMA (8)
179#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
180#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
181 148
182/* Non-existent command target */ 149/* Non-existent command target */
183#define MC_CMD_ERR_ENOENT 2 150#define MC_CMD_ERR_ENOENT 2
@@ -198,121 +165,24 @@
198 165
199#define MC_CMD_ERR_CODE_OFST 0 166#define MC_CMD_ERR_CODE_OFST 0
200 167
168/* We define 8 "escape" commands to allow
169 for command number space extension */
170
171#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
172#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
173#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
174#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
175#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
176#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
177#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
178#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
179
180/* Vectors in the boot ROM */
181/* Point to the copycode entry point. */
182#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
183/* Points to the recovery mode entry point. */
184#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
201 185
202/* MC_CMD_READ32: (debug, variadic out)
203 * Read multiple 32byte words from MC memory
204 */
205#define MC_CMD_READ32 0x01
206#define MC_CMD_READ32_IN_LEN 8
207#define MC_CMD_READ32_IN_ADDR_OFST 0
208#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
209#define MC_CMD_READ32_OUT_LEN(_numwords) \
210 (4 * (_numwords))
211#define MC_CMD_READ32_OUT_BUFFER_OFST 0
212
213/* MC_CMD_WRITE32: (debug, variadic in)
214 * Write multiple 32byte words to MC memory
215 */
216#define MC_CMD_WRITE32 0x02
217#define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4)
218#define MC_CMD_WRITE32_IN_ADDR_OFST 0
219#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
220#define MC_CMD_WRITE32_OUT_LEN 0
221
222/* MC_CMD_COPYCODE: (debug)
223 * Copy MC code between two locations and jump
224 */
225#define MC_CMD_COPYCODE 0x03
226#define MC_CMD_COPYCODE_IN_LEN 16
227#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
228#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
229#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
230#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
231/* Control should return to the caller rather than jumping */
232#define MC_CMD_COPYCODE_JUMP_NONE 1
233#define MC_CMD_COPYCODE_OUT_LEN 0
234
235/* MC_CMD_SET_FUNC: (debug)
236 * Select function for function-specific commands.
237 */
238#define MC_CMD_SET_FUNC 0x04
239#define MC_CMD_SET_FUNC_IN_LEN 4
240#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
241#define MC_CMD_SET_FUNC_OUT_LEN 0
242
243/* MC_CMD_GET_BOOT_STATUS:
244 * Get the instruction address from which the MC booted.
245 */
246#define MC_CMD_GET_BOOT_STATUS 0x05
247#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
248#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
249#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
250#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
251/* Reboot caused by watchdog */
252#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0)
253#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1)
254/* MC booted from primary flash partition */
255#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1)
256#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1)
257/* MC booted from backup flash partition */
258#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2)
259#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1)
260
261/* MC_CMD_GET_ASSERTS: (debug, variadic out)
262 * Get (and optionally clear) the current assertion status.
263 *
264 * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion
265 * payload. The other fields will only be present if
266 * OUT.GLOBAL_FLAGS != NO_FAILS
267 */
268#define MC_CMD_GET_ASSERTS 0x06
269#define MC_CMD_GET_ASSERTS_IN_LEN 4
270#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
271#define MC_CMD_GET_ASSERTS_OUT_LEN 140
272/* Assertion status flag */
273#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
274/*! No assertions have failed. */
275#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1
276/*! A system-level assertion has failed. */
277#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2
278/*! A thread-level assertion has failed. */
279#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3
280/*! The system was reset by the watchdog. */
281#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4
282/* Failing PC value */
283#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
284/* Saved GP regs */
285#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
286#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124
287/* Failing thread address */
288#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
289
290/* MC_CMD_LOG_CTRL:
291 * Determine the output stream for various events and messages
292 */
293#define MC_CMD_LOG_CTRL 0x07
294#define MC_CMD_LOG_CTRL_IN_LEN 8
295#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
296#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1)
297#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2)
298#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
299#define MC_CMD_LOG_CTRL_OUT_LEN 0
300
301/* MC_CMD_GET_VERSION:
302 * Get version information about the MC firmware
303 */
304#define MC_CMD_GET_VERSION 0x08
305#define MC_CMD_GET_VERSION_IN_LEN 0
306#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
307#define MC_CMD_GET_VERSION_V1_OUT_LEN 32
308#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
309/* Reserved version number to indicate "any" version. */
310#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
311/* The version response of a boot ROM awaiting rescue */
312#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000
313#define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4
314/* 128bit mask of functions supported by the current firmware */
315#define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8
316/* The command set exported by the boot ROM (MCDI v0) */ 186/* The command set exported by the boot ROM (MCDI v0) */
317#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ 187#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
318 (1 << MC_CMD_READ32) | \ 188 (1 << MC_CMD_READ32) | \
@@ -320,1456 +190,2214 @@
320 (1 << MC_CMD_COPYCODE) | \ 190 (1 << MC_CMD_COPYCODE) | \
321 (1 << MC_CMD_GET_VERSION), \ 191 (1 << MC_CMD_GET_VERSION), \
322 0, 0, 0 } 192 0, 0, 0 }
323#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
324 193
325/* Vectors in the boot ROM */ 194#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
326/* Point to the copycode entry point. */ 195 (MC_CMD_SENSOR_ENTRY_OFST + (_x))
327#define MC_BOOTROM_COPYCODE_VEC (0x7f4) 196
328/* Points to the recovery mode entry point. */ 197#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
329#define MC_BOOTROM_NOFLASH_VEC (0x7f8) 198 (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
199 MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
200 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
201
202#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
203 (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
204 MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
205 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
206
207#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
208 (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
209 MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
210 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
211
212
213/* MCDI_EVENT structuredef */
214#define MCDI_EVENT_LEN 8
215#define MCDI_EVENT_CONT_LBN 32
216#define MCDI_EVENT_CONT_WIDTH 1
217#define MCDI_EVENT_LEVEL_LBN 33
218#define MCDI_EVENT_LEVEL_WIDTH 3
219#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
220#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
221#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
222#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
223#define MCDI_EVENT_DATA_OFST 0
224#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
225#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
226#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
227#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
228#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
229#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
230#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
231#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
232#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
233#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
234#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
235#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
236#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
237#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
238#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
239#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
240#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
241#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
242#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
243#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
244#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
245#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
246#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
247#define MCDI_EVENT_FWALERT_DATA_LBN 8
248#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
249#define MCDI_EVENT_FWALERT_REASON_LBN 0
250#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
251#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
252#define MCDI_EVENT_FLR_VF_LBN 0
253#define MCDI_EVENT_FLR_VF_WIDTH 8
254#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
255#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
256#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
257#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
258#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
259#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
260#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
261#define MCDI_EVENT_TX_ERR_INFO_LBN 16
262#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
263#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
264#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
265#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
266#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
267#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
268#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
269#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
270#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
271#define MCDI_EVENT_DATA_LBN 0
272#define MCDI_EVENT_DATA_WIDTH 32
273#define MCDI_EVENT_SRC_LBN 36
274#define MCDI_EVENT_SRC_WIDTH 8
275#define MCDI_EVENT_EV_CODE_LBN 60
276#define MCDI_EVENT_EV_CODE_WIDTH 4
277#define MCDI_EVENT_CODE_LBN 44
278#define MCDI_EVENT_CODE_WIDTH 8
279#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
280#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
281#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
282#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
283#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
284#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
285#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
286#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
287#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
288#define MCDI_EVENT_CODE_FLR 0xa /* enum */
289#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
290#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
291#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
292#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
293#define MCDI_EVENT_CMDDONE_DATA_OFST 0
294#define MCDI_EVENT_CMDDONE_DATA_LBN 0
295#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
296#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
297#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
298#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
299#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
300#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
301#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
302#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
303#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
304#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
305#define MCDI_EVENT_TX_ERR_DATA_OFST 0
306#define MCDI_EVENT_TX_ERR_DATA_LBN 0
307#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
308#define MCDI_EVENT_PTP_SECONDS_OFST 0
309#define MCDI_EVENT_PTP_SECONDS_LBN 0
310#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
311#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
312#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
313#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
314#define MCDI_EVENT_PTP_UUID_OFST 0
315#define MCDI_EVENT_PTP_UUID_LBN 0
316#define MCDI_EVENT_PTP_UUID_WIDTH 32
317
318
319/***********************************/
320/* MC_CMD_READ32
321 * Read multiple 32byte words from MC memory.
322 */
323#define MC_CMD_READ32 0x1
324
325/* MC_CMD_READ32_IN msgrequest */
326#define MC_CMD_READ32_IN_LEN 8
327#define MC_CMD_READ32_IN_ADDR_OFST 0
328#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
329
330/* MC_CMD_READ32_OUT msgresponse */
331#define MC_CMD_READ32_OUT_LENMIN 4
332#define MC_CMD_READ32_OUT_LENMAX 252
333#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
334#define MC_CMD_READ32_OUT_BUFFER_OFST 0
335#define MC_CMD_READ32_OUT_BUFFER_LEN 4
336#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
337#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
338
339
340/***********************************/
341/* MC_CMD_WRITE32
342 * Write multiple 32byte words to MC memory.
343 */
344#define MC_CMD_WRITE32 0x2
345
346/* MC_CMD_WRITE32_IN msgrequest */
347#define MC_CMD_WRITE32_IN_LENMIN 8
348#define MC_CMD_WRITE32_IN_LENMAX 252
349#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
350#define MC_CMD_WRITE32_IN_ADDR_OFST 0
351#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
352#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
353#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
354#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
355
356/* MC_CMD_WRITE32_OUT msgresponse */
357#define MC_CMD_WRITE32_OUT_LEN 0
358
359
360/***********************************/
361/* MC_CMD_COPYCODE
362 * Copy MC code between two locations and jump.
363 */
364#define MC_CMD_COPYCODE 0x3
365
366/* MC_CMD_COPYCODE_IN msgrequest */
367#define MC_CMD_COPYCODE_IN_LEN 16
368#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
369#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
370#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
371#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
372#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
373
374/* MC_CMD_COPYCODE_OUT msgresponse */
375#define MC_CMD_COPYCODE_OUT_LEN 0
376
377
378/***********************************/
379/* MC_CMD_SET_FUNC
380 */
381#define MC_CMD_SET_FUNC 0x4
382
383/* MC_CMD_SET_FUNC_IN msgrequest */
384#define MC_CMD_SET_FUNC_IN_LEN 4
385#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
386
387/* MC_CMD_SET_FUNC_OUT msgresponse */
388#define MC_CMD_SET_FUNC_OUT_LEN 0
389
390
391/***********************************/
392/* MC_CMD_GET_BOOT_STATUS
393 */
394#define MC_CMD_GET_BOOT_STATUS 0x5
395
396/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
397#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
398
399/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
400#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
401#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
402#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
403#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
404#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
405#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
406#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
407#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
408#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
330 409
331/* Test execution limits */
332#define MC_TESTEXEC_VARIANT_COUNT 16
333#define MC_TESTEXEC_RESULT_COUNT 7
334 410
335/* MC_CMD_SET_TESTVARS: (debug, variadic in) 411/***********************************/
336 * Write variant words for test. 412/* MC_CMD_GET_ASSERTS
337 * 413 * Get and clear any assertion status.
338 * The user supplies a bitmap of the variants they wish to set.
339 * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP)
340 */
341#define MC_CMD_SET_TESTVARS 0x09
342#define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \
343 (4 + 4*(_numwords))
344#define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0
345/* Up to MC_TESTEXEC_VARIANT_COUNT of 32byte words start here */
346#define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4
347#define MC_CMD_SET_TESTVARS_OUT_LEN 0
348
349/* MC_CMD_GET_TESTRCS: (debug, variadic out)
350 * Return result words from test.
351 */
352#define MC_CMD_GET_TESTRCS 0x0a
353#define MC_CMD_GET_TESTRCS_IN_LEN 4
354#define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0
355#define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \
356 (4 * (_numwords))
357#define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0
358
359/* MC_CMD_RUN_TEST: (debug)
360 * Run the test exported by this firmware image
361 */
362#define MC_CMD_RUN_TEST 0x0b
363#define MC_CMD_RUN_TEST_IN_LEN 0
364#define MC_CMD_RUN_TEST_OUT_LEN 0
365
366/* MC_CMD_CSR_READ32: (debug, variadic out)
367 * Read 32bit words from the indirect memory map
368 */
369#define MC_CMD_CSR_READ32 0x0c
370#define MC_CMD_CSR_READ32_IN_LEN 12
371#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
372#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
373#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
374#define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \
375 (((_numwords) * 4) + 4)
376/* IN.NUMWORDS of 32bit words start here */
377#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
378#define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \
379 ((_numwords) * 4)
380
381/* MC_CMD_CSR_WRITE32: (debug, variadic in)
382 * Write 32bit dwords to the indirect memory map
383 */
384#define MC_CMD_CSR_WRITE32 0x0d
385#define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \
386 (((_numwords) * 4) + 8)
387#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
388#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
389/* Multiple 32bit words of data to write start here */
390#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
391#define MC_CMD_CSR_WRITE32_OUT_LEN 4
392#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
393
394/* MC_CMD_JTAG_WORK: (debug, fpga only)
395 * Process JTAG work buffer for RBF acceleration.
396 *
397 * Host: bit count, (up to) 32 words of data to clock out to JTAG
398 * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.)
399 * MC: bit count, (up to) 32 words of data clocked in from JTAG
400 * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused)
401 */ 414 */
402#define MC_CMD_JTAG_WORK 0x0e 415#define MC_CMD_GET_ASSERTS 0x6
403 416
404/* MC_CMD_STACKINFO: (debug, variadic out) 417/* MC_CMD_GET_ASSERTS_IN msgrequest */
405 * Get stack information 418#define MC_CMD_GET_ASSERTS_IN_LEN 4
406 * 419#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
407 * Host: nothing 420
408 * MC: (thread ptr, stack size, free space) for each thread in system 421/* MC_CMD_GET_ASSERTS_OUT msgresponse */
409 */ 422#define MC_CMD_GET_ASSERTS_OUT_LEN 140
410#define MC_CMD_STACKINFO 0x0f 423#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
424#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
425#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
426#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
427#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
428#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
429#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
430#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
431#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
432#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
433#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
411 434
412/* MC_CMD_MDIO_READ: 435
413 * MDIO register read 436/***********************************/
437/* MC_CMD_LOG_CTRL
438 * Configure the output stream for various events and messages.
439 */
440#define MC_CMD_LOG_CTRL 0x7
441
442/* MC_CMD_LOG_CTRL_IN msgrequest */
443#define MC_CMD_LOG_CTRL_IN_LEN 8
444#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
445#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
446#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
447#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
448
449/* MC_CMD_LOG_CTRL_OUT msgresponse */
450#define MC_CMD_LOG_CTRL_OUT_LEN 0
451
452
453/***********************************/
454/* MC_CMD_GET_VERSION
455 * Get version information about the MC firmware.
456 */
457#define MC_CMD_GET_VERSION 0x8
458
459/* MC_CMD_GET_VERSION_IN msgrequest */
460#define MC_CMD_GET_VERSION_IN_LEN 0
461
462/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
463#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
464#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
465#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
466#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
467
468/* MC_CMD_GET_VERSION_OUT msgresponse */
469#define MC_CMD_GET_VERSION_OUT_LEN 32
470/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
471/* Enum values, see field(s): */
472/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
473#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
474#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
475#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
476#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
477#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
478#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
479#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
480
481
482/***********************************/
483/* MC_CMD_GET_FPGAREG
484 * Read multiple bytes from PTP FPGA.
485 */
486#define MC_CMD_GET_FPGAREG 0x9
487
488/* MC_CMD_GET_FPGAREG_IN msgrequest */
489#define MC_CMD_GET_FPGAREG_IN_LEN 8
490#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
491#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
492
493/* MC_CMD_GET_FPGAREG_OUT msgresponse */
494#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
495#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255
496#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
500#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255
501
502
503/***********************************/
504/* MC_CMD_PUT_FPGAREG
505 * Write multiple bytes to PTP FPGA.
506 */
507#define MC_CMD_PUT_FPGAREG 0xa
508
509/* MC_CMD_PUT_FPGAREG_IN msgrequest */
510#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
511#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255
512#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
513#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
517#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251
518
519/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
520#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
521
522
523/***********************************/
524/* MC_CMD_PTP
525 * Perform PTP operation
526 */
527#define MC_CMD_PTP 0xb
528
529/* MC_CMD_PTP_IN msgrequest */
530#define MC_CMD_PTP_IN_LEN 1
531#define MC_CMD_PTP_IN_OP_OFST 0
532#define MC_CMD_PTP_IN_OP_LEN 1
533#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
534#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
535#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
536#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
537#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
538#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
539#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
540#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
541#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
542#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
543#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
544#define MC_CMD_PTP_OP_MAX 0xc /* enum */
545
546/* MC_CMD_PTP_IN_ENABLE msgrequest */
547#define MC_CMD_PTP_IN_ENABLE_LEN 16
548#define MC_CMD_PTP_IN_CMD_OFST 0
549#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
550#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
551#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
552#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
553#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
554#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
555#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
556
557/* MC_CMD_PTP_IN_DISABLE msgrequest */
558#define MC_CMD_PTP_IN_DISABLE_LEN 8
559/* MC_CMD_PTP_IN_CMD_OFST 0 */
560/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
561
562/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
563#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
564#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255
565#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
566/* MC_CMD_PTP_IN_CMD_OFST 0 */
567/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
568#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
569#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
572#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243
573
574/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
575#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
576/* MC_CMD_PTP_IN_CMD_OFST 0 */
577/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
578
579/* MC_CMD_PTP_IN_STATUS msgrequest */
580#define MC_CMD_PTP_IN_STATUS_LEN 8
581/* MC_CMD_PTP_IN_CMD_OFST 0 */
582/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
583
584/* MC_CMD_PTP_IN_ADJUST msgrequest */
585#define MC_CMD_PTP_IN_ADJUST_LEN 24
586/* MC_CMD_PTP_IN_CMD_OFST 0 */
587/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
588#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
589#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
590#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
591#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
592#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
593#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
594#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
595
596/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
597#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
598/* MC_CMD_PTP_IN_CMD_OFST 0 */
599/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
600#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
601#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
602#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
603#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
604#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
605
606/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
607#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
608/* MC_CMD_PTP_IN_CMD_OFST 0 */
609/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
610
611/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
612#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
613/* MC_CMD_PTP_IN_CMD_OFST 0 */
614/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
615#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
616
617/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
618#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
619/* MC_CMD_PTP_IN_CMD_OFST 0 */
620/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
621
622/* MC_CMD_PTP_IN_DEBUG msgrequest */
623#define MC_CMD_PTP_IN_DEBUG_LEN 12
624/* MC_CMD_PTP_IN_CMD_OFST 0 */
625/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
626#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
627
628/* MC_CMD_PTP_OUT msgresponse */
629#define MC_CMD_PTP_OUT_LEN 0
630
631/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
632#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
633#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
634#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
635
636/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
637#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
638#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
639#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
640
641/* MC_CMD_PTP_OUT_STATUS msgresponse */
642#define MC_CMD_PTP_OUT_STATUS_LEN 64
643#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
644#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
645#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
646#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
647#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
648#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
649#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
650#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
651#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
652#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
653#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
654#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
655#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
656#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
657#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
658#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
659
660/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
661#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
662#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
663#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
664#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
665#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
666#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
667#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
668#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
669#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
670#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
671#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
672#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
673
674/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
675#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
676#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
677#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
678#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
679#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
680#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
681#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
682#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
683#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
684#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
685#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
686#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
687#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
688
689/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
690#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
691#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
692#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
693#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
694
695
696/***********************************/
697/* MC_CMD_CSR_READ32
698 * Read 32bit words from the indirect memory map.
699 */
700#define MC_CMD_CSR_READ32 0xc
701
702/* MC_CMD_CSR_READ32_IN msgrequest */
703#define MC_CMD_CSR_READ32_IN_LEN 12
704#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
705#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
706#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
707
708/* MC_CMD_CSR_READ32_OUT msgresponse */
709#define MC_CMD_CSR_READ32_OUT_LENMIN 4
710#define MC_CMD_CSR_READ32_OUT_LENMAX 252
711#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
712#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
713#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
714#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
715#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
716
717
718/***********************************/
719/* MC_CMD_CSR_WRITE32
720 * Write 32bit dwords to the indirect memory map.
721 */
722#define MC_CMD_CSR_WRITE32 0xd
723
724/* MC_CMD_CSR_WRITE32_IN msgrequest */
725#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
726#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
727#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
728#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
729#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
730#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
731#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
732#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
733#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
734
735/* MC_CMD_CSR_WRITE32_OUT msgresponse */
736#define MC_CMD_CSR_WRITE32_OUT_LEN 4
737#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
738
739
740/***********************************/
741/* MC_CMD_STACKINFO
742 * Get stack information.
743 */
744#define MC_CMD_STACKINFO 0xf
745
746/* MC_CMD_STACKINFO_IN msgrequest */
747#define MC_CMD_STACKINFO_IN_LEN 0
748
749/* MC_CMD_STACKINFO_OUT msgresponse */
750#define MC_CMD_STACKINFO_OUT_LENMIN 12
751#define MC_CMD_STACKINFO_OUT_LENMAX 252
752#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
753#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
754#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
755#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
756#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
757
758
759/***********************************/
760/* MC_CMD_MDIO_READ
761 * MDIO register read.
414 */ 762 */
415#define MC_CMD_MDIO_READ 0x10 763#define MC_CMD_MDIO_READ 0x10
416#define MC_CMD_MDIO_READ_IN_LEN 16
417#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
418#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
419#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
420#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
421#define MC_CMD_MDIO_READ_OUT_LEN 8
422#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
423#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
424
425/* MC_CMD_MDIO_WRITE:
426 * MDIO register write
427 */
428#define MC_CMD_MDIO_WRITE 0x11
429#define MC_CMD_MDIO_WRITE_IN_LEN 20
430#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
431#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
432#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
433#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
434#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
435#define MC_CMD_MDIO_WRITE_OUT_LEN 4
436#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
437 764
438/* By default all the MCDI MDIO operations perform clause45 mode. 765/* MC_CMD_MDIO_READ_IN msgrequest */
439 * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. 766#define MC_CMD_MDIO_READ_IN_LEN 16
440 */ 767#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
441#define MC_CMD_MDIO_CLAUSE22 32 768#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
769#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
770#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
771#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
772#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
773#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
442 774
443/* There are two MDIO buses: one for the internal PHY, and one for external 775/* MC_CMD_MDIO_READ_OUT msgresponse */
444 * devices. 776#define MC_CMD_MDIO_READ_OUT_LEN 8
445 */ 777#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
446#define MC_CMD_MDIO_BUS_INTERNAL 0 778#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
447#define MC_CMD_MDIO_BUS_EXTERNAL 1 779#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
448 780
449/* The MDIO commands return the raw status bits from the MDIO block. A "good" 781
450 * transaction should have the DONE bit set and all other bits clear. 782/***********************************/
783/* MC_CMD_MDIO_WRITE
784 * MDIO register write.
451 */ 785 */
452#define MC_CMD_MDIO_STATUS_GOOD 0x08 786#define MC_CMD_MDIO_WRITE 0x11
453 787
788/* MC_CMD_MDIO_WRITE_IN msgrequest */
789#define MC_CMD_MDIO_WRITE_IN_LEN 20
790#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
791/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
792/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
793#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
794#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
795/* MC_CMD_MDIO_CLAUSE22 0x20 */
796#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
797#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
454 798
455/* MC_CMD_DBI_WRITE: (debug) 799/* MC_CMD_MDIO_WRITE_OUT msgresponse */
456 * Write DBI register(s) 800#define MC_CMD_MDIO_WRITE_OUT_LEN 4
457 * 801#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
458 * Host: address, byte-enables (and VF selection, and cs2 flag), 802/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
459 * value [,address ...] 803
460 * MC: nothing 804
805/***********************************/
806/* MC_CMD_DBI_WRITE
807 * Write DBI register(s).
461 */ 808 */
462#define MC_CMD_DBI_WRITE 0x12 809#define MC_CMD_DBI_WRITE 0x12
463#define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \ 810
464 (12 * (_numwords)) 811/* MC_CMD_DBI_WRITE_IN msgrequest */
465#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \ 812#define MC_CMD_DBI_WRITE_IN_LENMIN 12
466 (((_word) * 12) + 0) 813#define MC_CMD_DBI_WRITE_IN_LENMAX 252
467#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \ 814#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
468 (((_word) * 12) + 4) 815#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
469#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \ 816#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
470 (((_word) * 12) + 8) 817#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
471#define MC_CMD_DBI_WRITE_OUT_LEN 0 818#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
472 819
473/* MC_CMD_DBI_READ: (debug) 820/* MC_CMD_DBI_WRITE_OUT msgresponse */
474 * Read DBI register(s) 821#define MC_CMD_DBI_WRITE_OUT_LEN 0
475 * 822
476 * Host: address, [,address ...] 823/* MC_CMD_DBIWROP_TYPEDEF structuredef */
477 * MC: value [,value ...] 824#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
478 * (note: this does not support reading from VFs, but is retained for backwards 825#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
479 * compatibility; see MC_CMD_DBI_READX below) 826#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
480 */ 827#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
481#define MC_CMD_DBI_READ 0x13 828#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
482#define MC_CMD_DBI_READ_IN_LEN(_numwords) \ 829#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
483 (4 * (_numwords)) 830#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
484#define MC_CMD_DBI_READ_OUT_LEN(_numwords) \ 831#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
485 (4 * (_numwords)) 832#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
486 833#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
487/* MC_CMD_PORT_READ32: (debug) 834
835
836/***********************************/
837/* MC_CMD_PORT_READ32
488 * Read a 32-bit register from the indirect port register map. 838 * Read a 32-bit register from the indirect port register map.
489 *
490 * The port to access is implied by the Shared memory channel used.
491 */ 839 */
492#define MC_CMD_PORT_READ32 0x14 840#define MC_CMD_PORT_READ32 0x14
493#define MC_CMD_PORT_READ32_IN_LEN 4
494#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
495#define MC_CMD_PORT_READ32_OUT_LEN 8
496#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
497#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
498 841
499/* MC_CMD_PORT_WRITE32: (debug) 842/* MC_CMD_PORT_READ32_IN msgrequest */
843#define MC_CMD_PORT_READ32_IN_LEN 4
844#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
845
846/* MC_CMD_PORT_READ32_OUT msgresponse */
847#define MC_CMD_PORT_READ32_OUT_LEN 8
848#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
849#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
850
851
852/***********************************/
853/* MC_CMD_PORT_WRITE32
500 * Write a 32-bit register to the indirect port register map. 854 * Write a 32-bit register to the indirect port register map.
501 *
502 * The port to access is implied by the Shared memory channel used.
503 */ 855 */
504#define MC_CMD_PORT_WRITE32 0x15 856#define MC_CMD_PORT_WRITE32 0x15
505#define MC_CMD_PORT_WRITE32_IN_LEN 8 857
506#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 858/* MC_CMD_PORT_WRITE32_IN msgrequest */
507#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 859#define MC_CMD_PORT_WRITE32_IN_LEN 8
508#define MC_CMD_PORT_WRITE32_OUT_LEN 4 860#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
509#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 861#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
510 862
511/* MC_CMD_PORT_READ128: (debug) 863/* MC_CMD_PORT_WRITE32_OUT msgresponse */
512 * Read a 128-bit register from indirect port register map 864#define MC_CMD_PORT_WRITE32_OUT_LEN 4
513 * 865#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
514 * The port to access is implied by the Shared memory channel used. 866
867
868/***********************************/
869/* MC_CMD_PORT_READ128
870 * Read a 128-bit register from the indirect port register map.
515 */ 871 */
516#define MC_CMD_PORT_READ128 0x16 872#define MC_CMD_PORT_READ128 0x16
517#define MC_CMD_PORT_READ128_IN_LEN 4 873
518#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 874/* MC_CMD_PORT_READ128_IN msgrequest */
519#define MC_CMD_PORT_READ128_OUT_LEN 20 875#define MC_CMD_PORT_READ128_IN_LEN 4
520#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 876#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
521#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 877
522 878/* MC_CMD_PORT_READ128_OUT msgresponse */
523/* MC_CMD_PORT_WRITE128: (debug) 879#define MC_CMD_PORT_READ128_OUT_LEN 20
524 * Write a 128-bit register to indirect port register map. 880#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
525 * 881#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
526 * The port to access is implied by the Shared memory channel used. 882#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
883
884
885/***********************************/
886/* MC_CMD_PORT_WRITE128
887 * Write a 128-bit register to the indirect port register map.
527 */ 888 */
528#define MC_CMD_PORT_WRITE128 0x17 889#define MC_CMD_PORT_WRITE128 0x17
529#define MC_CMD_PORT_WRITE128_IN_LEN 20 890
530#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 891/* MC_CMD_PORT_WRITE128_IN msgrequest */
531#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 892#define MC_CMD_PORT_WRITE128_IN_LEN 20
532#define MC_CMD_PORT_WRITE128_OUT_LEN 4 893#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
533#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 894#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
534 895#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
535/* MC_CMD_GET_BOARD_CFG: 896
536 * Returns the MC firmware configuration structure 897/* MC_CMD_PORT_WRITE128_OUT msgresponse */
537 * 898#define MC_CMD_PORT_WRITE128_OUT_LEN 4
538 * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of 899#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
539 * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file 900
540 * for a specific board type, but otherwise have no meaning to the MC; they 901
541 * are used by the driver to manage selection of appropriate firmware updates. 902/***********************************/
903/* MC_CMD_GET_BOARD_CFG
904 * Returns the MC firmware configuration structure.
542 */ 905 */
543#define MC_CMD_GET_BOARD_CFG 0x18 906#define MC_CMD_GET_BOARD_CFG 0x18
544#define MC_CMD_GET_BOARD_CFG_IN_LEN 0 907
545#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96 908/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
546#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 909#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
547#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 910
548#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 911/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
549#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 912#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
550#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 913#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
551#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 914#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
552#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 915#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
553#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 916#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
554#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 917#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
555#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 918#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
556#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 919#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
557#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 920#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
558#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 921#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
559#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 922#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
560#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24 923#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
561 924#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
562/* MC_CMD_DBI_READX: (debug) 925#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
563 * Read DBI register(s) -- extended functionality 926#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
564 * 927#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
565 * Host: vf selection, address, [,vf selection ...] 928/* Enum values, see field(s): */
566 * MC: value [,value ...] 929/* CAPABILITIES_PORT0 */
930#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
931#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
932#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
933#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
934#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
935#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
936#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
937#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
938#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
939#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
940#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
941#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
942
943
944/***********************************/
945/* MC_CMD_DBI_READX
946 * Read DBI register(s).
567 */ 947 */
568#define MC_CMD_DBI_READX 0x19 948#define MC_CMD_DBI_READX 0x19
569#define MC_CMD_DBI_READX_IN_LEN(_numwords) \
570 (8*(_numwords))
571#define MC_CMD_DBI_READX_OUT_LEN(_numwords) \
572 (4*(_numwords))
573 949
574/* MC_CMD_SET_RAND_SEED: 950/* MC_CMD_DBI_READX_IN msgrequest */
575 * Set the 16byte seed for the MC pseudo-random generator 951#define MC_CMD_DBI_READX_IN_LENMIN 8
952#define MC_CMD_DBI_READX_IN_LENMAX 248
953#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
954#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
955#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
956#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
957#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
958#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
959#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
960
961/* MC_CMD_DBI_READX_OUT msgresponse */
962#define MC_CMD_DBI_READX_OUT_LENMIN 4
963#define MC_CMD_DBI_READX_OUT_LENMAX 252
964#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
965#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
966#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
967#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
968#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
969
970
971/***********************************/
972/* MC_CMD_SET_RAND_SEED
973 * Set the 16byte seed for the MC pseudo-random generator.
576 */ 974 */
577#define MC_CMD_SET_RAND_SEED 0x1a 975#define MC_CMD_SET_RAND_SEED 0x1a
578#define MC_CMD_SET_RAND_SEED_IN_LEN 16
579#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
580#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
581 976
582/* MC_CMD_LTSSM_HIST: (debug) 977/* MC_CMD_SET_RAND_SEED_IN msgrequest */
583 * Retrieve the history of the LTSSM, if the build supports it. 978#define MC_CMD_SET_RAND_SEED_IN_LEN 16
584 * 979#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
585 * Host: nothing 980#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
586 * MC: variable number of LTSSM values, as bytes 981
587 * The history is read-to-clear. 982/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
983#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
984
985
986/***********************************/
987/* MC_CMD_LTSSM_HIST
988 * Retrieve the history of the PCIE LTSSM.
588 */ 989 */
589#define MC_CMD_LTSSM_HIST 0x1b 990#define MC_CMD_LTSSM_HIST 0x1b
590 991
591/* MC_CMD_DRV_ATTACH: 992/* MC_CMD_LTSSM_HIST_IN msgrequest */
592 * Inform MCPU that this port is managed on the host (i.e. driver active) 993#define MC_CMD_LTSSM_HIST_IN_LEN 0
994
995/* MC_CMD_LTSSM_HIST_OUT msgresponse */
996#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
997#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
998#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
999#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
1000#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
1001#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
1002#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
1003
1004
1005/***********************************/
1006/* MC_CMD_DRV_ATTACH
1007 * Inform MCPU that this port is managed on the host.
593 */ 1008 */
594#define MC_CMD_DRV_ATTACH 0x1c 1009#define MC_CMD_DRV_ATTACH 0x1c
595#define MC_CMD_DRV_ATTACH_IN_LEN 8
596#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
597#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
598#define MC_CMD_DRV_ATTACH_OUT_LEN 4
599#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
600 1010
601/* MC_CMD_NCSI_PROD: (debug) 1011/* MC_CMD_DRV_ATTACH_IN msgrequest */
602 * Trigger an NC-SI event (and possibly an AEN in response) 1012#define MC_CMD_DRV_ATTACH_IN_LEN 8
1013#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
1014#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
1015
1016/* MC_CMD_DRV_ATTACH_OUT msgresponse */
1017#define MC_CMD_DRV_ATTACH_OUT_LEN 4
1018#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
1019
1020
1021/***********************************/
1022/* MC_CMD_NCSI_PROD
1023 * Trigger an NC-SI event.
603 */ 1024 */
604#define MC_CMD_NCSI_PROD 0x1d 1025#define MC_CMD_NCSI_PROD 0x1d
605#define MC_CMD_NCSI_PROD_IN_LEN 4 1026
606#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0 1027/* MC_CMD_NCSI_PROD_IN msgrequest */
607#define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0 1028#define MC_CMD_NCSI_PROD_IN_LEN 4
608#define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1 1029#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
609#define MC_CMD_NCSI_PROD_RESET_LBN 1 1030#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
610#define MC_CMD_NCSI_PROD_RESET_WIDTH 1 1031#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
611#define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2 1032#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
612#define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1 1033#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
613#define MC_CMD_NCSI_PROD_OUT_LEN 0 1034#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
614 1035#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
615/* Enumeration */ 1036#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
616#define MC_CMD_NCSI_PROD_LINKCHANGE 0 1037#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
617#define MC_CMD_NCSI_PROD_RESET 1 1038#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
618#define MC_CMD_NCSI_PROD_DRVATTACH 2 1039
619 1040/* MC_CMD_NCSI_PROD_OUT msgresponse */
620/* MC_CMD_DEVEL: (debug) 1041#define MC_CMD_NCSI_PROD_OUT_LEN 0
621 * Reserved for development 1042
622 */ 1043
623#define MC_CMD_DEVEL 0x1e 1044/***********************************/
624 1045/* MC_CMD_SHMUART
625/* MC_CMD_SHMUART: (debug)
626 * Route UART output to circular buffer in shared memory instead. 1046 * Route UART output to circular buffer in shared memory instead.
627 */ 1047 */
628#define MC_CMD_SHMUART 0x1f 1048#define MC_CMD_SHMUART 0x1f
629#define MC_CMD_SHMUART_IN_FLAG_OFST 0
630#define MC_CMD_SHMUART_IN_LEN 4
631#define MC_CMD_SHMUART_OUT_LEN 0
632 1049
633/* MC_CMD_PORT_RESET: 1050/* MC_CMD_SHMUART_IN msgrequest */
634 * Generic per-port reset. There is no equivalent for per-board reset. 1051#define MC_CMD_SHMUART_IN_LEN 4
635 * 1052#define MC_CMD_SHMUART_IN_FLAG_OFST 0
636 * Locks required: None 1053
637 * Return code: 0, ETIME 1054/* MC_CMD_SHMUART_OUT msgresponse */
638 */ 1055#define MC_CMD_SHMUART_OUT_LEN 0
639#define MC_CMD_PORT_RESET 0x20 1056
640#define MC_CMD_PORT_RESET_IN_LEN 0 1057
641#define MC_CMD_PORT_RESET_OUT_LEN 0 1058/***********************************/
642 1059/* MC_CMD_ENTITY_RESET
643/* MC_CMD_RESOURCE_LOCK: 1060 * Generic per-port reset.
644 * Generic resource lock/unlock interface. 1061 */
645 * 1062#define MC_CMD_ENTITY_RESET 0x20
646 * Locks required: None 1063
647 * Return code: 0, 1064/* MC_CMD_ENTITY_RESET_IN msgrequest */
648 * EBUSY (if trylock is contended by other port), 1065#define MC_CMD_ENTITY_RESET_IN_LEN 4
649 * EDEADLK (if trylock is already acquired by this port) 1066#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
650 * EINVAL (if unlock doesn't own the lock) 1067#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
651 */ 1068#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
652#define MC_CMD_RESOURCE_LOCK 0x21 1069
653#define MC_CMD_RESOURCE_LOCK_IN_LEN 8 1070/* MC_CMD_ENTITY_RESET_OUT msgresponse */
654#define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0 1071#define MC_CMD_ENTITY_RESET_OUT_LEN 0
655#define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1 1072
656#define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0 1073
657#define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4 1074/***********************************/
658#define MC_CMD_RESOURCE_LOCK_I2C 2 1075/* MC_CMD_PCIE_CREDITS
659#define MC_CMD_RESOURCE_LOCK_PHY 3 1076 * Read instantaneous and minimum flow control thresholds.
660#define MC_CMD_RESOURCE_LOCK_OUT_LEN 0 1077 */
661 1078#define MC_CMD_PCIE_CREDITS 0x21
662/* MC_CMD_SPI_COMMAND: (variadic in, variadic out) 1079
663 * Read/Write to/from the SPI device. 1080/* MC_CMD_PCIE_CREDITS_IN msgrequest */
664 * 1081#define MC_CMD_PCIE_CREDITS_IN_LEN 8
665 * Locks required: SPI_LOCK 1082#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
666 * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held) 1083#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
667 */ 1084
668#define MC_CMD_SPI_COMMAND 0x22 1085/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
669#define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes)) 1086#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
670#define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0 1087#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
671#define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0 1088#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
672#define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4 1089#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
673#define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8 1090#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
674/* Data to write here */ 1091#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
675#define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12 1092#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
676#define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes) 1093#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
677/* Data read here */ 1094#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
678#define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0 1095#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
679 1096#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
680/* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out) 1097#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
681 * Read/Write to/from the I2C bus. 1098#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
682 * 1099#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
683 * Locks required: I2C_LOCK 1100#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
684 * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held) 1101#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
685 */ 1102#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
686#define MC_CMD_I2C_RW 0x23 1103
687#define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes)) 1104
688#define MC_CMD_I2C_RW_IN_ARGS_OFST 0 1105/***********************************/
689#define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0 1106/* MC_CMD_RXD_MONITOR
690#define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4 1107 * Get histogram of RX queue fill level.
691/* Data to write here */ 1108 */
692#define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8 1109#define MC_CMD_RXD_MONITOR 0x22
693#define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes) 1110
694/* Data read here */ 1111/* MC_CMD_RXD_MONITOR_IN msgrequest */
695#define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0 1112#define MC_CMD_RXD_MONITOR_IN_LEN 12
696 1113#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
697/* Generic phy capability bitmask */ 1114#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
698#define MC_CMD_PHY_CAP_10HDX_LBN 1 1115#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
699#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 1116
700#define MC_CMD_PHY_CAP_10FDX_LBN 2 1117/* MC_CMD_RXD_MONITOR_OUT msgresponse */
701#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 1118#define MC_CMD_RXD_MONITOR_OUT_LEN 80
702#define MC_CMD_PHY_CAP_100HDX_LBN 3 1119#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
703#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 1120#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
704#define MC_CMD_PHY_CAP_100FDX_LBN 4 1121#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
705#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 1122#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
706#define MC_CMD_PHY_CAP_1000HDX_LBN 5 1123#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
707#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 1124#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
708#define MC_CMD_PHY_CAP_1000FDX_LBN 6 1125#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
709#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 1126#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
710#define MC_CMD_PHY_CAP_10000FDX_LBN 7 1127#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
711#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 1128#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
712#define MC_CMD_PHY_CAP_PAUSE_LBN 8 1129#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
713#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 1130#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
714#define MC_CMD_PHY_CAP_ASYM_LBN 9 1131#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
715#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 1132#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
716#define MC_CMD_PHY_CAP_AN_LBN 10 1133#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
717#define MC_CMD_PHY_CAP_AN_WIDTH 1 1134#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
718 1135#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
719/* Generic loopback enumeration */ 1136#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
720#define MC_CMD_LOOPBACK_NONE 0 1137#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
721#define MC_CMD_LOOPBACK_DATA 1 1138#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
722#define MC_CMD_LOOPBACK_GMAC 2 1139
723#define MC_CMD_LOOPBACK_XGMII 3 1140
724#define MC_CMD_LOOPBACK_XGXS 4 1141/***********************************/
725#define MC_CMD_LOOPBACK_XAUI 5 1142/* MC_CMD_PUTS
726#define MC_CMD_LOOPBACK_GMII 6 1143 * puts(3) implementation over MCDI
727#define MC_CMD_LOOPBACK_SGMII 7 1144 */
728#define MC_CMD_LOOPBACK_XGBR 8 1145#define MC_CMD_PUTS 0x23
729#define MC_CMD_LOOPBACK_XFI 9 1146
730#define MC_CMD_LOOPBACK_XAUI_FAR 10 1147/* MC_CMD_PUTS_IN msgrequest */
731#define MC_CMD_LOOPBACK_GMII_FAR 11 1148#define MC_CMD_PUTS_IN_LENMIN 13
732#define MC_CMD_LOOPBACK_SGMII_FAR 12 1149#define MC_CMD_PUTS_IN_LENMAX 255
733#define MC_CMD_LOOPBACK_XFI_FAR 13 1150#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
734#define MC_CMD_LOOPBACK_GPHY 14 1151#define MC_CMD_PUTS_IN_DEST_OFST 0
735#define MC_CMD_LOOPBACK_PHYXS 15 1152#define MC_CMD_PUTS_IN_UART_LBN 0
736#define MC_CMD_LOOPBACK_PCS 16 1153#define MC_CMD_PUTS_IN_UART_WIDTH 1
737#define MC_CMD_LOOPBACK_PMAPMD 17 1154#define MC_CMD_PUTS_IN_PORT_LBN 1
738#define MC_CMD_LOOPBACK_XPORT 18 1155#define MC_CMD_PUTS_IN_PORT_WIDTH 1
739#define MC_CMD_LOOPBACK_XGMII_WS 19 1156#define MC_CMD_PUTS_IN_DHOST_OFST 4
740#define MC_CMD_LOOPBACK_XAUI_WS 20 1157#define MC_CMD_PUTS_IN_DHOST_LEN 6
741#define MC_CMD_LOOPBACK_XAUI_WS_FAR 21 1158#define MC_CMD_PUTS_IN_STRING_OFST 12
742#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22 1159#define MC_CMD_PUTS_IN_STRING_LEN 1
743#define MC_CMD_LOOPBACK_GMII_WS 23 1160#define MC_CMD_PUTS_IN_STRING_MINNUM 1
744#define MC_CMD_LOOPBACK_XFI_WS 24 1161#define MC_CMD_PUTS_IN_STRING_MAXNUM 243
745#define MC_CMD_LOOPBACK_XFI_WS_FAR 25 1162
746#define MC_CMD_LOOPBACK_PHYXS_WS 26 1163/* MC_CMD_PUTS_OUT msgresponse */
747 1164#define MC_CMD_PUTS_OUT_LEN 0
748/* Generic PHY statistics enumeration */ 1165
749#define MC_CMD_OUI 0 1166
750#define MC_CMD_PMA_PMD_LINK_UP 1 1167/***********************************/
751#define MC_CMD_PMA_PMD_RX_FAULT 2 1168/* MC_CMD_GET_PHY_CFG
752#define MC_CMD_PMA_PMD_TX_FAULT 3 1169 * Report PHY configuration.
753#define MC_CMD_PMA_PMD_SIGNAL 4
754#define MC_CMD_PMA_PMD_SNR_A 5
755#define MC_CMD_PMA_PMD_SNR_B 6
756#define MC_CMD_PMA_PMD_SNR_C 7
757#define MC_CMD_PMA_PMD_SNR_D 8
758#define MC_CMD_PCS_LINK_UP 9
759#define MC_CMD_PCS_RX_FAULT 10
760#define MC_CMD_PCS_TX_FAULT 11
761#define MC_CMD_PCS_BER 12
762#define MC_CMD_PCS_BLOCK_ERRORS 13
763#define MC_CMD_PHYXS_LINK_UP 14
764#define MC_CMD_PHYXS_RX_FAULT 15
765#define MC_CMD_PHYXS_TX_FAULT 16
766#define MC_CMD_PHYXS_ALIGN 17
767#define MC_CMD_PHYXS_SYNC 18
768#define MC_CMD_AN_LINK_UP 19
769#define MC_CMD_AN_COMPLETE 20
770#define MC_CMD_AN_10GBT_STATUS 21
771#define MC_CMD_CL22_LINK_UP 22
772#define MC_CMD_PHY_NSTATS 23
773
774/* MC_CMD_GET_PHY_CFG:
775 * Report PHY configuration. This guarantees to succeed even if the PHY is in
776 * a "zombie" state.
777 *
778 * Locks required: None
779 * Return code: 0
780 */ 1170 */
781#define MC_CMD_GET_PHY_CFG 0x24 1171#define MC_CMD_GET_PHY_CFG 0x24
782 1172
783#define MC_CMD_GET_PHY_CFG_IN_LEN 0 1173/* MC_CMD_GET_PHY_CFG_IN msgrequest */
784#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 1174#define MC_CMD_GET_PHY_CFG_IN_LEN 0
785 1175
786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 1176/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0 1177#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1 1178#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
789#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1 1179#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
790#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1 1180#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
791#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2 1181#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
792#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1 1182#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3 1183#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1 1184#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4 1185#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1 1186#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5 1187#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1 1188#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_BIST_LBN 6 1189#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
800#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1 1190#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
801#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 1191#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
802/* Bitmask of supported capabilities */ 1192#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
803#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 1193#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
804#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 1194#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
805#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 1195#define MC_CMD_PHY_CAP_10HDX_LBN 1
806/* PHY statistics bitmap */ 1196#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
807#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 1197#define MC_CMD_PHY_CAP_10FDX_LBN 2
808/* PHY type/name string */ 1198#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
809#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 1199#define MC_CMD_PHY_CAP_100HDX_LBN 3
810#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 1200#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
811#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 1201#define MC_CMD_PHY_CAP_100FDX_LBN 4
812#define MC_CMD_MEDIA_XAUI 1 1202#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
813#define MC_CMD_MEDIA_CX4 2 1203#define MC_CMD_PHY_CAP_1000HDX_LBN 5
814#define MC_CMD_MEDIA_KX4 3 1204#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
815#define MC_CMD_MEDIA_XFP 4 1205#define MC_CMD_PHY_CAP_1000FDX_LBN 6
816#define MC_CMD_MEDIA_SFP_PLUS 5 1206#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
817#define MC_CMD_MEDIA_BASE_T 6 1207#define MC_CMD_PHY_CAP_10000FDX_LBN 7
818/* MDIO "MMDS" supported */ 1208#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
819#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 1209#define MC_CMD_PHY_CAP_PAUSE_LBN 8
820/* Native clause 22 */ 1210#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
821#define MC_CMD_MMD_CLAUSE22 0 1211#define MC_CMD_PHY_CAP_ASYM_LBN 9
822#define MC_CMD_MMD_CLAUSE45_PMAPMD 1 1212#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
823#define MC_CMD_MMD_CLAUSE45_WIS 2 1213#define MC_CMD_PHY_CAP_AN_LBN 10
824#define MC_CMD_MMD_CLAUSE45_PCS 3 1214#define MC_CMD_PHY_CAP_AN_WIDTH 1
825#define MC_CMD_MMD_CLAUSE45_PHYXS 4 1215#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
826#define MC_CMD_MMD_CLAUSE45_DTEXS 5 1216#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
827#define MC_CMD_MMD_CLAUSE45_TC 6 1217#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
828#define MC_CMD_MMD_CLAUSE45_AN 7 1218#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
829/* Clause22 proxied over clause45 by PHY */ 1219#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
830#define MC_CMD_MMD_CLAUSE45_C22EXT 29 1220#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
831#define MC_CMD_MMD_CLAUSE45_VEND1 30 1221#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
832#define MC_CMD_MMD_CLAUSE45_VEND2 31 1222#define MC_CMD_MEDIA_CX4 0x2 /* enum */
833/* PHY stepping version */ 1223#define MC_CMD_MEDIA_KX4 0x3 /* enum */
834#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 1224#define MC_CMD_MEDIA_XFP 0x4 /* enum */
835#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 1225#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
836 1226#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
837/* MC_CMD_START_BIST: 1227#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
1228#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
1229#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
1230#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
1231#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
1232#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
1233#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
1234#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
1235#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
1236#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
1237#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
1238#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
1239#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
1240#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
1241
1242
1243/***********************************/
1244/* MC_CMD_START_BIST
838 * Start a BIST test on the PHY. 1245 * Start a BIST test on the PHY.
839 *
840 * Locks required: PHY_LOCK if doing a PHY BIST
841 * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
842 */ 1246 */
843#define MC_CMD_START_BIST 0x25 1247#define MC_CMD_START_BIST 0x25
844#define MC_CMD_START_BIST_IN_LEN 4 1248
845#define MC_CMD_START_BIST_IN_TYPE_OFST 0 1249/* MC_CMD_START_BIST_IN msgrequest */
846#define MC_CMD_START_BIST_OUT_LEN 0 1250#define MC_CMD_START_BIST_IN_LEN 4
847 1251#define MC_CMD_START_BIST_IN_TYPE_OFST 0
848/* Run the PHY's short cable BIST */ 1252#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
849#define MC_CMD_PHY_BIST_CABLE_SHORT 1 1253#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
850/* Run the PHY's long cable BIST */ 1254#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
851#define MC_CMD_PHY_BIST_CABLE_LONG 2 1255#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
852/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */ 1256#define MC_CMD_PHY_BIST 0x5 /* enum */
853#define MC_CMD_BPX_SERDES_BIST 3 1257
854/* Run the MC loopback tests */ 1258/* MC_CMD_START_BIST_OUT msgresponse */
855#define MC_CMD_MC_LOOPBACK_BIST 4 1259#define MC_CMD_START_BIST_OUT_LEN 0
856/* Run the PHY's standard BIST */ 1260
857#define MC_CMD_PHY_BIST 5 1261
858 1262/***********************************/
859/* MC_CMD_POLL_PHY_BIST: (variadic output) 1263/* MC_CMD_POLL_BIST
860 * Poll for BIST completion 1264 * Poll for BIST completion.
861 *
862 * Returns a single status code, and optionally some PHY specific
863 * bist output. The driver should only consume the BIST output
864 * after validating OUTLEN and PHY_CFG.PHY_TYPE.
865 *
866 * If a driver can't successfully parse the BIST output, it should
867 * still respect the pass/Fail in OUT.RESULT
868 *
869 * Locks required: PHY_LOCK if doing a PHY BIST
870 * Return code: 0, EACCES (if PHY_LOCK is not held)
871 */ 1265 */
872#define MC_CMD_POLL_BIST 0x26 1266#define MC_CMD_POLL_BIST 0x26
873#define MC_CMD_POLL_BIST_IN_LEN 0 1267
874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN 1268/* MC_CMD_POLL_BIST_IN msgrequest */
875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 1269#define MC_CMD_POLL_BIST_IN_LEN 0
876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 1270
877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 1271/* MC_CMD_POLL_BIST_OUT msgresponse */
878#define MC_CMD_POLL_BIST_RUNNING 1 1272#define MC_CMD_POLL_BIST_OUT_LEN 8
879#define MC_CMD_POLL_BIST_PASSED 2 1273#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
880#define MC_CMD_POLL_BIST_FAILED 3 1274#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
881#define MC_CMD_POLL_BIST_TIMEOUT 4 1275#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
882/* Generic: */ 1276#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 1277#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
884/* SFT9001-specific: */ 1278#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
885#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 1279
886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 1280/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 1281#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 1282/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 1283/* Enum values, see field(s): */
890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 1284/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 1285#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 1286#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
893#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1 1287#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2 1288#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
895#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3 1289#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
896#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4 1290#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
897#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9 1291#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
898/* mrsfp "PHY" driver: */ 1292#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
899#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 1293#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
900#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0 1294#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
901#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1 1295#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
902#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2 1296/* Enum values, see field(s): */
903#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3 1297/* CABLE_STATUS_A */
904#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4 1298#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
905#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5 1299/* Enum values, see field(s): */
906#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6 1300/* CABLE_STATUS_A */
907#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7 1301#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
908#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8 1302/* Enum values, see field(s): */
909 1303/* CABLE_STATUS_A */
910/* MC_CMD_PHY_SPI: (variadic in, variadic out) 1304
911 * Read/Write/Erase the PHY SPI device 1305/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
912 * 1306#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
913 * Locks required: PHY_LOCK 1307/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
914 * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held) 1308/* Enum values, see field(s): */
915 */ 1309/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
916#define MC_CMD_PHY_SPI 0x27 1310#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
917#define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes)) 1311#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
918#define MC_CMD_PHY_SPI_IN_ARGS_OFST 0 1312#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
919#define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0 1313#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
920#define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4 1314#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
921#define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8 1315#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
922/* Data to write here */ 1316#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
923#define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12 1317#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
924#define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes) 1318#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
925/* Data read here */ 1319#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
926#define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0 1320
927 1321
928 1322/***********************************/
929/* MC_CMD_GET_LOOPBACK_MODES: 1323/* MC_CMD_FLUSH_RX_QUEUES
930 * Returns a bitmask of loopback modes evailable at each speed. 1324 * Flush receive queue(s).
931 * 1325 */
932 * Locks required: None 1326#define MC_CMD_FLUSH_RX_QUEUES 0x27
933 * Return code: 0 1327
1328/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
1329#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
1330#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
1331#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
1332#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
1333#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
1334#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
1335#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
1336
1337/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
1338#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
1339
1340
1341/***********************************/
1342/* MC_CMD_GET_LOOPBACK_MODES
1343 * Get port's loopback modes.
934 */ 1344 */
935#define MC_CMD_GET_LOOPBACK_MODES 0x28 1345#define MC_CMD_GET_LOOPBACK_MODES 0x28
936#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 1346
937#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32 1347/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
938#define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0 1348#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
939#define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8 1349
940#define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16 1350/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
941#define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24 1351#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
942 1352#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
943/* Flow control enumeration */ 1353#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
944#define MC_CMD_FCNTL_OFF 0 1354#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
945#define MC_CMD_FCNTL_RESPOND 1 1355#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
946#define MC_CMD_FCNTL_BIDIR 2 1356#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
947/* Auto - Use what the link has autonegotiated 1357#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
948 * - The driver should modify the advertised capabilities via SET_LINK.CAP 1358#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
949 * to control the negotiated flow control mode. 1359#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
950 * - Can only be set if the PHY supports PAUSE+ASYM capabilities 1360#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
951 * - Never returned by GET_LINK as the value programmed into the MAC 1361#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
952 */ 1362#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
953#define MC_CMD_FCNTL_AUTO 3 1363#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
954 1364#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
955/* Generic mac fault bitmask */ 1365#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
956#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 1366#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
957#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 1367#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
958#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 1368#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
959#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 1369#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
960#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 1370#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
961#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 1371#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
962 1372#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
963/* MC_CMD_GET_LINK: 1373#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
964 * Read the unified MAC/PHY link state 1374#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
965 * 1375#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
966 * Locks required: None 1376#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
967 * Return code: 0, ETIME 1377#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
1378#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
1379#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
1380#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
1381#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
1382#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
1383#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
1384#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
1385#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
1386#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
1387/* Enum values, see field(s): */
1388/* 100M */
1389#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
1390#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
1391#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
1392#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
1393/* Enum values, see field(s): */
1394/* 100M */
1395#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
1396#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
1397#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
1398#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
1399/* Enum values, see field(s): */
1400/* 100M */
1401
1402
1403/***********************************/
1404/* MC_CMD_GET_LINK
1405 * Read the unified MAC/PHY link state.
968 */ 1406 */
969#define MC_CMD_GET_LINK 0x29 1407#define MC_CMD_GET_LINK 0x29
970#define MC_CMD_GET_LINK_IN_LEN 0 1408
971#define MC_CMD_GET_LINK_OUT_LEN 28 1409/* MC_CMD_GET_LINK_IN msgrequest */
972/* near-side and link-partner advertised capabilities */ 1410#define MC_CMD_GET_LINK_IN_LEN 0
973#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 1411
974#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 1412/* MC_CMD_GET_LINK_OUT msgresponse */
975/* Autonegotiated speed in mbit/s. The link may still be down 1413#define MC_CMD_GET_LINK_OUT_LEN 28
976 * even if this reads non-zero */ 1414#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
977#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 1415#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
978#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 1416#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
979#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 1417#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
980/* Whether we have overall link up */ 1418/* Enum values, see field(s): */
981#define MC_CMD_GET_LINK_LINK_UP_LBN 0 1419/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
982#define MC_CMD_GET_LINK_LINK_UP_WIDTH 1 1420#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
983#define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1 1421#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
984#define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1 1422#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
985/* Whether we have link at the layers provided by the BPX */ 1423#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
986#define MC_CMD_GET_LINK_BPX_LINK_LBN 2 1424#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
987#define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1 1425#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
988/* Whether the PHY has external link */ 1426#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
989#define MC_CMD_GET_LINK_PHY_LINK_LBN 3 1427#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
990#define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1 1428#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
991#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 1429#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
992#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 1430#define MC_CMD_FCNTL_OFF 0x0 /* enum */
993 1431#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
994/* MC_CMD_SET_LINK: 1432#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
995 * Write the unified MAC/PHY link configuration 1433#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
996 * 1434#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
997 * A loopback speed of "0" is supported, and means 1435#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
998 * (choose any available speed) 1436#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
999 * 1437#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
1000 * Locks required: None 1438#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
1001 * Return code: 0, EINVAL, ETIME 1439#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
1440#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
1441#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
1442
1443
1444/***********************************/
1445/* MC_CMD_SET_LINK
1446 * Write the unified MAC/PHY link configuration.
1002 */ 1447 */
1003#define MC_CMD_SET_LINK 0x2a 1448#define MC_CMD_SET_LINK 0x2a
1004#define MC_CMD_SET_LINK_IN_LEN 16 1449
1005#define MC_CMD_SET_LINK_IN_CAP_OFST 0 1450/* MC_CMD_SET_LINK_IN msgrequest */
1006#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 1451#define MC_CMD_SET_LINK_IN_LEN 16
1007#define MC_CMD_SET_LINK_LOWPOWER_LBN 0 1452#define MC_CMD_SET_LINK_IN_CAP_OFST 0
1008#define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1 1453#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
1009#define MC_CMD_SET_LINK_POWEROFF_LBN 1 1454#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
1010#define MC_CMD_SET_LINK_POWEROFF_WIDTH 1 1455#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
1011#define MC_CMD_SET_LINK_TXDIS_LBN 2 1456#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
1012#define MC_CMD_SET_LINK_TXDIS_WIDTH 1 1457#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
1013#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 1458#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
1014#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 1459#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
1015#define MC_CMD_SET_LINK_OUT_LEN 0 1460#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
1016 1461/* Enum values, see field(s): */
1017/* MC_CMD_SET_ID_LED: 1462/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
1018 * Set indentification LED state 1463#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
1019 * 1464
1020 * Locks required: None 1465/* MC_CMD_SET_LINK_OUT msgresponse */
1021 * Return code: 0, EINVAL 1466#define MC_CMD_SET_LINK_OUT_LEN 0
1467
1468
1469/***********************************/
1470/* MC_CMD_SET_ID_LED
1471 * Set indentification LED state.
1022 */ 1472 */
1023#define MC_CMD_SET_ID_LED 0x2b 1473#define MC_CMD_SET_ID_LED 0x2b
1024#define MC_CMD_SET_ID_LED_IN_LEN 4 1474
1025#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 1475/* MC_CMD_SET_ID_LED_IN msgrequest */
1026#define MC_CMD_LED_OFF 0 1476#define MC_CMD_SET_ID_LED_IN_LEN 4
1027#define MC_CMD_LED_ON 1 1477#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
1028#define MC_CMD_LED_DEFAULT 2 1478#define MC_CMD_LED_OFF 0x0 /* enum */
1029#define MC_CMD_SET_ID_LED_OUT_LEN 0 1479#define MC_CMD_LED_ON 0x1 /* enum */
1030 1480#define MC_CMD_LED_DEFAULT 0x2 /* enum */
1031/* MC_CMD_SET_MAC: 1481
1032 * Set MAC configuration 1482/* MC_CMD_SET_ID_LED_OUT msgresponse */
1033 * 1483#define MC_CMD_SET_ID_LED_OUT_LEN 0
1034 * The MTU is the MTU programmed directly into the XMAC/GMAC 1484
1035 * (inclusive of EtherII, VLAN, bug16011 padding) 1485
1036 * 1486/***********************************/
1037 * Locks required: None 1487/* MC_CMD_SET_MAC
1038 * Return code: 0, EINVAL 1488 * Set MAC configuration.
1039 */ 1489 */
1040#define MC_CMD_SET_MAC 0x2c 1490#define MC_CMD_SET_MAC 0x2c
1041#define MC_CMD_SET_MAC_IN_LEN 24 1491
1042#define MC_CMD_SET_MAC_IN_MTU_OFST 0 1492/* MC_CMD_SET_MAC_IN msgrequest */
1043#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 1493#define MC_CMD_SET_MAC_IN_LEN 24
1044#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 1494#define MC_CMD_SET_MAC_IN_MTU_OFST 0
1045#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 1495#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
1046#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 1496#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
1047#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 1497#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
1048#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 1498#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
1049#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 1499#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
1050#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 1500#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
1051#define MC_CMD_SET_MAC_OUT_LEN 0 1501#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
1052 1502#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
1053/* MC_CMD_PHY_STATS: 1503#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
1054 * Get generic PHY statistics 1504#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
1055 * 1505#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
1056 * This call returns the statistics for a generic PHY in a sparse 1506/* MC_CMD_FCNTL_OFF 0x0 */
1057 * array (indexed by the enumerate). Each value is represented by 1507/* MC_CMD_FCNTL_RESPOND 0x1 */
1058 * a 32bit number. 1508/* MC_CMD_FCNTL_BIDIR 0x2 */
1059 * 1509#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
1060 * If the DMA_ADDR is 0, then no DMA is performed, and the statistics 1510
1061 * may be read directly out of shared memory. If DMA_ADDR != 0, then 1511/* MC_CMD_SET_MAC_OUT msgresponse */
1062 * the statistics are dmad to that (page-aligned location) 1512#define MC_CMD_SET_MAC_OUT_LEN 0
1063 * 1513
1064 * Locks required: None 1514
1065 * Returns: 0, ETIME 1515/***********************************/
1066 * Response methods: shared memory, event 1516/* MC_CMD_PHY_STATS
1517 * Get generic PHY statistics.
1067 */ 1518 */
1068#define MC_CMD_PHY_STATS 0x2d 1519#define MC_CMD_PHY_STATS 0x2d
1069#define MC_CMD_PHY_STATS_IN_LEN 8
1070#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1071#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1072#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
1073#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
1074
1075/* Unified MAC statistics enumeration */
1076#define MC_CMD_MAC_GENERATION_START 0
1077#define MC_CMD_MAC_TX_PKTS 1
1078#define MC_CMD_MAC_TX_PAUSE_PKTS 2
1079#define MC_CMD_MAC_TX_CONTROL_PKTS 3
1080#define MC_CMD_MAC_TX_UNICAST_PKTS 4
1081#define MC_CMD_MAC_TX_MULTICAST_PKTS 5
1082#define MC_CMD_MAC_TX_BROADCAST_PKTS 6
1083#define MC_CMD_MAC_TX_BYTES 7
1084#define MC_CMD_MAC_TX_BAD_BYTES 8
1085#define MC_CMD_MAC_TX_LT64_PKTS 9
1086#define MC_CMD_MAC_TX_64_PKTS 10
1087#define MC_CMD_MAC_TX_65_TO_127_PKTS 11
1088#define MC_CMD_MAC_TX_128_TO_255_PKTS 12
1089#define MC_CMD_MAC_TX_256_TO_511_PKTS 13
1090#define MC_CMD_MAC_TX_512_TO_1023_PKTS 14
1091#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15
1092#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16
1093#define MC_CMD_MAC_TX_GTJUMBO_PKTS 17
1094#define MC_CMD_MAC_TX_BAD_FCS_PKTS 18
1095#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19
1096#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20
1097#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21
1098#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22
1099#define MC_CMD_MAC_TX_DEFERRED_PKTS 23
1100#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24
1101#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25
1102#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26
1103#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27
1104#define MC_CMD_MAC_RX_PKTS 28
1105#define MC_CMD_MAC_RX_PAUSE_PKTS 29
1106#define MC_CMD_MAC_RX_GOOD_PKTS 30
1107#define MC_CMD_MAC_RX_CONTROL_PKTS 31
1108#define MC_CMD_MAC_RX_UNICAST_PKTS 32
1109#define MC_CMD_MAC_RX_MULTICAST_PKTS 33
1110#define MC_CMD_MAC_RX_BROADCAST_PKTS 34
1111#define MC_CMD_MAC_RX_BYTES 35
1112#define MC_CMD_MAC_RX_BAD_BYTES 36
1113#define MC_CMD_MAC_RX_64_PKTS 37
1114#define MC_CMD_MAC_RX_65_TO_127_PKTS 38
1115#define MC_CMD_MAC_RX_128_TO_255_PKTS 39
1116#define MC_CMD_MAC_RX_256_TO_511_PKTS 40
1117#define MC_CMD_MAC_RX_512_TO_1023_PKTS 41
1118#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42
1119#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43
1120#define MC_CMD_MAC_RX_GTJUMBO_PKTS 44
1121#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45
1122#define MC_CMD_MAC_RX_BAD_FCS_PKTS 46
1123#define MC_CMD_MAC_RX_OVERFLOW_PKTS 47
1124#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48
1125#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49
1126#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50
1127#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51
1128#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52
1129#define MC_CMD_MAC_RX_JABBER_PKTS 53
1130#define MC_CMD_MAC_RX_NODESC_DROPS 54
1131#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55
1132#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56
1133#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
1134#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
1135#define MC_CMD_MAC_RX_MATCH_FAULT 59
1136#define MC_CMD_GMAC_DMABUF_START 64
1137#define MC_CMD_GMAC_DMABUF_END 95
1138/* Insert new members here. */
1139#define MC_CMD_MAC_GENERATION_END 96
1140#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
1141
1142/* MC_CMD_MAC_STATS:
1143 * Get unified GMAC/XMAC statistics
1144 *
1145 * This call returns unified statistics maintained by the MC as it
1146 * switches between the GMAC and XMAC. The MC will write out all
1147 * supported stats. The driver should zero initialise the buffer to
1148 * guarantee consistent results.
1149 *
1150 * Locks required: None
1151 * Returns: 0
1152 * Response methods: shared memory, event
1153 */
1154#define MC_CMD_MAC_STATS 0x2e
1155#define MC_CMD_MAC_STATS_IN_LEN 16
1156#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
1157#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
1158#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
1159#define MC_CMD_MAC_STATS_CMD_DMA_LBN 0
1160#define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1
1161#define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1
1162#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
1165/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
1166#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
1167#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
1168#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
1169#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
1170#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
1171#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
1172#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
1173#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
1174#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
1175
1176#define MC_CMD_MAC_STATS_OUT_LEN 0
1177
1178/* Callisto flags */
1179#define MC_CMD_SFT9001_ROBUST_LBN 0
1180#define MC_CMD_SFT9001_ROBUST_WIDTH 1
1181#define MC_CMD_SFT9001_SHORT_REACH_LBN 1
1182#define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1
1183
1184/* MC_CMD_SFT9001_GET:
1185 * Read current callisto specific setting
1186 *
1187 * Locks required: None
1188 * Returns: 0, ETIME
1189 */
1190#define MC_CMD_SFT9001_GET 0x30
1191#define MC_CMD_SFT9001_GET_IN_LEN 0
1192#define MC_CMD_SFT9001_GET_OUT_LEN 4
1193#define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0
1194 1520
1195/* MC_CMD_SFT9001_SET: 1521/* MC_CMD_PHY_STATS_IN msgrequest */
1196 * Write current callisto specific setting 1522#define MC_CMD_PHY_STATS_IN_LEN 8
1197 * 1523#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
1198 * Locks required: None 1524#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
1199 * Returns: 0, ETIME, EINVAL 1525#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1526#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1527
1528/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
1529#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
1530
1531/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
1532#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
1533#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
1534#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
1535#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
1536#define MC_CMD_OUI 0x0 /* enum */
1537#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
1538#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
1539#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
1540#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
1541#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
1542#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
1543#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
1544#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
1545#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
1546#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
1547#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
1548#define MC_CMD_PCS_BER 0xc /* enum */
1549#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
1550#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
1551#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
1552#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
1553#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
1554#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
1555#define MC_CMD_AN_LINK_UP 0x13 /* enum */
1556#define MC_CMD_AN_COMPLETE 0x14 /* enum */
1557#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
1558#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
1559#define MC_CMD_PHY_NSTATS 0x17 /* enum */
1560
1561
1562/***********************************/
1563/* MC_CMD_MAC_STATS
1564 * Get generic MAC statistics.
1200 */ 1565 */
1201#define MC_CMD_SFT9001_SET 0x31 1566#define MC_CMD_MAC_STATS 0x2e
1202#define MC_CMD_SFT9001_SET_IN_LEN 4
1203#define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0
1204#define MC_CMD_SFT9001_SET_OUT_LEN 0
1205
1206 1567
1207/* MC_CMD_WOL_FILTER_SET: 1568/* MC_CMD_MAC_STATS_IN msgrequest */
1208 * Set a WoL filter 1569#define MC_CMD_MAC_STATS_IN_LEN 16
1209 * 1570#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
1210 * Locks required: None 1571#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
1211 * Returns: 0, EBUSY, EINVAL, ENOSYS 1572#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
1573#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
1574#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
1575#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
1576#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
1577#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
1578#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
1579#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
1580#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
1581#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
1582#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
1583#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
1584#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
1585#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
1586#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
1587#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
1588#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
1589#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
1590
1591/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
1592#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
1593
1594/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
1595#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
1596#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
1597#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
1598#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
1599#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
1600#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
1601#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
1602#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
1603#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
1604#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
1605#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
1606#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
1607#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
1608#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
1609#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
1610#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
1611#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
1612#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
1613#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
1614#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
1615#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
1616#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
1617#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
1618#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
1619#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
1620#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
1621#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
1622#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
1623#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
1624#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
1625#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
1626#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
1627#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
1628#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
1629#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
1630#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
1631#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
1632#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
1633#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
1634#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
1635#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
1636#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
1637#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
1638#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
1639#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
1640#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
1641#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
1642#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
1643#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
1644#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
1645#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
1646#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
1647#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
1648#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
1649#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
1650#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
1651#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
1652#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
1653#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
1654#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
1655#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
1656#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
1657#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
1658#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
1659#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
1660#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
1661#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
1662#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
1663#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
1664#define MC_CMD_MAC_NSTATS 0x61 /* enum */
1665
1666
1667/***********************************/
1668/* MC_CMD_SRIOV
1669 * to be documented
1670 */
1671#define MC_CMD_SRIOV 0x30
1672
1673/* MC_CMD_SRIOV_IN msgrequest */
1674#define MC_CMD_SRIOV_IN_LEN 12
1675#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
1676#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
1677#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
1678
1679/* MC_CMD_SRIOV_OUT msgresponse */
1680#define MC_CMD_SRIOV_OUT_LEN 8
1681#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
1682#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
1683
1684/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
1685#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
1686#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
1687#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
1688#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
1689#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
1690#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
1691#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
1692#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
1693#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
1694#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
1695#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
1696#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
1697#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
1698#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
1699#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
1700#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
1701#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
1702#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
1703#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
1704#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
1705#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
1706#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
1707#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
1708#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
1709#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
1710#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
1711
1712
1713/***********************************/
1714/* MC_CMD_MEMCPY
1715 * Perform memory copy operation.
1716 */
1717#define MC_CMD_MEMCPY 0x31
1718
1719/* MC_CMD_MEMCPY_IN msgrequest */
1720#define MC_CMD_MEMCPY_IN_LENMIN 32
1721#define MC_CMD_MEMCPY_IN_LENMAX 224
1722#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
1723#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
1724#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
1725#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
1726#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
1727
1728/* MC_CMD_MEMCPY_OUT msgresponse */
1729#define MC_CMD_MEMCPY_OUT_LEN 0
1730
1731
1732/***********************************/
1733/* MC_CMD_WOL_FILTER_SET
1734 * Set a WoL filter.
1212 */ 1735 */
1213#define MC_CMD_WOL_FILTER_SET 0x32 1736#define MC_CMD_WOL_FILTER_SET 0x32
1214#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */ 1737
1215#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 1738/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
1216#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 1739#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
1217 1740#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
1218/* There is a union at offset 8, following defines overlap due to 1741#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
1219 * this */ 1742#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
1220#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 1743#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
1221 1744#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
1222#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \ 1745#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
1223 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1746#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
1224 1747#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
1225#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \ 1748#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
1226 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1749#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
1227#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \ 1750#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
1228 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4) 1751#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
1229#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \ 1752#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
1230 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8) 1753#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
1231#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \ 1754
1232 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10) 1755/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
1233 1756#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
1234#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \ 1757/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1235 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1758/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1236#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \ 1759#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
1237 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16) 1760#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
1238#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \ 1761#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
1239 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32) 1762#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
1240#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \ 1763
1241 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34) 1764/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
1242 1765#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
1243#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \ 1766/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1244 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1767/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1245#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \ 1768#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
1246 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48) 1769#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
1247#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \ 1770#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
1248 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176) 1771#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
1249#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \ 1772#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
1250 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177) 1773#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
1251#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \ 1774
1252 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178) 1775/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
1253 1776#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
1254#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \ 1777/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1255 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1778/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1256#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 1779#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
1257#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 1780#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
1258#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 1781#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
1259#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 1782#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
1260 1783#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
1261#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 1784#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
1262#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 1785#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
1263 1786#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
1264/* WOL Filter types enumeration */ 1787
1265#define MC_CMD_WOL_TYPE_MAGIC 0x0 1788/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
1266 /* unused 0x1 */ 1789#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
1267#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 1790/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1268#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 1791/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1269#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 1792#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
1270#define MC_CMD_WOL_TYPE_BITMAP 0x5 1793#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
1271#define MC_CMD_WOL_TYPE_LINK 0x6 1794#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
1272#define MC_CMD_WOL_TYPE_MAX 0x7 1795#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
1273 1796#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
1274#define MC_CMD_FILTER_MODE_SIMPLE 0x0 1797#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
1275#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff 1798#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
1276 1799#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
1277/* MC_CMD_WOL_FILTER_REMOVE: 1800#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
1278 * Remove a WoL filter 1801#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
1279 * 1802
1280 * Locks required: None 1803/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
1281 * Returns: 0, EINVAL, ENOSYS 1804#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
1805/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1806/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1807#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
1808#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
1809#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
1810#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
1811#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
1812
1813/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
1814#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
1815#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
1816
1817
1818/***********************************/
1819/* MC_CMD_WOL_FILTER_REMOVE
1820 * Remove a WoL filter.
1282 */ 1821 */
1283#define MC_CMD_WOL_FILTER_REMOVE 0x33 1822#define MC_CMD_WOL_FILTER_REMOVE 0x33
1284#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
1285#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
1286#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
1287 1823
1824/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
1825#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
1826#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
1288 1827
1289/* MC_CMD_WOL_FILTER_RESET: 1828/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
1290 * Reset (i.e. remove all) WoL filters 1829#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
1291 * 1830
1292 * Locks required: None 1831
1293 * Returns: 0, ENOSYS 1832/***********************************/
1833/* MC_CMD_WOL_FILTER_RESET
1834 * Reset (i.e. remove all) WoL filters.
1294 */ 1835 */
1295#define MC_CMD_WOL_FILTER_RESET 0x34 1836#define MC_CMD_WOL_FILTER_RESET 0x34
1296#define MC_CMD_WOL_FILTER_RESET_IN_LEN 0
1297#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
1298 1837
1299/* MC_CMD_SET_MCAST_HASH: 1838/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
1300 * Set the MCASH hash value without otherwise 1839#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
1301 * reconfiguring the MAC 1840#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
1841#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
1842#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
1843
1844/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
1845#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
1846
1847
1848/***********************************/
1849/* MC_CMD_SET_MCAST_HASH
1850 * Set the MCASH hash value.
1302 */ 1851 */
1303#define MC_CMD_SET_MCAST_HASH 0x35 1852#define MC_CMD_SET_MCAST_HASH 0x35
1304#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
1305#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
1306#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
1307#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
1308 1853
1309/* MC_CMD_NVRAM_TYPES: 1854/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
1310 * Return bitfield indicating available types of virtual NVRAM partitions 1855#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
1311 * 1856#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
1312 * Locks required: none 1857#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
1313 * Returns: 0 1858#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
1859#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
1860
1861/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
1862#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
1863
1864
1865/***********************************/
1866/* MC_CMD_NVRAM_TYPES
1867 * Get virtual NVRAM partitions information.
1314 */ 1868 */
1315#define MC_CMD_NVRAM_TYPES 0x36 1869#define MC_CMD_NVRAM_TYPES 0x36
1316#define MC_CMD_NVRAM_TYPES_IN_LEN 0 1870
1317#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 1871/* MC_CMD_NVRAM_TYPES_IN msgrequest */
1318#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 1872#define MC_CMD_NVRAM_TYPES_IN_LEN 0
1319 1873
1320/* Supported NVRAM types */ 1874/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
1321#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0 1875#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
1322#define MC_CMD_NVRAM_TYPE_MC_FW 1 1876#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
1323#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2 1877#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
1324#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3 1878#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
1325#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4 1879#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
1326#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5 1880#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
1327#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6 1881#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
1328#define MC_CMD_NVRAM_TYPE_EXP_ROM 7 1882#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
1329#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8 1883#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
1330#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9 1884#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
1331#define MC_CMD_NVRAM_TYPE_PHY_PORT0 10 1885#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
1332#define MC_CMD_NVRAM_TYPE_PHY_PORT1 11 1886#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
1333#define MC_CMD_NVRAM_TYPE_LOG 12 1887#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
1334 1888#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
1335/* MC_CMD_NVRAM_INFO: 1889#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
1336 * Read info about a virtual NVRAM partition 1890#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
1337 * 1891
1338 * Locks required: none 1892
1339 * Returns: 0, EINVAL (bad type) 1893/***********************************/
1894/* MC_CMD_NVRAM_INFO
1895 * Read info about a virtual NVRAM partition.
1340 */ 1896 */
1341#define MC_CMD_NVRAM_INFO 0x37 1897#define MC_CMD_NVRAM_INFO 0x37
1342#define MC_CMD_NVRAM_INFO_IN_LEN 4 1898
1343#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 1899/* MC_CMD_NVRAM_INFO_IN msgrequest */
1344#define MC_CMD_NVRAM_INFO_OUT_LEN 24 1900#define MC_CMD_NVRAM_INFO_IN_LEN 4
1345#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 1901#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
1346#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 1902/* Enum values, see field(s): */
1347#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 1903/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1348#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 1904
1349#define MC_CMD_NVRAM_PROTECTED_LBN 0 1905/* MC_CMD_NVRAM_INFO_OUT msgresponse */
1350#define MC_CMD_NVRAM_PROTECTED_WIDTH 1 1906#define MC_CMD_NVRAM_INFO_OUT_LEN 24
1351#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 1907#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
1352#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 1908/* Enum values, see field(s): */
1353 1909/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1354/* MC_CMD_NVRAM_UPDATE_START: 1910#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
1355 * Start a group of update operations on a virtual NVRAM partition 1911#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
1356 * 1912#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
1357 * Locks required: PHY_LOCK if type==*PHY* 1913#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
1358 * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held) 1914#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
1915#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
1916#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
1917
1918
1919/***********************************/
1920/* MC_CMD_NVRAM_UPDATE_START
1921 * Start a group of update operations on a virtual NVRAM partition.
1359 */ 1922 */
1360#define MC_CMD_NVRAM_UPDATE_START 0x38 1923#define MC_CMD_NVRAM_UPDATE_START 0x38
1361#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
1362#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
1363#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
1364 1924
1365/* MC_CMD_NVRAM_READ: 1925/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
1366 * Read data from a virtual NVRAM partition 1926#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
1367 * 1927#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
1368 * Locks required: PHY_LOCK if type==*PHY* 1928/* Enum values, see field(s): */
1369 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1929/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1930
1931/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
1932#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
1933
1934
1935/***********************************/
1936/* MC_CMD_NVRAM_READ
1937 * Read data from a virtual NVRAM partition.
1370 */ 1938 */
1371#define MC_CMD_NVRAM_READ 0x39 1939#define MC_CMD_NVRAM_READ 0x39
1372#define MC_CMD_NVRAM_READ_IN_LEN 12 1940
1373#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 1941/* MC_CMD_NVRAM_READ_IN msgrequest */
1374#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 1942#define MC_CMD_NVRAM_READ_IN_LEN 12
1375#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 1943#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
1376#define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes) 1944/* Enum values, see field(s): */
1377#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 1945/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1378 1946#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
1379/* MC_CMD_NVRAM_WRITE: 1947#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
1380 * Write data to a virtual NVRAM partition 1948
1381 * 1949/* MC_CMD_NVRAM_READ_OUT msgresponse */
1382 * Locks required: PHY_LOCK if type==*PHY* 1950#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
1383 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1951#define MC_CMD_NVRAM_READ_OUT_LENMAX 255
1952#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
1953#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
1954#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
1955#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
1956#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
1957
1958
1959/***********************************/
1960/* MC_CMD_NVRAM_WRITE
1961 * Write data to a virtual NVRAM partition.
1384 */ 1962 */
1385#define MC_CMD_NVRAM_WRITE 0x3a 1963#define MC_CMD_NVRAM_WRITE 0x3a
1386#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 1964
1387#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 1965/* MC_CMD_NVRAM_WRITE_IN msgrequest */
1388#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 1966#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
1389#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 1967#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255
1390#define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes) 1968#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
1391#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 1969#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
1392 1970/* Enum values, see field(s): */
1393/* MC_CMD_NVRAM_ERASE: 1971/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1394 * Erase sector(s) from a virtual NVRAM partition 1972#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
1395 * 1973#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
1396 * Locks required: PHY_LOCK if type==*PHY* 1974#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
1397 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1975#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
1976#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
1977#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
1978
1979/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
1980#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
1981
1982
1983/***********************************/
1984/* MC_CMD_NVRAM_ERASE
1985 * Erase sector(s) from a virtual NVRAM partition.
1398 */ 1986 */
1399#define MC_CMD_NVRAM_ERASE 0x3b 1987#define MC_CMD_NVRAM_ERASE 0x3b
1400#define MC_CMD_NVRAM_ERASE_IN_LEN 12 1988
1401#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 1989/* MC_CMD_NVRAM_ERASE_IN msgrequest */
1402#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 1990#define MC_CMD_NVRAM_ERASE_IN_LEN 12
1403#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 1991#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
1404#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 1992/* Enum values, see field(s): */
1405 1993/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1406/* MC_CMD_NVRAM_UPDATE_FINISH: 1994#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
1407 * Finish a group of update operations on a virtual NVRAM partition 1995#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
1408 * 1996
1409 * Locks required: PHY_LOCK if type==*PHY* 1997/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
1410 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1998#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
1999
2000
2001/***********************************/
2002/* MC_CMD_NVRAM_UPDATE_FINISH
2003 * Finish a group of update operations on a virtual NVRAM partition.
1411 */ 2004 */
1412#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c 2005#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
1413#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
1414#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
1415#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
1416#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
1417 2006
1418/* MC_CMD_REBOOT: 2007/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
2008#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
2009#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
2010/* Enum values, see field(s): */
2011/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
2012#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
2013
2014/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
2015#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
2016
2017
2018/***********************************/
2019/* MC_CMD_REBOOT
1419 * Reboot the MC. 2020 * Reboot the MC.
1420 *
1421 * The AFTER_ASSERTION flag is intended to be used when the driver notices
1422 * an assertion failure (at which point it is expected to perform a complete
1423 * tear down and reinitialise), to allow both ports to reset the MC once
1424 * in an atomic fashion.
1425 *
1426 * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
1427 * which means that they will automatically reboot out of the assertion
1428 * handler, so this is in practise an optional operation. It is still
1429 * recommended that drivers execute this to support custom firmwares
1430 * with REBOOT_ON_ASSERT=0.
1431 *
1432 * Locks required: NONE
1433 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
1434 */ 2021 */
1435#define MC_CMD_REBOOT 0x3d 2022#define MC_CMD_REBOOT 0x3d
1436#define MC_CMD_REBOOT_IN_LEN 4
1437#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
1438#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1
1439#define MC_CMD_REBOOT_OUT_LEN 0
1440 2023
1441/* MC_CMD_SCHEDINFO: 2024/* MC_CMD_REBOOT_IN msgrequest */
1442 * Request scheduler info. from the MC. 2025#define MC_CMD_REBOOT_IN_LEN 4
1443 * 2026#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
1444 * Locks required: NONE 2027#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
1445 * Returns: An array of (timeslice,maximum overrun), one for each thread, 2028
1446 * in ascending order of thread address.s 2029/* MC_CMD_REBOOT_OUT msgresponse */
2030#define MC_CMD_REBOOT_OUT_LEN 0
2031
2032
2033/***********************************/
2034/* MC_CMD_SCHEDINFO
2035 * Request scheduler info.
1447 */ 2036 */
1448#define MC_CMD_SCHEDINFO 0x3e 2037#define MC_CMD_SCHEDINFO 0x3e
1449#define MC_CMD_SCHEDINFO_IN_LEN 0
1450 2038
2039/* MC_CMD_SCHEDINFO_IN msgrequest */
2040#define MC_CMD_SCHEDINFO_IN_LEN 0
1451 2041
1452/* MC_CMD_SET_REBOOT_MODE: (debug) 2042/* MC_CMD_SCHEDINFO_OUT msgresponse */
1453 * Set the mode for the next MC reboot. 2043#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
1454 * 2044#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
1455 * Locks required: NONE 2045#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
1456 * 2046#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
1457 * Sets the reboot mode to the specified value. Returns the old mode. 2047#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
2048#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
2049#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
2050
2051
2052/***********************************/
2053/* MC_CMD_REBOOT_MODE
1458 */ 2054 */
1459#define MC_CMD_REBOOT_MODE 0x3f 2055#define MC_CMD_REBOOT_MODE 0x3f
1460#define MC_CMD_REBOOT_MODE_IN_LEN 4 2056
1461#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 2057/* MC_CMD_REBOOT_MODE_IN msgrequest */
1462#define MC_CMD_REBOOT_MODE_OUT_LEN 4 2058#define MC_CMD_REBOOT_MODE_IN_LEN 4
1463#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 2059#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
1464#define MC_CMD_REBOOT_MODE_NORMAL 0 2060#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
1465#define MC_CMD_REBOOT_MODE_SNAPPER 3 2061#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
1466 2062
1467/* MC_CMD_DEBUG_LOG: 2063/* MC_CMD_REBOOT_MODE_OUT msgresponse */
1468 * Null request/response command (debug) 2064#define MC_CMD_REBOOT_MODE_OUT_LEN 4
1469 * - sequence number is always zero 2065#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
1470 * - only supported on the UART interface 2066
1471 * (the same set of bytes is delivered as an 2067
1472 * event over PCI) 2068/***********************************/
1473 */ 2069/* MC_CMD_SENSOR_INFO
1474#define MC_CMD_DEBUG_LOG 0x40
1475#define MC_CMD_DEBUG_LOG_IN_LEN 0
1476#define MC_CMD_DEBUG_LOG_OUT_LEN 0
1477
1478/* Generic sensor enumeration. Note that a dual port NIC
1479 * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and
1480 * PHY1_TEMP depending on whether there is a single sensor
1481 * in the vicinity of the two port, or one per port.
1482 */
1483#define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */
1484#define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */
1485#define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */
1486#define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */
1487#define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */
1488#define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */
1489#define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */
1490#define MC_CMD_SENSOR_IN_1V0 7 /* mV */
1491#define MC_CMD_SENSOR_IN_1V2 8 /* mV */
1492#define MC_CMD_SENSOR_IN_1V8 9 /* mV */
1493#define MC_CMD_SENSOR_IN_2V5 10 /* mV */
1494#define MC_CMD_SENSOR_IN_3V3 11 /* mV */
1495#define MC_CMD_SENSOR_IN_12V0 12 /* mV */
1496
1497
1498/* Sensor state */
1499#define MC_CMD_SENSOR_STATE_OK 0
1500#define MC_CMD_SENSOR_STATE_WARNING 1
1501#define MC_CMD_SENSOR_STATE_FATAL 2
1502#define MC_CMD_SENSOR_STATE_BROKEN 3
1503
1504/* MC_CMD_SENSOR_INFO:
1505 * Returns information about every available sensor. 2070 * Returns information about every available sensor.
1506 *
1507 * Each sensor has a single (16bit) value, and a corresponding state.
1508 * The mapping between value and sensor is nominally determined by the
1509 * MC, but in practise is implemented as zero (BROKEN), one (TEMPERATURE),
1510 * or two (VOLTAGE) ranges per sensor per state.
1511 *
1512 * This call returns a mask (32bit) of the sensors that are supported
1513 * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte
1514 * offsets to the per-sensor arrays. Each sensor array has four 16bit
1515 * numbers, min1, max1, min2, max2.
1516 *
1517 * Locks required: None
1518 * Returns: 0
1519 */ 2071 */
1520#define MC_CMD_SENSOR_INFO 0x41 2072#define MC_CMD_SENSOR_INFO 0x41
1521#define MC_CMD_SENSOR_INFO_IN_LEN 0
1522#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
1523#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
1524 (4 + (_x))
1525#define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \
1526 ((_ofst) + 0)
1527#define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \
1528 ((_ofst) + 2)
1529#define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \
1530 ((_ofst) + 4)
1531#define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \
1532 ((_ofst) + 6)
1533 2073
2074/* MC_CMD_SENSOR_INFO_IN msgrequest */
2075#define MC_CMD_SENSOR_INFO_IN_LEN 0
2076
2077/* MC_CMD_SENSOR_INFO_OUT msgresponse */
2078#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
2079#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
2080#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
2081#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
2082#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
2083#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
2084#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
2085#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
2086#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
2087#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
2088#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
2089#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
2090#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
2091#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
2092#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
2093#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
2094#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
2095#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
2096#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
2097#define MC_CMD_SENSOR_ENTRY_OFST 4
2098#define MC_CMD_SENSOR_ENTRY_LEN 8
2099#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
2100#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
2101#define MC_CMD_SENSOR_ENTRY_MINNUM 1
2102#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
2103
2104/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
2105#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
2106#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
2107#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
2108#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
2109#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
2110#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
2111#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
2112#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
2113#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
2114#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
2115#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
2116#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
2117#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
2118#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
2119#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
2120#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
2121#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
2122
2123
2124/***********************************/
1534/* MC_CMD_READ_SENSORS 2125/* MC_CMD_READ_SENSORS
1535 * Returns the current reading from each sensor 2126 * Returns the current reading from each sensor.
1536 *
1537 * Returns a sparse array of sensor readings (indexed by the sensor
1538 * type) into host memory. Each array element is a dword.
1539 *
1540 * The MC will send a SENSOREVT event every time any sensor changes state. The
1541 * driver is responsible for ensuring that it doesn't miss any events. The board
1542 * will function normally if all sensors are in STATE_OK or state_WARNING.
1543 * Otherwise the board should not be expected to function.
1544 */ 2127 */
1545#define MC_CMD_READ_SENSORS 0x42 2128#define MC_CMD_READ_SENSORS 0x42
1546#define MC_CMD_READ_SENSORS_IN_LEN 8
1547#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
1548#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1549#define MC_CMD_READ_SENSORS_OUT_LEN 0
1550 2129
1551/* Sensor reading fields */ 2130/* MC_CMD_READ_SENSORS_IN msgrequest */
1552#define MC_CMD_READ_SENSOR_VALUE_LBN 0 2131#define MC_CMD_READ_SENSORS_IN_LEN 8
1553#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16 2132#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
1554#define MC_CMD_READ_SENSOR_STATE_LBN 16 2133#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
1555#define MC_CMD_READ_SENSOR_STATE_WIDTH 8 2134#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
1556 2135#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1557 2136
1558/* MC_CMD_GET_PHY_STATE: 2137/* MC_CMD_READ_SENSORS_OUT msgresponse */
1559 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to 2138#define MC_CMD_READ_SENSORS_OUT_LEN 0
1560 * boot (e.g. due to missing or corrupted firmware). 2139
1561 * 2140/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
1562 * Locks required: None 2141#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
1563 * Return code: 0 2142#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
2143#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
2144#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
2145#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
2146#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
2147#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
2148#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
2149#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
2150#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
2151#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
2152#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
2153#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
2154
2155
2156/***********************************/
2157/* MC_CMD_GET_PHY_STATE
2158 * Report current state of PHY.
1564 */ 2159 */
1565#define MC_CMD_GET_PHY_STATE 0x43 2160#define MC_CMD_GET_PHY_STATE 0x43
1566 2161
1567#define MC_CMD_GET_PHY_STATE_IN_LEN 0 2162/* MC_CMD_GET_PHY_STATE_IN msgrequest */
1568#define MC_CMD_GET_PHY_STATE_OUT_LEN 4 2163#define MC_CMD_GET_PHY_STATE_IN_LEN 0
1569#define MC_CMD_GET_PHY_STATE_STATE_OFST 0
1570/* PHY state enumeration: */
1571#define MC_CMD_PHY_STATE_OK 1
1572#define MC_CMD_PHY_STATE_ZOMBIE 2
1573 2164
2165/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
2166#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
2167#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
2168#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
2169#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
1574 2170
1575/* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to 2171
1576 * disable 802.Qbb for a given priority. */ 2172/***********************************/
2173/* MC_CMD_SETUP_8021QBB
2174 * 802.1Qbb control.
2175 */
1577#define MC_CMD_SETUP_8021QBB 0x44 2176#define MC_CMD_SETUP_8021QBB 0x44
1578#define MC_CMD_SETUP_8021QBB_IN_LEN 32
1579#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
1580#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0
1581 2177
2178/* MC_CMD_SETUP_8021QBB_IN msgrequest */
2179#define MC_CMD_SETUP_8021QBB_IN_LEN 32
2180#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
2181#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
1582 2182
1583/* MC_CMD_WOL_FILTER_GET: 2183/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
1584 * Retrieve ID of any WoL filters 2184#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
1585 *
1586 * Locks required: None
1587 * Returns: 0, ENOSYS
1588 */
1589#define MC_CMD_WOL_FILTER_GET 0x45
1590#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
1591#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
1592#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
1593 2185
1594 2186
1595/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD: 2187/***********************************/
1596 * Offload a protocol to NIC for lights-out state 2188/* MC_CMD_WOL_FILTER_GET
1597 * 2189 * Retrieve ID of any WoL filters.
1598 * Locks required: None
1599 * Returns: 0, ENOSYS
1600 */ 2190 */
1601#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 2191#define MC_CMD_WOL_FILTER_GET 0x45
1602 2192
1603#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16 2193/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
1604#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 2194#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
1605 2195
1606/* There is a union at offset 4, following defines overlap due to 2196/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
1607 * this */ 2197#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
1608#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 2198#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
1609#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4
1610#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10
1611#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4
1612#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10
1613#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26
1614 2199
1615#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
1616#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
1617 2200
2201/***********************************/
2202/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
2203 * Add a protocol offload to NIC for lights-out state.
2204 */
2205#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
1618 2206
1619/* MC_CMD_REMOVE_LIGHTSOUT_PROTOCOL_OFFLOAD: 2207/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
1620 * Offload a protocol to NIC for lights-out state 2208#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
1621 * 2209#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
1622 * Locks required: None 2210#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
1623 * Returns: 0, ENOSYS 2211#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
2212#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
2213#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
2214#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
2215#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
2216#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
2217#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
2218
2219/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
2220#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
2221/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
2222#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
2223#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
2224#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
2225
2226/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
2227#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
2228/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
2229#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
2230#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
2231#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
2232#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
2233#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
2234#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
2235
2236/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
2237#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
2238#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
2239
2240
2241/***********************************/
2242/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
2243 * Remove a protocol offload from NIC for lights-out state.
1624 */ 2244 */
1625#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 2245#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
1626#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
1627#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
1628 2246
1629#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 2247/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
1630#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 2248#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
2249#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
2250#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
1631 2251
1632/* Lights-out offload protocols enumeration */ 2252/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
1633#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 2253#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
1634#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2
1635 2254
1636 2255
1637/* MC_CMD_MAC_RESET_RESTORE: 2256/***********************************/
1638 * Restore MAC after block reset 2257/* MC_CMD_MAC_RESET_RESTORE
1639 * 2258 * Restore MAC after block reset.
1640 * Locks required: None
1641 * Returns: 0
1642 */ 2259 */
1643
1644#define MC_CMD_MAC_RESET_RESTORE 0x48 2260#define MC_CMD_MAC_RESET_RESTORE 0x48
1645#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
1646#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1647 2261
2262/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
2263#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
2264
2265/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
2266#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1648 2267
1649/* MC_CMD_TEST_ASSERT:
1650 * Deliberately trigger an assert-detonation in the firmware for testing
1651 * purposes (i.e. to allow tests that the driver copes gracefully).
1652 *
1653 * Locks required: None
1654 * Returns: 0
1655 */
1656 2268
2269/***********************************/
2270/* MC_CMD_TESTASSERT
2271 */
1657#define MC_CMD_TESTASSERT 0x49 2272#define MC_CMD_TESTASSERT 0x49
1658#define MC_CMD_TESTASSERT_IN_LEN 0
1659#define MC_CMD_TESTASSERT_OUT_LEN 0
1660 2273
1661/* MC_CMD_WORKAROUND 0x4a 2274/* MC_CMD_TESTASSERT_IN msgrequest */
1662 * 2275#define MC_CMD_TESTASSERT_IN_LEN 0
1663 * Enable/Disable a given workaround. The mcfw will return EINVAL if it 2276
1664 * doesn't understand the given workaround number - which should not 2277/* MC_CMD_TESTASSERT_OUT msgresponse */
1665 * be treated as a hard error by client code. 2278#define MC_CMD_TESTASSERT_OUT_LEN 0
1666 * 2279
1667 * This op does not imply any semantics about each workaround, that's between 2280
1668 * the driver and the mcfw on a per-workaround basis. 2281/***********************************/
1669 * 2282/* MC_CMD_WORKAROUND
1670 * Locks required: None 2283 * Enable/Disable a given workaround.
1671 * Returns: 0, EINVAL
1672 */ 2284 */
1673#define MC_CMD_WORKAROUND 0x4a 2285#define MC_CMD_WORKAROUND 0x4a
1674#define MC_CMD_WORKAROUND_IN_LEN 8 2286
1675#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 2287/* MC_CMD_WORKAROUND_IN msgrequest */
1676#define MC_CMD_WORKAROUND_BUG17230 1 2288#define MC_CMD_WORKAROUND_IN_LEN 8
1677#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 2289#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
1678#define MC_CMD_WORKAROUND_OUT_LEN 0 2290#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
1679 2291#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
1680/* MC_CMD_GET_PHY_MEDIA_INFO: 2292
1681 * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for 2293/* MC_CMD_WORKAROUND_OUT msgresponse */
1682 * SFP+ PHYs). 2294#define MC_CMD_WORKAROUND_OUT_LEN 0
1683 * 2295
1684 * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE); 2296
1685 * the valid "page number" input values, and the output data, are interpreted 2297/***********************************/
1686 * on a per-type basis. 2298/* MC_CMD_GET_PHY_MEDIA_INFO
1687 * 2299 * Read media-specific data from PHY.
1688 * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
1689 * 0xA0 offset 0 or 0x80.
1690 * Anything else: currently undefined.
1691 *
1692 * Locks required: None
1693 * Return code: 0
1694 */ 2300 */
1695#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b 2301#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
1696#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 2302
1697#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 2303/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
1698#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes)) 2304#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
1699#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 2305#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
1700#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 2306
1701 2307/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
1702/* MC_CMD_NVRAM_TEST: 2308#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
1703 * Test a particular NVRAM partition for valid contents (where "valid" 2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
1704 * depends on the type of partition). 2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
1705 * 2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
1706 * Locks required: None 2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
1707 * Return code: 0 2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
2315#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
2316
2317
2318/***********************************/
2319/* MC_CMD_NVRAM_TEST
2320 * Test a particular NVRAM partition.
1708 */ 2321 */
1709#define MC_CMD_NVRAM_TEST 0x4c 2322#define MC_CMD_NVRAM_TEST 0x4c
1710#define MC_CMD_NVRAM_TEST_IN_LEN 4 2323
1711#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 2324/* MC_CMD_NVRAM_TEST_IN msgrequest */
1712#define MC_CMD_NVRAM_TEST_OUT_LEN 4 2325#define MC_CMD_NVRAM_TEST_IN_LEN 4
1713#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 2326#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
1714#define MC_CMD_NVRAM_TEST_PASS 0 2327/* Enum values, see field(s): */
1715#define MC_CMD_NVRAM_TEST_FAIL 1 2328/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1716#define MC_CMD_NVRAM_TEST_NOTSUPP 2 2329
1717 2330/* MC_CMD_NVRAM_TEST_OUT msgresponse */
1718/* MC_CMD_MRSFP_TWEAK: (debug) 2331#define MC_CMD_NVRAM_TEST_OUT_LEN 4
1719 * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds. 2332#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
1720 * I2C I/O expander bits are always read; if equaliser parameters are supplied, 2333#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
1721 * they are configured first. 2334#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
1722 * 2335#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
1723 * Locks required: None 2336
1724 * Return code: 0, EINVAL 2337
2338/***********************************/
2339/* MC_CMD_MRSFP_TWEAK
2340 * Read status and/or set parameters for the 'mrsfp' driver.
1725 */ 2341 */
1726#define MC_CMD_MRSFP_TWEAK 0x4d 2342#define MC_CMD_MRSFP_TWEAK 0x4d
1727#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0 2343
1728#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16 2344/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
1729#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */ 2345#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
1730#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */ 2346#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
1731#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */ 2347#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
1732#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */ 2348#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
1733#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 2349#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
1734#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */ 2350
1735#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */ 2351/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
1736#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */ 2352#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
1737 2353
1738/* MC_CMD_TEST_HACK: (debug (unsurprisingly)) 2354/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
1739 * Change bits of network port state for test purposes in ways that would never be 2355#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
1740 * useful in normal operation and so need a special command to change. */ 2356#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
1741#define MC_CMD_TEST_HACK 0x2f 2357#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
1742#define MC_CMD_TEST_HACK_IN_LEN 8 2358#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
1743#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0 2359#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
1744#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */ 2360#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
1745#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */ 2361
1746#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force on */ 2362
1747#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */ 2363/***********************************/
1748#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */ 2364/* MC_CMD_SENSOR_SET_LIMS
1749#define MC_CMD_TEST_HACK_OUT_LEN 0 2365 * Adjusts the sensor limits.
1750
1751/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
1752 * is a warranty-voiding operation.
1753 *
1754 * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP
1755 * followed by 4 32-bit values: min(warning) max(warning), min(fatal), max(fatal). Which
1756 * of these limits are meaningful and what their interpretation is is sensor-specific.
1757 *
1758 * OUT: nothing
1759 *
1760 * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
1761 * out of range.
1762 */ 2366 */
1763#define MC_CMD_SENSOR_SET_LIMS 0x4e 2367#define MC_CMD_SENSOR_SET_LIMS 0x4e
1764#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 2368
1765#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 2369/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
1766#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 2370#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
1767#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 2371#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
1768#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 2372/* Enum values, see field(s): */
1769#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 2373/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
1770 2374#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
1771/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be 2375#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
1772 * used for post-3.0 extensions. If you run out of space, look for gaps or 2376#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
1773 * commands that are unused in the existing range. */ 2377#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
2378
2379/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
2380#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
2381
2382
2383/***********************************/
2384/* MC_CMD_GET_RESOURCE_LIMITS
2385 */
2386#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
2387
2388/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
2389#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
2390
2391/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
2392#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
2393#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
2394#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
2395#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
2396#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
2397
2398/* MC_CMD_RESOURCE_SPECIFIER enum */
2399#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
2400#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
2401
1774 2402
1775#endif /* MCDI_PCOL_H */ 2403#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
index 6c63ab0710af..7bcad899a936 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
@@ -116,7 +116,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
116 goto fail; 116 goto fail;
117 } 117 }
118 118
119 *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED); 119 *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
120 120
121 return 0; 121 return 0;
122 122
@@ -264,22 +264,22 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
264 264
265 /* TODO: Advertise the capabilities supported by this PHY */ 265 /* TODO: Advertise the capabilities supported by this PHY */
266 supported = 0; 266 supported = 0;
267 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN)) 267 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
268 supported |= PHY_MODE_TX_DISABLED; 268 supported |= PHY_MODE_TX_DISABLED;
269 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN)) 269 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
270 supported |= PHY_MODE_LOW_POWER; 270 supported |= PHY_MODE_LOW_POWER;
271 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN)) 271 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
272 supported |= PHY_MODE_OFF; 272 supported |= PHY_MODE_OFF;
273 273
274 mode = efx->phy_mode & supported; 274 mode = efx->phy_mode & supported;
275 275
276 flags = 0; 276 flags = 0;
277 if (mode & PHY_MODE_TX_DISABLED) 277 if (mode & PHY_MODE_TX_DISABLED)
278 flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN); 278 flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
279 if (mode & PHY_MODE_LOW_POWER) 279 if (mode & PHY_MODE_LOW_POWER)
280 flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN); 280 flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
281 if (mode & PHY_MODE_OFF) 281 if (mode & PHY_MODE_OFF)
282 flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN); 282 flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
283 283
284 return flags; 284 return flags;
285} 285}
@@ -436,8 +436,8 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
436 break; 436 break;
437 } 437 }
438 438
439 link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN)); 439 link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
440 link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN)); 440 link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
441 link_state->speed = speed; 441 link_state->speed = speed;
442} 442}
443 443
@@ -592,7 +592,7 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
592 592
593 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) 593 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
594 return -EIO; 594 return -EIO;
595 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) 595 if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
596 return -EINVAL; 596 return -EINVAL;
597 597
598 return 0; 598 return 0;
@@ -680,7 +680,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
680 u32 mode; 680 u32 mode;
681 int rc; 681 int rc;
682 682
683 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { 683 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
684 rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); 684 rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
685 if (rc < 0) 685 if (rc < 0)
686 return rc; 686 return rc;
@@ -691,15 +691,15 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
691 /* If we support both LONG and SHORT, then run each in response to 691 /* If we support both LONG and SHORT, then run each in response to
692 * break or not. Otherwise, run the one we support */ 692 * break or not. Otherwise, run the one we support */
693 mode = 0; 693 mode = 0;
694 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) { 694 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
695 if ((flags & ETH_TEST_FL_OFFLINE) && 695 if ((flags & ETH_TEST_FL_OFFLINE) &&
696 (phy_cfg->flags & 696 (phy_cfg->flags &
697 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) 697 (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
698 mode = MC_CMD_PHY_BIST_CABLE_LONG; 698 mode = MC_CMD_PHY_BIST_CABLE_LONG;
699 else 699 else
700 mode = MC_CMD_PHY_BIST_CABLE_SHORT; 700 mode = MC_CMD_PHY_BIST_CABLE_SHORT;
701 } else if (phy_cfg->flags & 701 } else if (phy_cfg->flags &
702 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)) 702 (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
703 mode = MC_CMD_PHY_BIST_CABLE_LONG; 703 mode = MC_CMD_PHY_BIST_CABLE_LONG;
704 704
705 if (mode != 0) { 705 if (mode != 0) {
@@ -717,14 +717,14 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
717{ 717{
718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
719 719
720 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { 720 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
721 if (index == 0) 721 if (index == 0)
722 return "bist"; 722 return "bist";
723 --index; 723 --index;
724 } 724 }
725 725
726 if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) | 726 if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
727 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) { 727 (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
728 if (index == 0) 728 if (index == 0)
729 return "cable"; 729 return "cable";
730 --index; 730 --index;
@@ -741,7 +741,7 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
741 741
742const struct efx_phy_operations efx_mcdi_phy_ops = { 742const struct efx_phy_operations efx_mcdi_phy_ops = {
743 .probe = efx_mcdi_phy_probe, 743 .probe = efx_mcdi_phy_probe,
744 .init = efx_port_dummy_op_int, 744 .init = efx_port_dummy_op_int,
745 .reconfigure = efx_mcdi_phy_reconfigure, 745 .reconfigure = efx_mcdi_phy_reconfigure,
746 .poll = efx_mcdi_phy_poll, 746 .poll = efx_mcdi_phy_poll,
747 .fini = efx_port_dummy_op_void, 747 .fini = efx_port_dummy_op_void,
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 7ab385c8136d..9acfd6696ffb 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -228,7 +228,7 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
228/** 228/**
229 * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO. 229 * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
230 * @efx: Efx NIC 230 * @efx: Efx NIC
231 * @ecmd: New settings 231 * @ecmd: New settings
232 */ 232 */
233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
234{ 234{
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index bc9dcd6b30d7..26b3c23b0b6f 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -280,7 +280,7 @@ fail:
280 --part; 280 --part;
281 efx_mtd_remove_partition(part); 281 efx_mtd_remove_partition(part);
282 } 282 }
283 /* mtd_device_register() returns 1 if the MTD table is full */ 283 /* Failure is unlikely here, but probably means we're out of memory */
284 return -ENOMEM; 284 return -ENOMEM;
285} 285}
286 286
@@ -382,7 +382,7 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
382 return rc; 382 return rc;
383} 383}
384 384
385static struct efx_mtd_ops falcon_mtd_ops = { 385static const struct efx_mtd_ops falcon_mtd_ops = {
386 .read = falcon_mtd_read, 386 .read = falcon_mtd_read,
387 .erase = falcon_mtd_erase, 387 .erase = falcon_mtd_erase,
388 .write = falcon_mtd_write, 388 .write = falcon_mtd_write,
@@ -560,7 +560,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
560 return rc; 560 return rc;
561} 561}
562 562
563static struct efx_mtd_ops siena_mtd_ops = { 563static const struct efx_mtd_ops siena_mtd_ops = {
564 .read = siena_mtd_read, 564 .read = siena_mtd_read,
565 .erase = siena_mtd_erase, 565 .erase = siena_mtd_erase,
566 .write = siena_mtd_write, 566 .write = siena_mtd_write,
@@ -572,7 +572,7 @@ struct siena_nvram_type_info {
572 const char *name; 572 const char *name;
573}; 573};
574 574
575static struct siena_nvram_type_info siena_nvram_types[] = { 575static const struct siena_nvram_type_info siena_nvram_types[] = {
576 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, 576 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
577 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, 577 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
578 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, 578 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
@@ -593,7 +593,7 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
593 unsigned int type) 593 unsigned int type)
594{ 594{
595 struct efx_mtd_partition *part = &efx_mtd->part[part_id]; 595 struct efx_mtd_partition *part = &efx_mtd->part[part_id];
596 struct siena_nvram_type_info *info; 596 const struct siena_nvram_type_info *info;
597 size_t size, erase_size; 597 size_t size, erase_size;
598 bool protected; 598 bool protected;
599 int rc; 599 int rc;
@@ -627,11 +627,10 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
627 struct efx_mtd *efx_mtd) 627 struct efx_mtd *efx_mtd)
628{ 628{
629 struct efx_mtd_partition *part; 629 struct efx_mtd_partition *part;
630 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / 630 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
631 sizeof(uint16_t)];
632 int rc; 631 int rc;
633 632
634 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list); 633 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
635 if (rc) 634 if (rc)
636 return rc; 635 return rc;
637 636
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c49502bab6a3..0b95505e8968 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -13,10 +13,6 @@
13#ifndef EFX_NET_DRIVER_H 13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H 14#define EFX_NET_DRIVER_H
15 15
16#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
17#define DEBUG
18#endif
19
20#include <linux/netdevice.h> 16#include <linux/netdevice.h>
21#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
22#include <linux/ethtool.h> 18#include <linux/ethtool.h>
@@ -28,6 +24,7 @@
28#include <linux/device.h> 24#include <linux/device.h>
29#include <linux/highmem.h> 25#include <linux/highmem.h>
30#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/mutex.h>
31#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
32#include <linux/i2c.h> 29#include <linux/i2c.h>
33 30
@@ -42,7 +39,7 @@
42 39
43#define EFX_DRIVER_VERSION "3.1" 40#define EFX_DRIVER_VERSION "3.1"
44 41
45#ifdef EFX_ENABLE_DEBUG 42#ifdef DEBUG
46#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
47#define EFX_WARN_ON_PARANOID(x) WARN_ON(x) 44#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
48#else 45#else
@@ -56,8 +53,10 @@
56 * 53 *
57 **************************************************************************/ 54 **************************************************************************/
58 55
59#define EFX_MAX_CHANNELS 32 56#define EFX_MAX_CHANNELS 32U
60#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 57#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
58#define EFX_EXTRA_CHANNEL_IOV 0
59#define EFX_MAX_EXTRA_CHANNELS 1U
61 60
62/* Checksum generation is a per-queue option in hardware, so each 61/* Checksum generation is a per-queue option in hardware, so each
63 * queue visible to the networking core is backed by two hardware TX 62 * queue visible to the networking core is backed by two hardware TX
@@ -85,15 +84,8 @@ struct efx_special_buffer {
85 void *addr; 84 void *addr;
86 dma_addr_t dma_addr; 85 dma_addr_t dma_addr;
87 unsigned int len; 86 unsigned int len;
88 int index; 87 unsigned int index;
89 int entries; 88 unsigned int entries;
90};
91
92enum efx_flush_state {
93 FLUSH_NONE,
94 FLUSH_PENDING,
95 FLUSH_FAILED,
96 FLUSH_DONE,
97}; 89};
98 90
99/** 91/**
@@ -142,7 +134,6 @@ struct efx_tx_buffer {
142 * @txd: The hardware descriptor ring 134 * @txd: The hardware descriptor ring
143 * @ptr_mask: The size of the ring minus 1. 135 * @ptr_mask: The size of the ring minus 1.
144 * @initialised: Has hardware queue been initialised? 136 * @initialised: Has hardware queue been initialised?
145 * @flushed: Used when handling queue flushing
146 * @read_count: Current read pointer. 137 * @read_count: Current read pointer.
147 * This is the number of buffers that have been removed from both rings. 138 * This is the number of buffers that have been removed from both rings.
148 * @old_write_count: The value of @write_count when last checked. 139 * @old_write_count: The value of @write_count when last checked.
@@ -185,7 +176,6 @@ struct efx_tx_queue {
185 struct efx_special_buffer txd; 176 struct efx_special_buffer txd;
186 unsigned int ptr_mask; 177 unsigned int ptr_mask;
187 bool initialised; 178 bool initialised;
188 enum efx_flush_state flushed;
189 179
190 /* Members used mainly on the completion path */ 180 /* Members used mainly on the completion path */
191 unsigned int read_count ____cacheline_aligned_in_smp; 181 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -209,12 +199,12 @@ struct efx_tx_queue {
209/** 199/**
210 * struct efx_rx_buffer - An Efx RX data buffer 200 * struct efx_rx_buffer - An Efx RX data buffer
211 * @dma_addr: DMA base address of the buffer 201 * @dma_addr: DMA base address of the buffer
212 * @skb: The associated socket buffer, if any. 202 * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
213 * If both this and page are %NULL, the buffer slot is currently free. 203 * Will be %NULL if the buffer slot is currently free.
214 * @page: The associated page buffer, if any. 204 * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
215 * If both this and skb are %NULL, the buffer slot is currently free. 205 * Will be %NULL if the buffer slot is currently free.
216 * @len: Buffer length, in bytes. 206 * @len: Buffer length, in bytes.
217 * @is_page: Indicates if @page is valid. If false, @skb is valid. 207 * @flags: Flags for buffer and packet state.
218 */ 208 */
219struct efx_rx_buffer { 209struct efx_rx_buffer {
220 dma_addr_t dma_addr; 210 dma_addr_t dma_addr;
@@ -223,8 +213,11 @@ struct efx_rx_buffer {
223 struct page *page; 213 struct page *page;
224 } u; 214 } u;
225 unsigned int len; 215 unsigned int len;
226 bool is_page; 216 u16 flags;
227}; 217};
218#define EFX_RX_BUF_PAGE 0x0001
219#define EFX_RX_PKT_CSUMMED 0x0002
220#define EFX_RX_PKT_DISCARD 0x0004
228 221
229/** 222/**
230 * struct efx_rx_page_state - Page-based rx buffer state 223 * struct efx_rx_page_state - Page-based rx buffer state
@@ -250,6 +243,9 @@ struct efx_rx_page_state {
250 * @buffer: The software buffer ring 243 * @buffer: The software buffer ring
251 * @rxd: The hardware descriptor ring 244 * @rxd: The hardware descriptor ring
252 * @ptr_mask: The size of the ring minus 1. 245 * @ptr_mask: The size of the ring minus 1.
246 * @enabled: Receive queue enabled indicator.
247 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
248 * @rxq_flush_pending.
253 * @added_count: Number of buffers added to the receive queue. 249 * @added_count: Number of buffers added to the receive queue.
254 * @notified_count: Number of buffers given to NIC (<= @added_count). 250 * @notified_count: Number of buffers given to NIC (<= @added_count).
255 * @removed_count: Number of buffers removed from the receive queue. 251 * @removed_count: Number of buffers removed from the receive queue.
@@ -264,13 +260,14 @@ struct efx_rx_page_state {
264 * @alloc_page_count: RX allocation strategy counter. 260 * @alloc_page_count: RX allocation strategy counter.
265 * @alloc_skb_count: RX allocation strategy counter. 261 * @alloc_skb_count: RX allocation strategy counter.
266 * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 262 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
267 * @flushed: Use when handling queue flushing
268 */ 263 */
269struct efx_rx_queue { 264struct efx_rx_queue {
270 struct efx_nic *efx; 265 struct efx_nic *efx;
271 struct efx_rx_buffer *buffer; 266 struct efx_rx_buffer *buffer;
272 struct efx_special_buffer rxd; 267 struct efx_special_buffer rxd;
273 unsigned int ptr_mask; 268 unsigned int ptr_mask;
269 bool enabled;
270 bool flush_pending;
274 271
275 int added_count; 272 int added_count;
276 int notified_count; 273 int notified_count;
@@ -284,8 +281,6 @@ struct efx_rx_queue {
284 unsigned int alloc_skb_count; 281 unsigned int alloc_skb_count;
285 struct timer_list slow_fill; 282 struct timer_list slow_fill;
286 unsigned int slow_fill_count; 283 unsigned int slow_fill_count;
287
288 enum efx_flush_state flushed;
289}; 284};
290 285
291/** 286/**
@@ -319,6 +314,7 @@ enum efx_rx_alloc_method {
319 * 314 *
320 * @efx: Associated Efx NIC 315 * @efx: Associated Efx NIC
321 * @channel: Channel instance number 316 * @channel: Channel instance number
317 * @type: Channel type definition
322 * @enabled: Channel enabled indicator 318 * @enabled: Channel enabled indicator
323 * @irq: IRQ number (MSI and MSI-X only) 319 * @irq: IRQ number (MSI and MSI-X only)
324 * @irq_moderation: IRQ moderation value (in hardware ticks) 320 * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -329,6 +325,7 @@ enum efx_rx_alloc_method {
329 * @eventq_mask: Event queue pointer mask 325 * @eventq_mask: Event queue pointer mask
330 * @eventq_read_ptr: Event queue read pointer 326 * @eventq_read_ptr: Event queue read pointer
331 * @last_eventq_read_ptr: Last event queue read pointer value. 327 * @last_eventq_read_ptr: Last event queue read pointer value.
328 * @last_irq_cpu: Last CPU to handle interrupt for this channel
332 * @irq_count: Number of IRQs since last adaptive moderation decision 329 * @irq_count: Number of IRQs since last adaptive moderation decision
333 * @irq_mod_score: IRQ moderation score 330 * @irq_mod_score: IRQ moderation score
334 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 331 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -348,6 +345,7 @@ enum efx_rx_alloc_method {
348struct efx_channel { 345struct efx_channel {
349 struct efx_nic *efx; 346 struct efx_nic *efx;
350 int channel; 347 int channel;
348 const struct efx_channel_type *type;
351 bool enabled; 349 bool enabled;
352 int irq; 350 int irq;
353 unsigned int irq_moderation; 351 unsigned int irq_moderation;
@@ -359,6 +357,7 @@ struct efx_channel {
359 unsigned int eventq_read_ptr; 357 unsigned int eventq_read_ptr;
360 unsigned int last_eventq_read_ptr; 358 unsigned int last_eventq_read_ptr;
361 359
360 int last_irq_cpu;
362 unsigned int irq_count; 361 unsigned int irq_count;
363 unsigned int irq_mod_score; 362 unsigned int irq_mod_score;
364#ifdef CONFIG_RFS_ACCEL 363#ifdef CONFIG_RFS_ACCEL
@@ -380,12 +379,31 @@ struct efx_channel {
380 * access with prefetches. 379 * access with prefetches.
381 */ 380 */
382 struct efx_rx_buffer *rx_pkt; 381 struct efx_rx_buffer *rx_pkt;
383 bool rx_pkt_csummed;
384 382
385 struct efx_rx_queue rx_queue; 383 struct efx_rx_queue rx_queue;
386 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; 384 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
387}; 385};
388 386
387/**
388 * struct efx_channel_type - distinguishes traffic and extra channels
389 * @handle_no_channel: Handle failure to allocate an extra channel
390 * @pre_probe: Set up extra state prior to initialisation
391 * @post_remove: Tear down extra state after finalisation, if allocated.
392 * May be called on channels that have not been probed.
393 * @get_name: Generate the channel's name (used for its IRQ handler)
394 * @copy: Copy the channel state prior to reallocation. May be %NULL if
395 * reallocation is not supported.
396 * @keep_eventq: Flag for whether event queue should be kept initialised
397 * while the device is stopped
398 */
399struct efx_channel_type {
400 void (*handle_no_channel)(struct efx_nic *);
401 int (*pre_probe)(struct efx_channel *);
402 void (*get_name)(struct efx_channel *, char *buf, size_t len);
403 struct efx_channel *(*copy)(const struct efx_channel *);
404 bool keep_eventq;
405};
406
389enum efx_led_mode { 407enum efx_led_mode {
390 EFX_LED_OFF = 0, 408 EFX_LED_OFF = 0,
391 EFX_LED_ON = 1, 409 EFX_LED_ON = 1,
@@ -395,12 +413,12 @@ enum efx_led_mode {
395#define STRING_TABLE_LOOKUP(val, member) \ 413#define STRING_TABLE_LOOKUP(val, member) \
396 ((val) < member ## _max) ? member ## _names[val] : "(invalid)" 414 ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
397 415
398extern const char *efx_loopback_mode_names[]; 416extern const char *const efx_loopback_mode_names[];
399extern const unsigned int efx_loopback_mode_max; 417extern const unsigned int efx_loopback_mode_max;
400#define LOOPBACK_MODE(efx) \ 418#define LOOPBACK_MODE(efx) \
401 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) 419 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
402 420
403extern const char *efx_reset_type_names[]; 421extern const char *const efx_reset_type_names[];
404extern const unsigned int efx_reset_type_max; 422extern const unsigned int efx_reset_type_max;
405#define RESET_TYPE(type) \ 423#define RESET_TYPE(type) \
406 STRING_TABLE_LOOKUP(type, efx_reset_type) 424 STRING_TABLE_LOOKUP(type, efx_reset_type)
@@ -474,18 +492,6 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
474} 492}
475 493
476/** 494/**
477 * struct efx_mac_operations - Efx MAC operations table
478 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
479 * @update_stats: Update statistics
480 * @check_fault: Check fault state. True if fault present.
481 */
482struct efx_mac_operations {
483 int (*reconfigure) (struct efx_nic *efx);
484 void (*update_stats) (struct efx_nic *efx);
485 bool (*check_fault)(struct efx_nic *efx);
486};
487
488/**
489 * struct efx_phy_operations - Efx PHY operations table 495 * struct efx_phy_operations - Efx PHY operations table
490 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, 496 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
491 * efx->loopback_modes. 497 * efx->loopback_modes.
@@ -552,64 +558,64 @@ struct efx_mac_stats {
552 u64 tx_bytes; 558 u64 tx_bytes;
553 u64 tx_good_bytes; 559 u64 tx_good_bytes;
554 u64 tx_bad_bytes; 560 u64 tx_bad_bytes;
555 unsigned long tx_packets; 561 u64 tx_packets;
556 unsigned long tx_bad; 562 u64 tx_bad;
557 unsigned long tx_pause; 563 u64 tx_pause;
558 unsigned long tx_control; 564 u64 tx_control;
559 unsigned long tx_unicast; 565 u64 tx_unicast;
560 unsigned long tx_multicast; 566 u64 tx_multicast;
561 unsigned long tx_broadcast; 567 u64 tx_broadcast;
562 unsigned long tx_lt64; 568 u64 tx_lt64;
563 unsigned long tx_64; 569 u64 tx_64;
564 unsigned long tx_65_to_127; 570 u64 tx_65_to_127;
565 unsigned long tx_128_to_255; 571 u64 tx_128_to_255;
566 unsigned long tx_256_to_511; 572 u64 tx_256_to_511;
567 unsigned long tx_512_to_1023; 573 u64 tx_512_to_1023;
568 unsigned long tx_1024_to_15xx; 574 u64 tx_1024_to_15xx;
569 unsigned long tx_15xx_to_jumbo; 575 u64 tx_15xx_to_jumbo;
570 unsigned long tx_gtjumbo; 576 u64 tx_gtjumbo;
571 unsigned long tx_collision; 577 u64 tx_collision;
572 unsigned long tx_single_collision; 578 u64 tx_single_collision;
573 unsigned long tx_multiple_collision; 579 u64 tx_multiple_collision;
574 unsigned long tx_excessive_collision; 580 u64 tx_excessive_collision;
575 unsigned long tx_deferred; 581 u64 tx_deferred;
576 unsigned long tx_late_collision; 582 u64 tx_late_collision;
577 unsigned long tx_excessive_deferred; 583 u64 tx_excessive_deferred;
578 unsigned long tx_non_tcpudp; 584 u64 tx_non_tcpudp;
579 unsigned long tx_mac_src_error; 585 u64 tx_mac_src_error;
580 unsigned long tx_ip_src_error; 586 u64 tx_ip_src_error;
581 u64 rx_bytes; 587 u64 rx_bytes;
582 u64 rx_good_bytes; 588 u64 rx_good_bytes;
583 u64 rx_bad_bytes; 589 u64 rx_bad_bytes;
584 unsigned long rx_packets; 590 u64 rx_packets;
585 unsigned long rx_good; 591 u64 rx_good;
586 unsigned long rx_bad; 592 u64 rx_bad;
587 unsigned long rx_pause; 593 u64 rx_pause;
588 unsigned long rx_control; 594 u64 rx_control;
589 unsigned long rx_unicast; 595 u64 rx_unicast;
590 unsigned long rx_multicast; 596 u64 rx_multicast;
591 unsigned long rx_broadcast; 597 u64 rx_broadcast;
592 unsigned long rx_lt64; 598 u64 rx_lt64;
593 unsigned long rx_64; 599 u64 rx_64;
594 unsigned long rx_65_to_127; 600 u64 rx_65_to_127;
595 unsigned long rx_128_to_255; 601 u64 rx_128_to_255;
596 unsigned long rx_256_to_511; 602 u64 rx_256_to_511;
597 unsigned long rx_512_to_1023; 603 u64 rx_512_to_1023;
598 unsigned long rx_1024_to_15xx; 604 u64 rx_1024_to_15xx;
599 unsigned long rx_15xx_to_jumbo; 605 u64 rx_15xx_to_jumbo;
600 unsigned long rx_gtjumbo; 606 u64 rx_gtjumbo;
601 unsigned long rx_bad_lt64; 607 u64 rx_bad_lt64;
602 unsigned long rx_bad_64_to_15xx; 608 u64 rx_bad_64_to_15xx;
603 unsigned long rx_bad_15xx_to_jumbo; 609 u64 rx_bad_15xx_to_jumbo;
604 unsigned long rx_bad_gtjumbo; 610 u64 rx_bad_gtjumbo;
605 unsigned long rx_overflow; 611 u64 rx_overflow;
606 unsigned long rx_missed; 612 u64 rx_missed;
607 unsigned long rx_false_carrier; 613 u64 rx_false_carrier;
608 unsigned long rx_symbol_error; 614 u64 rx_symbol_error;
609 unsigned long rx_align_error; 615 u64 rx_align_error;
610 unsigned long rx_length_error; 616 u64 rx_length_error;
611 unsigned long rx_internal_error; 617 u64 rx_internal_error;
612 unsigned long rx_good_lt64; 618 u64 rx_good_lt64;
613}; 619};
614 620
615/* Number of bits used in a multicast filter hash address */ 621/* Number of bits used in a multicast filter hash address */
@@ -625,6 +631,8 @@ union efx_multicast_hash {
625}; 631};
626 632
627struct efx_filter_state; 633struct efx_filter_state;
634struct efx_vf;
635struct vfdi_status;
628 636
629/** 637/**
630 * struct efx_nic - an Efx NIC 638 * struct efx_nic - an Efx NIC
@@ -640,6 +648,7 @@ struct efx_filter_state;
640 * @membase_phys: Memory BAR value as physical address 648 * @membase_phys: Memory BAR value as physical address
641 * @membase: Memory BAR value 649 * @membase: Memory BAR value
642 * @interrupt_mode: Interrupt mode 650 * @interrupt_mode: Interrupt mode
651 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
643 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 652 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
644 * @irq_rx_moderation: IRQ moderation time for RX event queues 653 * @irq_rx_moderation: IRQ moderation time for RX event queues
645 * @msg_enable: Log message enable flags 654 * @msg_enable: Log message enable flags
@@ -649,8 +658,13 @@ struct efx_filter_state;
649 * @rx_queue: RX DMA queues 658 * @rx_queue: RX DMA queues
650 * @channel: Channels 659 * @channel: Channels
651 * @channel_name: Names for channels and their IRQs 660 * @channel_name: Names for channels and their IRQs
661 * @extra_channel_types: Types of extra (non-traffic) channels that
662 * should be allocated for this NIC
652 * @rxq_entries: Size of receive queues requested by user. 663 * @rxq_entries: Size of receive queues requested by user.
653 * @txq_entries: Size of transmit queues requested by user. 664 * @txq_entries: Size of transmit queues requested by user.
665 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
666 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
667 * @sram_lim_qw: Qword address limit of SRAM
654 * @next_buffer_table: First available buffer table id 668 * @next_buffer_table: First available buffer table id
655 * @n_channels: Number of channels in use 669 * @n_channels: Number of channels in use
656 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 670 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -663,7 +677,7 @@ struct efx_filter_state;
663 * @int_error_expire: Time at which error count will be expired 677 * @int_error_expire: Time at which error count will be expired
664 * @irq_status: Interrupt status buffer 678 * @irq_status: Interrupt status buffer
665 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 679 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
666 * @fatal_irq_level: IRQ level (bit number) used for serious errors 680 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
667 * @mtd_list: List of MTDs attached to the NIC 681 * @mtd_list: List of MTDs attached to the NIC
668 * @nic_data: Hardware dependent state 682 * @nic_data: Hardware dependent state
669 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 683 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
@@ -676,7 +690,6 @@ struct efx_filter_state;
676 * @port_initialized: Port initialized? 690 * @port_initialized: Port initialized?
677 * @net_dev: Operating system network device. Consider holding the rtnl lock 691 * @net_dev: Operating system network device. Consider holding the rtnl lock
678 * @stats_buffer: DMA buffer for statistics 692 * @stats_buffer: DMA buffer for statistics
679 * @mac_op: MAC interface
680 * @phy_type: PHY type 693 * @phy_type: PHY type
681 * @phy_op: PHY interface 694 * @phy_op: PHY interface
682 * @phy_data: PHY private data (including PHY-specific stats) 695 * @phy_data: PHY private data (including PHY-specific stats)
@@ -689,21 +702,42 @@ struct efx_filter_state;
689 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 702 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
690 * @multicast_hash: Multicast hash table 703 * @multicast_hash: Multicast hash table
691 * @wanted_fc: Wanted flow control flags 704 * @wanted_fc: Wanted flow control flags
705 * @fc_disable: When non-zero flow control is disabled. Typically used to
706 * ensure that network back pressure doesn't delay dma queue flushes.
707 * Serialised by the rtnl lock.
692 * @mac_work: Work item for changing MAC promiscuity and multicast hash 708 * @mac_work: Work item for changing MAC promiscuity and multicast hash
693 * @loopback_mode: Loopback status 709 * @loopback_mode: Loopback status
694 * @loopback_modes: Supported loopback mode bitmask 710 * @loopback_modes: Supported loopback mode bitmask
695 * @loopback_selftest: Offline self-test private state 711 * @loopback_selftest: Offline self-test private state
712 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
713 * @rxq_flush_pending: Count of receive queues that need to be flushed.
714 * Decremented when efx_flush_rx_queue() is called.
715 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
716 * completed (either success or failure). Not used when MCDI is used to
717 * flush receive queues.
718 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
719 * @vf: Array of &struct efx_vf objects.
720 * @vf_count: Number of VFs intended to be enabled.
721 * @vf_init_count: Number of VFs that have been fully initialised.
722 * @vi_scale: log2 number of vnics per VF.
723 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
724 * @vfdi_status: Common VFDI status page to be dmad to VF address space.
725 * @local_addr_list: List of local addresses. Protected by %local_lock.
726 * @local_page_list: List of DMA addressable pages used to broadcast
727 * %local_addr_list. Protected by %local_lock.
728 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
729 * @peer_work: Work item to broadcast peer addresses to VMs.
696 * @monitor_work: Hardware monitor workitem 730 * @monitor_work: Hardware monitor workitem
697 * @biu_lock: BIU (bus interface unit) lock 731 * @biu_lock: BIU (bus interface unit) lock
698 * @last_irq_cpu: Last CPU to handle interrupt. 732 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
699 * This register is written with the SMP processor ID whenever an 733 * field is used by efx_test_interrupts() to verify that an
700 * interrupt is handled. It is used by efx_nic_test_interrupt() 734 * interrupt has occurred.
701 * to verify that an interrupt has occurred.
702 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 735 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
703 * @mac_stats: MAC statistics. These include all statistics the MACs 736 * @mac_stats: MAC statistics. These include all statistics the MACs
704 * can provide. Generic code converts these into a standard 737 * can provide. Generic code converts these into a standard
705 * &struct net_device_stats. 738 * &struct net_device_stats.
706 * @stats_lock: Statistics update lock. Serialises statistics fetches 739 * @stats_lock: Statistics update lock. Serialises statistics fetches
740 * and access to @mac_stats.
707 * 741 *
708 * This is stored in the private area of the &struct net_device. 742 * This is stored in the private area of the &struct net_device.
709 */ 743 */
@@ -722,6 +756,7 @@ struct efx_nic {
722 void __iomem *membase; 756 void __iomem *membase;
723 757
724 enum efx_int_mode interrupt_mode; 758 enum efx_int_mode interrupt_mode;
759 unsigned int timer_quantum_ns;
725 bool irq_rx_adaptive; 760 bool irq_rx_adaptive;
726 unsigned int irq_rx_moderation; 761 unsigned int irq_rx_moderation;
727 u32 msg_enable; 762 u32 msg_enable;
@@ -731,12 +766,18 @@ struct efx_nic {
731 766
732 struct efx_channel *channel[EFX_MAX_CHANNELS]; 767 struct efx_channel *channel[EFX_MAX_CHANNELS];
733 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; 768 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
769 const struct efx_channel_type *
770 extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
734 771
735 unsigned rxq_entries; 772 unsigned rxq_entries;
736 unsigned txq_entries; 773 unsigned txq_entries;
774 unsigned tx_dc_base;
775 unsigned rx_dc_base;
776 unsigned sram_lim_qw;
737 unsigned next_buffer_table; 777 unsigned next_buffer_table;
738 unsigned n_channels; 778 unsigned n_channels;
739 unsigned n_rx_channels; 779 unsigned n_rx_channels;
780 unsigned rss_spread;
740 unsigned tx_channel_offset; 781 unsigned tx_channel_offset;
741 unsigned n_tx_channels; 782 unsigned n_tx_channels;
742 unsigned int rx_buffer_len; 783 unsigned int rx_buffer_len;
@@ -749,7 +790,7 @@ struct efx_nic {
749 790
750 struct efx_buffer irq_status; 791 struct efx_buffer irq_status;
751 unsigned irq_zero_count; 792 unsigned irq_zero_count;
752 unsigned fatal_irq_level; 793 unsigned irq_level;
753 794
754#ifdef CONFIG_SFC_MTD 795#ifdef CONFIG_SFC_MTD
755 struct list_head mtd_list; 796 struct list_head mtd_list;
@@ -766,8 +807,6 @@ struct efx_nic {
766 807
767 struct efx_buffer stats_buffer; 808 struct efx_buffer stats_buffer;
768 809
769 const struct efx_mac_operations *mac_op;
770
771 unsigned int phy_type; 810 unsigned int phy_type;
772 const struct efx_phy_operations *phy_op; 811 const struct efx_phy_operations *phy_op;
773 void *phy_data; 812 void *phy_data;
@@ -782,6 +821,7 @@ struct efx_nic {
782 bool promiscuous; 821 bool promiscuous;
783 union efx_multicast_hash multicast_hash; 822 union efx_multicast_hash multicast_hash;
784 u8 wanted_fc; 823 u8 wanted_fc;
824 unsigned fc_disable;
785 825
786 atomic_t rx_reset; 826 atomic_t rx_reset;
787 enum efx_loopback_mode loopback_mode; 827 enum efx_loopback_mode loopback_mode;
@@ -791,11 +831,30 @@ struct efx_nic {
791 831
792 struct efx_filter_state *filter_state; 832 struct efx_filter_state *filter_state;
793 833
834 atomic_t drain_pending;
835 atomic_t rxq_flush_pending;
836 atomic_t rxq_flush_outstanding;
837 wait_queue_head_t flush_wq;
838
839#ifdef CONFIG_SFC_SRIOV
840 struct efx_channel *vfdi_channel;
841 struct efx_vf *vf;
842 unsigned vf_count;
843 unsigned vf_init_count;
844 unsigned vi_scale;
845 unsigned vf_buftbl_base;
846 struct efx_buffer vfdi_status;
847 struct list_head local_addr_list;
848 struct list_head local_page_list;
849 struct mutex local_lock;
850 struct work_struct peer_work;
851#endif
852
794 /* The following fields may be written more often */ 853 /* The following fields may be written more often */
795 854
796 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 855 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
797 spinlock_t biu_lock; 856 spinlock_t biu_lock;
798 volatile signed int last_irq_cpu; 857 int last_irq_cpu;
799 unsigned n_rx_nodesc_drop_cnt; 858 unsigned n_rx_nodesc_drop_cnt;
800 struct efx_mac_stats mac_stats; 859 struct efx_mac_stats mac_stats;
801 spinlock_t stats_lock; 860 spinlock_t stats_lock;
@@ -806,15 +865,6 @@ static inline int efx_dev_registered(struct efx_nic *efx)
806 return efx->net_dev->reg_state == NETREG_REGISTERED; 865 return efx->net_dev->reg_state == NETREG_REGISTERED;
807} 866}
808 867
809/* Net device name, for inclusion in log messages if it has been registered.
810 * Use efx->name not efx->net_dev->name so that races with (un)registration
811 * are harmless.
812 */
813static inline const char *efx_dev_name(struct efx_nic *efx)
814{
815 return efx_dev_registered(efx) ? efx->name : "";
816}
817
818static inline unsigned int efx_port_num(struct efx_nic *efx) 868static inline unsigned int efx_port_num(struct efx_nic *efx)
819{ 869{
820 return efx->net_dev->dev_id; 870 return efx->net_dev->dev_id;
@@ -825,6 +875,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
825 * @probe: Probe the controller 875 * @probe: Probe the controller
826 * @remove: Free resources allocated by probe() 876 * @remove: Free resources allocated by probe()
827 * @init: Initialise the controller 877 * @init: Initialise the controller
878 * @dimension_resources: Dimension controller resources (buffer table,
879 * and VIs once the available interrupt resources are clear)
828 * @fini: Shut down the controller 880 * @fini: Shut down the controller
829 * @monitor: Periodic function for polling link state and hardware monitor 881 * @monitor: Periodic function for polling link state and hardware monitor
830 * @map_reset_reason: Map ethtool reset reason to a reset method 882 * @map_reset_reason: Map ethtool reset reason to a reset method
@@ -840,14 +892,15 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
840 * @stop_stats: Stop the regular fetching of statistics 892 * @stop_stats: Stop the regular fetching of statistics
841 * @set_id_led: Set state of identifying LED or revert to automatic function 893 * @set_id_led: Set state of identifying LED or revert to automatic function
842 * @push_irq_moderation: Apply interrupt moderation value 894 * @push_irq_moderation: Apply interrupt moderation value
843 * @push_multicast_hash: Apply multicast hash table
844 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 895 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
896 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
897 * to the hardware. Serialised by the mac_lock.
898 * @check_mac_fault: Check MAC fault state. True if fault present.
845 * @get_wol: Get WoL configuration from driver state 899 * @get_wol: Get WoL configuration from driver state
846 * @set_wol: Push WoL configuration to the NIC 900 * @set_wol: Push WoL configuration to the NIC
847 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 901 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
848 * @test_registers: Test read/write functionality of control registers 902 * @test_registers: Test read/write functionality of control registers
849 * @test_nvram: Test validity of NVRAM contents 903 * @test_nvram: Test validity of NVRAM contents
850 * @default_mac_ops: efx_mac_operations to set at startup
851 * @revision: Hardware architecture revision 904 * @revision: Hardware architecture revision
852 * @mem_map_size: Memory BAR mapped size 905 * @mem_map_size: Memory BAR mapped size
853 * @txd_ptr_tbl_base: TX descriptor ring base address 906 * @txd_ptr_tbl_base: TX descriptor ring base address
@@ -862,8 +915,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
862 * from &enum efx_init_mode. 915 * from &enum efx_init_mode.
863 * @phys_addr_channels: Number of channels with physically addressed 916 * @phys_addr_channels: Number of channels with physically addressed
864 * descriptors 917 * descriptors
865 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches 918 * @timer_period_max: Maximum period of interrupt timer (in ticks)
866 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
867 * @offload_features: net_device feature flags for protocol offload 919 * @offload_features: net_device feature flags for protocol offload
868 * features implemented in hardware 920 * features implemented in hardware
869 */ 921 */
@@ -871,6 +923,7 @@ struct efx_nic_type {
871 int (*probe)(struct efx_nic *efx); 923 int (*probe)(struct efx_nic *efx);
872 void (*remove)(struct efx_nic *efx); 924 void (*remove)(struct efx_nic *efx);
873 int (*init)(struct efx_nic *efx); 925 int (*init)(struct efx_nic *efx);
926 void (*dimension_resources)(struct efx_nic *efx);
874 void (*fini)(struct efx_nic *efx); 927 void (*fini)(struct efx_nic *efx);
875 void (*monitor)(struct efx_nic *efx); 928 void (*monitor)(struct efx_nic *efx);
876 enum reset_type (*map_reset_reason)(enum reset_type reason); 929 enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -885,14 +938,14 @@ struct efx_nic_type {
885 void (*stop_stats)(struct efx_nic *efx); 938 void (*stop_stats)(struct efx_nic *efx);
886 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); 939 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
887 void (*push_irq_moderation)(struct efx_channel *channel); 940 void (*push_irq_moderation)(struct efx_channel *channel);
888 void (*push_multicast_hash)(struct efx_nic *efx);
889 int (*reconfigure_port)(struct efx_nic *efx); 941 int (*reconfigure_port)(struct efx_nic *efx);
942 int (*reconfigure_mac)(struct efx_nic *efx);
943 bool (*check_mac_fault)(struct efx_nic *efx);
890 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); 944 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
891 int (*set_wol)(struct efx_nic *efx, u32 type); 945 int (*set_wol)(struct efx_nic *efx, u32 type);
892 void (*resume_wol)(struct efx_nic *efx); 946 void (*resume_wol)(struct efx_nic *efx);
893 int (*test_registers)(struct efx_nic *efx); 947 int (*test_registers)(struct efx_nic *efx);
894 int (*test_nvram)(struct efx_nic *efx); 948 int (*test_nvram)(struct efx_nic *efx);
895 const struct efx_mac_operations *default_mac_ops;
896 949
897 int revision; 950 int revision;
898 unsigned int mem_map_size; 951 unsigned int mem_map_size;
@@ -906,8 +959,7 @@ struct efx_nic_type {
906 unsigned int rx_buffer_padding; 959 unsigned int rx_buffer_padding;
907 unsigned int max_interrupt_mode; 960 unsigned int max_interrupt_mode;
908 unsigned int phys_addr_channels; 961 unsigned int phys_addr_channels;
909 unsigned int tx_dc_base; 962 unsigned int timer_period_max;
910 unsigned int rx_dc_base;
911 netdev_features_t offload_features; 963 netdev_features_t offload_features;
912}; 964};
913 965
@@ -931,6 +983,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
931 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ 983 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
932 (_efx)->channel[_channel->channel + 1] : NULL) 984 (_efx)->channel[_channel->channel + 1] : NULL)
933 985
986/* Iterate over all used channels in reverse */
987#define efx_for_each_channel_rev(_channel, _efx) \
988 for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
989 _channel; \
990 _channel = _channel->channel ? \
991 (_efx)->channel[_channel->channel - 1] : NULL)
992
934static inline struct efx_tx_queue * 993static inline struct efx_tx_queue *
935efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) 994efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
936{ 995{
@@ -971,16 +1030,12 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
971 1030
972/* Iterate over all possible TX queues belonging to a channel */ 1031/* Iterate over all possible TX queues belonging to a channel */
973#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ 1032#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
974 for (_tx_queue = (_channel)->tx_queue; \ 1033 if (!efx_channel_has_tx_queues(_channel)) \
975 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 1034 ; \
976 _tx_queue++) 1035 else \
977 1036 for (_tx_queue = (_channel)->tx_queue; \
978static inline struct efx_rx_queue * 1037 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
979efx_get_rx_queue(struct efx_nic *efx, unsigned index) 1038 _tx_queue++)
980{
981 EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
982 return &efx->channel[index]->rx_queue;
983}
984 1039
985static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) 1040static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
986{ 1041{
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 3edfbaf5f022..2bf4283f05fe 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -49,24 +49,29 @@
49#define EFX_INT_ERROR_EXPIRE 3600 49#define EFX_INT_ERROR_EXPIRE 3600
50#define EFX_MAX_INT_ERRORS 5 50#define EFX_MAX_INT_ERRORS 5
51 51
52/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
53 */
54#define EFX_FLUSH_INTERVAL 10
55#define EFX_FLUSH_POLL_COUNT 100
56
57/* Size and alignment of special buffers (4KB) */
58#define EFX_BUF_SIZE 4096
59
60/* Depth of RX flush request fifo */ 52/* Depth of RX flush request fifo */
61#define EFX_RX_FLUSH_COUNT 4 53#define EFX_RX_FLUSH_COUNT 4
62 54
63/* Generated event code for efx_generate_test_event() */ 55/* Driver generated events */
64#define EFX_CHANNEL_MAGIC_TEST(_channel) \ 56#define _EFX_CHANNEL_MAGIC_TEST 0x000101
65 (0x00010100 + (_channel)->channel) 57#define _EFX_CHANNEL_MAGIC_FILL 0x000102
66 58#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
67/* Generated event code for efx_generate_fill_event() */ 59#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
68#define EFX_CHANNEL_MAGIC_FILL(_channel) \ 60
69 (0x00010200 + (_channel)->channel) 61#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
62#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
63
64#define EFX_CHANNEL_MAGIC_TEST(_channel) \
65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
66#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
68 efx_rx_queue_index(_rx_queue))
69#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
71 efx_rx_queue_index(_rx_queue))
72#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
74 (_tx_queue)->queue)
70 75
71/************************************************************************** 76/**************************************************************************
72 * 77 *
@@ -187,7 +192,7 @@ static void
187efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 192efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
188{ 193{
189 efx_qword_t buf_desc; 194 efx_qword_t buf_desc;
190 int index; 195 unsigned int index;
191 dma_addr_t dma_addr; 196 dma_addr_t dma_addr;
192 int i; 197 int i;
193 198
@@ -196,7 +201,7 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
196 /* Write buffer descriptors to NIC */ 201 /* Write buffer descriptors to NIC */
197 for (i = 0; i < buffer->entries; i++) { 202 for (i = 0; i < buffer->entries; i++) {
198 index = buffer->index + i; 203 index = buffer->index + i;
199 dma_addr = buffer->dma_addr + (i * 4096); 204 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
200 netif_dbg(efx, probe, efx->net_dev, 205 netif_dbg(efx, probe, efx->net_dev,
201 "mapping special buffer %d at %llx\n", 206 "mapping special buffer %d at %llx\n",
202 index, (unsigned long long)dma_addr); 207 index, (unsigned long long)dma_addr);
@@ -259,6 +264,10 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
259 /* Select new buffer ID */ 264 /* Select new buffer ID */
260 buffer->index = efx->next_buffer_table; 265 buffer->index = efx->next_buffer_table;
261 efx->next_buffer_table += buffer->entries; 266 efx->next_buffer_table += buffer->entries;
267#ifdef CONFIG_SFC_SRIOV
268 BUG_ON(efx_sriov_enabled(efx) &&
269 efx->vf_buftbl_base < efx->next_buffer_table);
270#endif
262 271
263 netif_dbg(efx, probe, efx->net_dev, 272 netif_dbg(efx, probe, efx->net_dev,
264 "allocating special buffers %d-%d at %llx+%x " 273 "allocating special buffers %d-%d at %llx+%x "
@@ -430,8 +439,6 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
430 struct efx_nic *efx = tx_queue->efx; 439 struct efx_nic *efx = tx_queue->efx;
431 efx_oword_t reg; 440 efx_oword_t reg;
432 441
433 tx_queue->flushed = FLUSH_NONE;
434
435 /* Pin TX descriptor ring */ 442 /* Pin TX descriptor ring */
436 efx_init_special_buffer(efx, &tx_queue->txd); 443 efx_init_special_buffer(efx, &tx_queue->txd);
437 444
@@ -488,9 +495,6 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
488 struct efx_nic *efx = tx_queue->efx; 495 struct efx_nic *efx = tx_queue->efx;
489 efx_oword_t tx_flush_descq; 496 efx_oword_t tx_flush_descq;
490 497
491 tx_queue->flushed = FLUSH_PENDING;
492
493 /* Post a flush command */
494 EFX_POPULATE_OWORD_2(tx_flush_descq, 498 EFX_POPULATE_OWORD_2(tx_flush_descq,
495 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 499 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
496 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 500 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -502,9 +506,6 @@ void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
502 struct efx_nic *efx = tx_queue->efx; 506 struct efx_nic *efx = tx_queue->efx;
503 efx_oword_t tx_desc_ptr; 507 efx_oword_t tx_desc_ptr;
504 508
505 /* The queue should have been flushed */
506 WARN_ON(tx_queue->flushed != FLUSH_DONE);
507
508 /* Remove TX descriptor ring from card */ 509 /* Remove TX descriptor ring from card */
509 EFX_ZERO_OWORD(tx_desc_ptr); 510 EFX_ZERO_OWORD(tx_desc_ptr);
510 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 511 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -595,8 +596,6 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
595 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 596 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
596 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 597 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
597 598
598 rx_queue->flushed = FLUSH_NONE;
599
600 /* Pin RX descriptor ring */ 599 /* Pin RX descriptor ring */
601 efx_init_special_buffer(efx, &rx_queue->rxd); 600 efx_init_special_buffer(efx, &rx_queue->rxd);
602 601
@@ -625,9 +624,6 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
625 struct efx_nic *efx = rx_queue->efx; 624 struct efx_nic *efx = rx_queue->efx;
626 efx_oword_t rx_flush_descq; 625 efx_oword_t rx_flush_descq;
627 626
628 rx_queue->flushed = FLUSH_PENDING;
629
630 /* Post a flush command */
631 EFX_POPULATE_OWORD_2(rx_flush_descq, 627 EFX_POPULATE_OWORD_2(rx_flush_descq,
632 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 628 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
633 FRF_AZ_RX_FLUSH_DESCQ, 629 FRF_AZ_RX_FLUSH_DESCQ,
@@ -640,9 +636,6 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
640 efx_oword_t rx_desc_ptr; 636 efx_oword_t rx_desc_ptr;
641 struct efx_nic *efx = rx_queue->efx; 637 struct efx_nic *efx = rx_queue->efx;
642 638
643 /* The queue should already have been flushed */
644 WARN_ON(rx_queue->flushed != FLUSH_DONE);
645
646 /* Remove RX descriptor ring from card */ 639 /* Remove RX descriptor ring from card */
647 EFX_ZERO_OWORD(rx_desc_ptr); 640 EFX_ZERO_OWORD(rx_desc_ptr);
648 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 641 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
@@ -660,6 +653,103 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
660 653
661/************************************************************************** 654/**************************************************************************
662 * 655 *
656 * Flush handling
657 *
658 **************************************************************************/
659
660/* efx_nic_flush_queues() must be woken up when all flushes are completed,
661 * or more RX flushes can be kicked off.
662 */
663static bool efx_flush_wake(struct efx_nic *efx)
664{
665 /* Ensure that all updates are visible to efx_nic_flush_queues() */
666 smp_mb();
667
668 return (atomic_read(&efx->drain_pending) == 0 ||
669 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
670 && atomic_read(&efx->rxq_flush_pending) > 0));
671}
672
673/* Flush all the transmit queues, and continue flushing receive queues until
674 * they're all flushed. Wait for the DRAIN events to be recieved so that there
675 * are no more RX and TX events left on any channel. */
676int efx_nic_flush_queues(struct efx_nic *efx)
677{
678 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
679 struct efx_channel *channel;
680 struct efx_rx_queue *rx_queue;
681 struct efx_tx_queue *tx_queue;
682 int rc = 0;
683
684 efx->fc_disable++;
685 efx->type->prepare_flush(efx);
686
687 efx_for_each_channel(channel, efx) {
688 efx_for_each_channel_tx_queue(tx_queue, channel) {
689 atomic_inc(&efx->drain_pending);
690 efx_flush_tx_queue(tx_queue);
691 }
692 efx_for_each_channel_rx_queue(rx_queue, channel) {
693 atomic_inc(&efx->drain_pending);
694 rx_queue->flush_pending = true;
695 atomic_inc(&efx->rxq_flush_pending);
696 }
697 }
698
699 while (timeout && atomic_read(&efx->drain_pending) > 0) {
700 /* If SRIOV is enabled, then offload receive queue flushing to
701 * the firmware (though we will still have to poll for
702 * completion). If that fails, fall back to the old scheme.
703 */
704 if (efx_sriov_enabled(efx)) {
705 rc = efx_mcdi_flush_rxqs(efx);
706 if (!rc)
707 goto wait;
708 }
709
710 /* The hardware supports four concurrent rx flushes, each of
711 * which may need to be retried if there is an outstanding
712 * descriptor fetch
713 */
714 efx_for_each_channel(channel, efx) {
715 efx_for_each_channel_rx_queue(rx_queue, channel) {
716 if (atomic_read(&efx->rxq_flush_outstanding) >=
717 EFX_RX_FLUSH_COUNT)
718 break;
719
720 if (rx_queue->flush_pending) {
721 rx_queue->flush_pending = false;
722 atomic_dec(&efx->rxq_flush_pending);
723 atomic_inc(&efx->rxq_flush_outstanding);
724 efx_flush_rx_queue(rx_queue);
725 }
726 }
727 }
728
729 wait:
730 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
731 timeout);
732 }
733
734 if (atomic_read(&efx->drain_pending)) {
735 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
736 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
737 atomic_read(&efx->rxq_flush_outstanding),
738 atomic_read(&efx->rxq_flush_pending));
739 rc = -ETIMEDOUT;
740
741 atomic_set(&efx->drain_pending, 0);
742 atomic_set(&efx->rxq_flush_pending, 0);
743 atomic_set(&efx->rxq_flush_outstanding, 0);
744 }
745
746 efx->fc_disable--;
747
748 return rc;
749}
750
751/**************************************************************************
752 *
663 * Event queue processing 753 * Event queue processing
664 * Event queues are processed by per-channel tasklets. 754 * Event queues are processed by per-channel tasklets.
665 * 755 *
@@ -682,7 +772,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
682} 772}
683 773
684/* Use HW to insert a SW defined event */ 774/* Use HW to insert a SW defined event */
685static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) 775void efx_generate_event(struct efx_nic *efx, unsigned int evq,
776 efx_qword_t *event)
686{ 777{
687 efx_oword_t drv_ev_reg; 778 efx_oword_t drv_ev_reg;
688 779
@@ -692,8 +783,18 @@ static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
692 drv_ev_reg.u32[1] = event->u32[1]; 783 drv_ev_reg.u32[1] = event->u32[1];
693 drv_ev_reg.u32[2] = 0; 784 drv_ev_reg.u32[2] = 0;
694 drv_ev_reg.u32[3] = 0; 785 drv_ev_reg.u32[3] = 0;
695 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); 786 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
696 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); 787 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
788}
789
790static void efx_magic_event(struct efx_channel *channel, u32 magic)
791{
792 efx_qword_t event;
793
794 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
795 FSE_AZ_EV_CODE_DRV_GEN_EV,
796 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
797 efx_generate_event(channel->efx, channel->channel, &event);
697} 798}
698 799
699/* Handle a transmit completion event 800/* Handle a transmit completion event
@@ -710,6 +811,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
710 struct efx_nic *efx = channel->efx; 811 struct efx_nic *efx = channel->efx;
711 int tx_packets = 0; 812 int tx_packets = 0;
712 813
814 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
815 return 0;
816
713 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 817 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
714 /* Transmit completion */ 818 /* Transmit completion */
715 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 819 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
@@ -726,11 +830,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
726 tx_queue = efx_channel_get_tx_queue( 830 tx_queue = efx_channel_get_tx_queue(
727 channel, tx_ev_q_label % EFX_TXQ_TYPES); 831 channel, tx_ev_q_label % EFX_TXQ_TYPES);
728 832
729 if (efx_dev_registered(efx)) 833 netif_tx_lock(efx->net_dev);
730 netif_tx_lock(efx->net_dev);
731 efx_notify_tx_desc(tx_queue); 834 efx_notify_tx_desc(tx_queue);
732 if (efx_dev_registered(efx)) 835 netif_tx_unlock(efx->net_dev);
733 netif_tx_unlock(efx->net_dev);
734 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 836 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
735 EFX_WORKAROUND_10727(efx)) { 837 EFX_WORKAROUND_10727(efx)) {
736 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 838 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -745,10 +847,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
745} 847}
746 848
747/* Detect errors included in the rx_evt_pkt_ok bit. */ 849/* Detect errors included in the rx_evt_pkt_ok bit. */
748static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 850static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
749 const efx_qword_t *event, 851 const efx_qword_t *event)
750 bool *rx_ev_pkt_ok,
751 bool *discard)
752{ 852{
753 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 853 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
754 struct efx_nic *efx = rx_queue->efx; 854 struct efx_nic *efx = rx_queue->efx;
@@ -793,15 +893,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
793 ++channel->n_rx_tcp_udp_chksum_err; 893 ++channel->n_rx_tcp_udp_chksum_err;
794 } 894 }
795 895
796 /* The frame must be discarded if any of these are true. */
797 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
798 rx_ev_tobe_disc | rx_ev_pause_frm);
799
800 /* TOBE_DISC is expected on unicast mismatches; don't print out an 896 /* TOBE_DISC is expected on unicast mismatches; don't print out an
801 * error message. FRM_TRUNC indicates RXDP dropped the packet due 897 * error message. FRM_TRUNC indicates RXDP dropped the packet due
802 * to a FIFO overflow. 898 * to a FIFO overflow.
803 */ 899 */
804#ifdef EFX_ENABLE_DEBUG 900#ifdef DEBUG
805 if (rx_ev_other_err && net_ratelimit()) { 901 if (rx_ev_other_err && net_ratelimit()) {
806 netif_dbg(efx, rx_err, efx->net_dev, 902 netif_dbg(efx, rx_err, efx->net_dev,
807 " RX queue %d unexpected RX event " 903 " RX queue %d unexpected RX event "
@@ -819,6 +915,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
819 rx_ev_pause_frm ? " [PAUSE]" : ""); 915 rx_ev_pause_frm ? " [PAUSE]" : "");
820 } 916 }
821#endif 917#endif
918
919 /* The frame must be discarded if any of these are true. */
920 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
921 rx_ev_tobe_disc | rx_ev_pause_frm) ?
922 EFX_RX_PKT_DISCARD : 0;
822} 923}
823 924
824/* Handle receive events that are not in-order. */ 925/* Handle receive events that are not in-order. */
@@ -851,8 +952,13 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
851 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 952 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
852 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 953 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
853 unsigned expected_ptr; 954 unsigned expected_ptr;
854 bool rx_ev_pkt_ok, discard = false, checksummed; 955 bool rx_ev_pkt_ok;
956 u16 flags;
855 struct efx_rx_queue *rx_queue; 957 struct efx_rx_queue *rx_queue;
958 struct efx_nic *efx = channel->efx;
959
960 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
961 return;
856 962
857 /* Basic packet information */ 963 /* Basic packet information */
858 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 964 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -874,12 +980,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
874 /* If packet is marked as OK and packet type is TCP/IP or 980 /* If packet is marked as OK and packet type is TCP/IP or
875 * UDP/IP, then we can rely on the hardware checksum. 981 * UDP/IP, then we can rely on the hardware checksum.
876 */ 982 */
877 checksummed = 983 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
878 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 984 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
879 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP; 985 EFX_RX_PKT_CSUMMED : 0;
880 } else { 986 } else {
881 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 987 flags = efx_handle_rx_not_ok(rx_queue, event);
882 checksummed = false;
883 } 988 }
884 989
885 /* Detect multicast packets that didn't match the filter */ 990 /* Detect multicast packets that didn't match the filter */
@@ -890,35 +995,111 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
890 995
891 if (unlikely(!rx_ev_mcast_hash_match)) { 996 if (unlikely(!rx_ev_mcast_hash_match)) {
892 ++channel->n_rx_mcast_mismatch; 997 ++channel->n_rx_mcast_mismatch;
893 discard = true; 998 flags |= EFX_RX_PKT_DISCARD;
894 } 999 }
895 } 1000 }
896 1001
897 channel->irq_mod_score += 2; 1002 channel->irq_mod_score += 2;
898 1003
899 /* Handle received packet */ 1004 /* Handle received packet */
900 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, 1005 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
901 checksummed, discard); 1006}
1007
1008/* If this flush done event corresponds to a &struct efx_tx_queue, then
1009 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1010 * of all transmit completions.
1011 */
1012static void
1013efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1014{
1015 struct efx_tx_queue *tx_queue;
1016 int qid;
1017
1018 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1019 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1020 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1021 qid % EFX_TXQ_TYPES);
1022
1023 efx_magic_event(tx_queue->channel,
1024 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1025 }
1026}
1027
1028/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1029 * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1030 * the RX queue back to the mask of RX queues in need of flushing.
1031 */
1032static void
1033efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1034{
1035 struct efx_channel *channel;
1036 struct efx_rx_queue *rx_queue;
1037 int qid;
1038 bool failed;
1039
1040 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1041 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1042 if (qid >= efx->n_channels)
1043 return;
1044 channel = efx_get_channel(efx, qid);
1045 if (!efx_channel_has_rx_queue(channel))
1046 return;
1047 rx_queue = efx_channel_get_rx_queue(channel);
1048
1049 if (failed) {
1050 netif_info(efx, hw, efx->net_dev,
1051 "RXQ %d flush retry\n", qid);
1052 rx_queue->flush_pending = true;
1053 atomic_inc(&efx->rxq_flush_pending);
1054 } else {
1055 efx_magic_event(efx_rx_queue_channel(rx_queue),
1056 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1057 }
1058 atomic_dec(&efx->rxq_flush_outstanding);
1059 if (efx_flush_wake(efx))
1060 wake_up(&efx->flush_wq);
1061}
1062
1063static void
1064efx_handle_drain_event(struct efx_channel *channel)
1065{
1066 struct efx_nic *efx = channel->efx;
1067
1068 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1069 atomic_dec(&efx->drain_pending);
1070 if (efx_flush_wake(efx))
1071 wake_up(&efx->flush_wq);
902} 1072}
903 1073
904static void 1074static void
905efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 1075efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
906{ 1076{
907 struct efx_nic *efx = channel->efx; 1077 struct efx_nic *efx = channel->efx;
908 unsigned code; 1078 struct efx_rx_queue *rx_queue =
1079 efx_channel_has_rx_queue(channel) ?
1080 efx_channel_get_rx_queue(channel) : NULL;
1081 unsigned magic, code;
909 1082
910 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1083 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
911 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 1084 code = _EFX_CHANNEL_MAGIC_CODE(magic);
912 ; /* ignore */ 1085
913 else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) 1086 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1087 /* ignore */
1088 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
914 /* The queue must be empty, so we won't receive any rx 1089 /* The queue must be empty, so we won't receive any rx
915 * events, so efx_process_channel() won't refill the 1090 * events, so efx_process_channel() won't refill the
916 * queue. Refill it here */ 1091 * queue. Refill it here */
917 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); 1092 efx_fast_push_rx_descriptors(rx_queue);
918 else 1093 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1094 rx_queue->enabled = false;
1095 efx_handle_drain_event(channel);
1096 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1097 efx_handle_drain_event(channel);
1098 } else {
919 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1099 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
920 "generated event "EFX_QWORD_FMT"\n", 1100 "generated event "EFX_QWORD_FMT"\n",
921 channel->channel, EFX_QWORD_VAL(*event)); 1101 channel->channel, EFX_QWORD_VAL(*event));
1102 }
922} 1103}
923 1104
924static void 1105static void
@@ -935,10 +1116,14 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
935 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1116 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
936 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1117 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
937 channel->channel, ev_sub_data); 1118 channel->channel, ev_sub_data);
1119 efx_handle_tx_flush_done(efx, event);
1120 efx_sriov_tx_flush_done(efx, event);
938 break; 1121 break;
939 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1122 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
940 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1123 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
941 channel->channel, ev_sub_data); 1124 channel->channel, ev_sub_data);
1125 efx_handle_rx_flush_done(efx, event);
1126 efx_sriov_rx_flush_done(efx, event);
942 break; 1127 break;
943 case FSE_AZ_EVQ_INIT_DONE_EV: 1128 case FSE_AZ_EVQ_INIT_DONE_EV:
944 netif_dbg(efx, hw, efx->net_dev, 1129 netif_dbg(efx, hw, efx->net_dev,
@@ -970,16 +1155,24 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
970 RESET_TYPE_DISABLE); 1155 RESET_TYPE_DISABLE);
971 break; 1156 break;
972 case FSE_BZ_RX_DSC_ERROR_EV: 1157 case FSE_BZ_RX_DSC_ERROR_EV:
973 netif_err(efx, rx_err, efx->net_dev, 1158 if (ev_sub_data < EFX_VI_BASE) {
974 "RX DMA Q %d reports descriptor fetch error." 1159 netif_err(efx, rx_err, efx->net_dev,
975 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 1160 "RX DMA Q %d reports descriptor fetch error."
976 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1161 " RX Q %d is disabled.\n", ev_sub_data,
1162 ev_sub_data);
1163 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1164 } else
1165 efx_sriov_desc_fetch_err(efx, ev_sub_data);
977 break; 1166 break;
978 case FSE_BZ_TX_DSC_ERROR_EV: 1167 case FSE_BZ_TX_DSC_ERROR_EV:
979 netif_err(efx, tx_err, efx->net_dev, 1168 if (ev_sub_data < EFX_VI_BASE) {
980 "TX DMA Q %d reports descriptor fetch error." 1169 netif_err(efx, tx_err, efx->net_dev,
981 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 1170 "TX DMA Q %d reports descriptor fetch error."
982 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1171 " TX Q %d is disabled.\n", ev_sub_data,
1172 ev_sub_data);
1173 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1174 } else
1175 efx_sriov_desc_fetch_err(efx, ev_sub_data);
983 break; 1176 break;
984 default: 1177 default:
985 netif_vdbg(efx, hw, efx->net_dev, 1178 netif_vdbg(efx, hw, efx->net_dev,
@@ -1039,6 +1232,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1039 case FSE_AZ_EV_CODE_DRIVER_EV: 1232 case FSE_AZ_EV_CODE_DRIVER_EV:
1040 efx_handle_driver_event(channel, &event); 1233 efx_handle_driver_event(channel, &event);
1041 break; 1234 break;
1235 case FSE_CZ_EV_CODE_USER_EV:
1236 efx_sriov_event(channel, &event);
1237 break;
1042 case FSE_CZ_EV_CODE_MCDI_EV: 1238 case FSE_CZ_EV_CODE_MCDI_EV:
1043 efx_mcdi_process_event(channel, &event); 1239 efx_mcdi_process_event(channel, &event);
1044 break; 1240 break;
@@ -1139,161 +1335,13 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
1139 1335
1140void efx_nic_generate_test_event(struct efx_channel *channel) 1336void efx_nic_generate_test_event(struct efx_channel *channel)
1141{ 1337{
1142 unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); 1338 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1143 efx_qword_t test_event;
1144
1145 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1146 FSE_AZ_EV_CODE_DRV_GEN_EV,
1147 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1148 efx_generate_event(channel, &test_event);
1149} 1339}
1150 1340
1151void efx_nic_generate_fill_event(struct efx_channel *channel) 1341void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1152{ 1342{
1153 unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); 1343 efx_magic_event(efx_rx_queue_channel(rx_queue),
1154 efx_qword_t test_event; 1344 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1155
1156 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1157 FSE_AZ_EV_CODE_DRV_GEN_EV,
1158 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1159 efx_generate_event(channel, &test_event);
1160}
1161
1162/**************************************************************************
1163 *
1164 * Flush handling
1165 *
1166 **************************************************************************/
1167
1168
1169static void efx_poll_flush_events(struct efx_nic *efx)
1170{
1171 struct efx_channel *channel = efx_get_channel(efx, 0);
1172 struct efx_tx_queue *tx_queue;
1173 struct efx_rx_queue *rx_queue;
1174 unsigned int read_ptr = channel->eventq_read_ptr;
1175 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1176
1177 do {
1178 efx_qword_t *event = efx_event(channel, read_ptr);
1179 int ev_code, ev_sub_code, ev_queue;
1180 bool ev_failed;
1181
1182 if (!efx_event_present(event))
1183 break;
1184
1185 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1186 ev_sub_code = EFX_QWORD_FIELD(*event,
1187 FSF_AZ_DRIVER_EV_SUBCODE);
1188 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1189 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1190 ev_queue = EFX_QWORD_FIELD(*event,
1191 FSF_AZ_DRIVER_EV_SUBDATA);
1192 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1193 tx_queue = efx_get_tx_queue(
1194 efx, ev_queue / EFX_TXQ_TYPES,
1195 ev_queue % EFX_TXQ_TYPES);
1196 tx_queue->flushed = FLUSH_DONE;
1197 }
1198 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1199 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1200 ev_queue = EFX_QWORD_FIELD(
1201 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1202 ev_failed = EFX_QWORD_FIELD(
1203 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1204 if (ev_queue < efx->n_rx_channels) {
1205 rx_queue = efx_get_rx_queue(efx, ev_queue);
1206 rx_queue->flushed =
1207 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1208 }
1209 }
1210
1211 /* We're about to destroy the queue anyway, so
1212 * it's ok to throw away every non-flush event */
1213 EFX_SET_QWORD(*event);
1214
1215 ++read_ptr;
1216 } while (read_ptr != end_ptr);
1217
1218 channel->eventq_read_ptr = read_ptr;
1219}
1220
1221/* Handle tx and rx flushes at the same time, since they run in
1222 * parallel in the hardware and there's no reason for us to
1223 * serialise them */
1224int efx_nic_flush_queues(struct efx_nic *efx)
1225{
1226 struct efx_channel *channel;
1227 struct efx_rx_queue *rx_queue;
1228 struct efx_tx_queue *tx_queue;
1229 int i, tx_pending, rx_pending;
1230
1231 /* If necessary prepare the hardware for flushing */
1232 efx->type->prepare_flush(efx);
1233
1234 /* Flush all tx queues in parallel */
1235 efx_for_each_channel(channel, efx) {
1236 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1237 if (tx_queue->initialised)
1238 efx_flush_tx_queue(tx_queue);
1239 }
1240 }
1241
1242 /* The hardware supports four concurrent rx flushes, each of which may
1243 * need to be retried if there is an outstanding descriptor fetch */
1244 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1245 rx_pending = tx_pending = 0;
1246 efx_for_each_channel(channel, efx) {
1247 efx_for_each_channel_rx_queue(rx_queue, channel) {
1248 if (rx_queue->flushed == FLUSH_PENDING)
1249 ++rx_pending;
1250 }
1251 }
1252 efx_for_each_channel(channel, efx) {
1253 efx_for_each_channel_rx_queue(rx_queue, channel) {
1254 if (rx_pending == EFX_RX_FLUSH_COUNT)
1255 break;
1256 if (rx_queue->flushed == FLUSH_FAILED ||
1257 rx_queue->flushed == FLUSH_NONE) {
1258 efx_flush_rx_queue(rx_queue);
1259 ++rx_pending;
1260 }
1261 }
1262 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1263 if (tx_queue->initialised &&
1264 tx_queue->flushed != FLUSH_DONE)
1265 ++tx_pending;
1266 }
1267 }
1268
1269 if (rx_pending == 0 && tx_pending == 0)
1270 return 0;
1271
1272 msleep(EFX_FLUSH_INTERVAL);
1273 efx_poll_flush_events(efx);
1274 }
1275
1276 /* Mark the queues as all flushed. We're going to return failure
1277 * leading to a reset, or fake up success anyway */
1278 efx_for_each_channel(channel, efx) {
1279 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1280 if (tx_queue->initialised &&
1281 tx_queue->flushed != FLUSH_DONE)
1282 netif_err(efx, hw, efx->net_dev,
1283 "tx queue %d flush command timed out\n",
1284 tx_queue->queue);
1285 tx_queue->flushed = FLUSH_DONE;
1286 }
1287 efx_for_each_channel_rx_queue(rx_queue, channel) {
1288 if (rx_queue->flushed != FLUSH_DONE)
1289 netif_err(efx, hw, efx->net_dev,
1290 "rx queue %d flush command timed out\n",
1291 efx_rx_queue_index(rx_queue));
1292 rx_queue->flushed = FLUSH_DONE;
1293 }
1294 }
1295
1296 return -ETIMEDOUT;
1297} 1345}
1298 1346
1299/************************************************************************** 1347/**************************************************************************
@@ -1311,7 +1359,7 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1311 efx_oword_t int_en_reg_ker; 1359 efx_oword_t int_en_reg_ker;
1312 1360
1313 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1361 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1314 FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, 1362 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1315 FRF_AZ_KER_INT_KER, force, 1363 FRF_AZ_KER_INT_KER, force,
1316 FRF_AZ_DRV_INT_EN_KER, enabled); 1364 FRF_AZ_DRV_INT_EN_KER, enabled);
1317 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1365 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1319,18 +1367,10 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1319 1367
1320void efx_nic_enable_interrupts(struct efx_nic *efx) 1368void efx_nic_enable_interrupts(struct efx_nic *efx)
1321{ 1369{
1322 struct efx_channel *channel;
1323
1324 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1370 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1325 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1371 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1326 1372
1327 /* Enable interrupts */
1328 efx_nic_interrupts(efx, true, false); 1373 efx_nic_interrupts(efx, true, false);
1329
1330 /* Force processing of all the channels to get the EVQ RPTRs up to
1331 date */
1332 efx_for_each_channel(channel, efx)
1333 efx_schedule_channel(channel);
1334} 1374}
1335 1375
1336void efx_nic_disable_interrupts(struct efx_nic *efx) 1376void efx_nic_disable_interrupts(struct efx_nic *efx)
@@ -1427,11 +1467,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1427 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1467 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1428 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1468 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1429 1469
1430 /* Check to see if we have a serious error condition */ 1470 /* Handle non-event-queue sources */
1431 if (queues & (1U << efx->fatal_irq_level)) { 1471 if (queues & (1U << efx->irq_level)) {
1432 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1472 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1433 if (unlikely(syserr)) 1473 if (unlikely(syserr))
1434 return efx_nic_fatal_interrupt(efx); 1474 return efx_nic_fatal_interrupt(efx);
1475 efx->last_irq_cpu = raw_smp_processor_id();
1435 } 1476 }
1436 1477
1437 if (queues != 0) { 1478 if (queues != 0) {
@@ -1441,7 +1482,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1441 /* Schedule processing of any interrupting queues */ 1482 /* Schedule processing of any interrupting queues */
1442 efx_for_each_channel(channel, efx) { 1483 efx_for_each_channel(channel, efx) {
1443 if (queues & 1) 1484 if (queues & 1)
1444 efx_schedule_channel(channel); 1485 efx_schedule_channel_irq(channel);
1445 queues >>= 1; 1486 queues >>= 1;
1446 } 1487 }
1447 result = IRQ_HANDLED; 1488 result = IRQ_HANDLED;
@@ -1458,18 +1499,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1458 efx_for_each_channel(channel, efx) { 1499 efx_for_each_channel(channel, efx) {
1459 event = efx_event(channel, channel->eventq_read_ptr); 1500 event = efx_event(channel, channel->eventq_read_ptr);
1460 if (efx_event_present(event)) 1501 if (efx_event_present(event))
1461 efx_schedule_channel(channel); 1502 efx_schedule_channel_irq(channel);
1462 else 1503 else
1463 efx_nic_eventq_read_ack(channel); 1504 efx_nic_eventq_read_ack(channel);
1464 } 1505 }
1465 } 1506 }
1466 1507
1467 if (result == IRQ_HANDLED) { 1508 if (result == IRQ_HANDLED)
1468 efx->last_irq_cpu = raw_smp_processor_id();
1469 netif_vdbg(efx, intr, efx->net_dev, 1509 netif_vdbg(efx, intr, efx->net_dev,
1470 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1510 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1471 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1511 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1472 }
1473 1512
1474 return result; 1513 return result;
1475} 1514}
@@ -1488,20 +1527,20 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1488 efx_oword_t *int_ker = efx->irq_status.addr; 1527 efx_oword_t *int_ker = efx->irq_status.addr;
1489 int syserr; 1528 int syserr;
1490 1529
1491 efx->last_irq_cpu = raw_smp_processor_id();
1492 netif_vdbg(efx, intr, efx->net_dev, 1530 netif_vdbg(efx, intr, efx->net_dev,
1493 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1531 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1494 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1532 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1495 1533
1496 /* Check to see if we have a serious error condition */ 1534 /* Handle non-event-queue sources */
1497 if (channel->channel == efx->fatal_irq_level) { 1535 if (channel->channel == efx->irq_level) {
1498 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1536 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1499 if (unlikely(syserr)) 1537 if (unlikely(syserr))
1500 return efx_nic_fatal_interrupt(efx); 1538 return efx_nic_fatal_interrupt(efx);
1539 efx->last_irq_cpu = raw_smp_processor_id();
1501 } 1540 }
1502 1541
1503 /* Schedule processing of the channel */ 1542 /* Schedule processing of the channel */
1504 efx_schedule_channel(channel); 1543 efx_schedule_channel_irq(channel);
1505 1544
1506 return IRQ_HANDLED; 1545 return IRQ_HANDLED;
1507} 1546}
@@ -1598,6 +1637,58 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1598 free_irq(efx->legacy_irq, efx); 1637 free_irq(efx->legacy_irq, efx);
1599} 1638}
1600 1639
1640/* Looks at available SRAM resources and works out how many queues we
1641 * can support, and where things like descriptor caches should live.
1642 *
1643 * SRAM is split up as follows:
1644 * 0 buftbl entries for channels
1645 * efx->vf_buftbl_base buftbl entries for SR-IOV
1646 * efx->rx_dc_base RX descriptor caches
1647 * efx->tx_dc_base TX descriptor caches
1648 */
1649void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1650{
1651 unsigned vi_count, buftbl_min;
1652
1653 /* Account for the buffer table entries backing the datapath channels
1654 * and the descriptor caches for those channels.
1655 */
1656 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1657 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1658 efx->n_channels * EFX_MAX_EVQ_SIZE)
1659 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1660 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1661
1662#ifdef CONFIG_SFC_SRIOV
1663 if (efx_sriov_wanted(efx)) {
1664 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1665
1666 efx->vf_buftbl_base = buftbl_min;
1667
1668 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1669 vi_count = max(vi_count, EFX_VI_BASE);
1670 buftbl_free = (sram_lim_qw - buftbl_min -
1671 vi_count * vi_dc_entries);
1672
1673 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1674 efx_vf_size(efx));
1675 vf_limit = min(buftbl_free / entries_per_vf,
1676 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1677
1678 if (efx->vf_count > vf_limit) {
1679 netif_err(efx, probe, efx->net_dev,
1680 "Reducing VF count from from %d to %d\n",
1681 efx->vf_count, vf_limit);
1682 efx->vf_count = vf_limit;
1683 }
1684 vi_count += efx->vf_count * efx_vf_size(efx);
1685 }
1686#endif
1687
1688 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1689 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1690}
1691
1601u32 efx_nic_fpga_ver(struct efx_nic *efx) 1692u32 efx_nic_fpga_ver(struct efx_nic *efx)
1602{ 1693{
1603 efx_oword_t altera_build; 1694 efx_oword_t altera_build;
@@ -1610,11 +1701,9 @@ void efx_nic_init_common(struct efx_nic *efx)
1610 efx_oword_t temp; 1701 efx_oword_t temp;
1611 1702
1612 /* Set positions of descriptor caches in SRAM. */ 1703 /* Set positions of descriptor caches in SRAM. */
1613 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, 1704 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1614 efx->type->tx_dc_base / 8);
1615 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1705 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1616 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, 1706 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1617 efx->type->rx_dc_base / 8);
1618 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1707 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1619 1708
1620 /* Set TX descriptor cache size. */ 1709 /* Set TX descriptor cache size. */
@@ -1640,10 +1729,10 @@ void efx_nic_init_common(struct efx_nic *efx)
1640 1729
1641 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1730 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1642 /* Use an interrupt level unused by event queues */ 1731 /* Use an interrupt level unused by event queues */
1643 efx->fatal_irq_level = 0x1f; 1732 efx->irq_level = 0x1f;
1644 else 1733 else
1645 /* Use a valid MSI-X vector */ 1734 /* Use a valid MSI-X vector */
1646 efx->fatal_irq_level = 0; 1735 efx->irq_level = 0;
1647 1736
1648 /* Enable all the genuinely fatal interrupts. (They are still 1737 /* Enable all the genuinely fatal interrupts. (They are still
1649 * masked by the overall interrupt mask, controlled by 1738 * masked by the overall interrupt mask, controlled by
@@ -1837,7 +1926,7 @@ struct efx_nic_reg_table {
1837 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ 1926 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1838 step, rows \ 1927 step, rows \
1839} 1928}
1840#define REGISTER_TABLE(name, min_rev, max_rev) \ 1929#define REGISTER_TABLE(name, min_rev, max_rev) \
1841 REGISTER_TABLE_DIMENSIONS( \ 1930 REGISTER_TABLE_DIMENSIONS( \
1842 name, FR_ ## min_rev ## max_rev ## _ ## name, \ 1931 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1843 min_rev, max_rev, \ 1932 min_rev, max_rev, \
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 5fb24d3aa3ca..246c4140453c 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -65,6 +65,11 @@ enum {
65#define FALCON_GMAC_LOOPBACKS \ 65#define FALCON_GMAC_LOOPBACKS \
66 (1 << LOOPBACK_GMAC) 66 (1 << LOOPBACK_GMAC)
67 67
68/* Alignment of PCIe DMA boundaries (4KB) */
69#define EFX_PAGE_SIZE 4096
70/* Size and alignment of buffer table entries (same) */
71#define EFX_BUF_SIZE EFX_PAGE_SIZE
72
68/** 73/**
69 * struct falcon_board_type - board operations and type information 74 * struct falcon_board_type - board operations and type information
70 * @id: Board type id, as found in NVRAM 75 * @id: Board type id, as found in NVRAM
@@ -144,12 +149,115 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
144 * struct siena_nic_data - Siena NIC state 149 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 150 * @mcdi: Management-Controller-to-Driver Interface
146 * @wol_filter_id: Wake-on-LAN packet filter id 151 * @wol_filter_id: Wake-on-LAN packet filter id
152 * @hwmon: Hardware monitor state
147 */ 153 */
148struct siena_nic_data { 154struct siena_nic_data {
149 struct efx_mcdi_iface mcdi; 155 struct efx_mcdi_iface mcdi;
150 int wol_filter_id; 156 int wol_filter_id;
157#ifdef CONFIG_SFC_MCDI_MON
158 struct efx_mcdi_mon hwmon;
159#endif
151}; 160};
152 161
162#ifdef CONFIG_SFC_MCDI_MON
163static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
164{
165 struct siena_nic_data *nic_data;
166 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
167 nic_data = efx->nic_data;
168 return &nic_data->hwmon;
169}
170#endif
171
172/*
173 * On the SFC9000 family each port is associated with 1 PCI physical
174 * function (PF) handled by sfc and a configurable number of virtual
175 * functions (VFs) that may be handled by some other driver, often in
176 * a VM guest. The queue pointer registers are mapped in both PF and
177 * VF BARs such that an 8K region provides access to a single RX, TX
178 * and event queue (collectively a Virtual Interface, VI or VNIC).
179 *
180 * The PF has access to all 1024 VIs while VFs are mapped to VIs
181 * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
182 * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
183 * The number of VIs and the VI_SCALE value are configurable but must
184 * be established at boot time by firmware.
185 */
186
187/* Maximum VI_SCALE parameter supported by Siena */
188#define EFX_VI_SCALE_MAX 6
189/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
190 * so this is the smallest allowed value. */
191#define EFX_VI_BASE 128U
192/* Maximum number of VFs allowed */
193#define EFX_VF_COUNT_MAX 127
194/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
195#define EFX_MAX_VF_EVQ_SIZE 8192UL
196/* The number of buffer table entries reserved for each VI on a VF */
197#define EFX_VF_BUFTBL_PER_VI \
198 ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
199 sizeof(efx_qword_t) / EFX_BUF_SIZE)
200
201#ifdef CONFIG_SFC_SRIOV
202
203static inline bool efx_sriov_wanted(struct efx_nic *efx)
204{
205 return efx->vf_count != 0;
206}
207static inline bool efx_sriov_enabled(struct efx_nic *efx)
208{
209 return efx->vf_init_count != 0;
210}
211static inline unsigned int efx_vf_size(struct efx_nic *efx)
212{
213 return 1 << efx->vi_scale;
214}
215
216extern int efx_init_sriov(void);
217extern void efx_sriov_probe(struct efx_nic *efx);
218extern int efx_sriov_init(struct efx_nic *efx);
219extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
220extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
221extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
222extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
223extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
224extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
225extern void efx_sriov_reset(struct efx_nic *efx);
226extern void efx_sriov_fini(struct efx_nic *efx);
227extern void efx_fini_sriov(void);
228
229#else
230
231static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
232static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
233static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
234
235static inline int efx_init_sriov(void) { return 0; }
236static inline void efx_sriov_probe(struct efx_nic *efx) {}
237static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
238static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
239static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
240 efx_qword_t *event) {}
241static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
242 efx_qword_t *event) {}
243static inline void efx_sriov_event(struct efx_channel *channel,
244 efx_qword_t *event) {}
245static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
246static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
247static inline void efx_sriov_reset(struct efx_nic *efx) {}
248static inline void efx_sriov_fini(struct efx_nic *efx) {}
249static inline void efx_fini_sriov(void) {}
250
251#endif
252
253extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
254extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
255 u16 vlan, u8 qos);
256extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
257 struct ifla_vf_info *ivf);
258extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
259 bool spoofchk);
260
153extern const struct efx_nic_type falcon_a1_nic_type; 261extern const struct efx_nic_type falcon_a1_nic_type;
154extern const struct efx_nic_type falcon_b0_nic_type; 262extern const struct efx_nic_type falcon_b0_nic_type;
155extern const struct efx_nic_type siena_a0_nic_type; 263extern const struct efx_nic_type siena_a0_nic_type;
@@ -176,6 +284,7 @@ extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
176extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); 284extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
177extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); 285extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
178extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); 286extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
287extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
179 288
180/* Event data path */ 289/* Event data path */
181extern int efx_nic_probe_eventq(struct efx_channel *channel); 290extern int efx_nic_probe_eventq(struct efx_channel *channel);
@@ -189,12 +298,14 @@ extern bool efx_nic_event_present(struct efx_channel *channel);
189/* MAC/PHY */ 298/* MAC/PHY */
190extern void falcon_drain_tx_fifo(struct efx_nic *efx); 299extern void falcon_drain_tx_fifo(struct efx_nic *efx);
191extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 300extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
301extern bool falcon_xmac_check_fault(struct efx_nic *efx);
302extern int falcon_reconfigure_xmac(struct efx_nic *efx);
303extern void falcon_update_stats_xmac(struct efx_nic *efx);
192 304
193/* Interrupts and test events */ 305/* Interrupts and test events */
194extern int efx_nic_init_interrupt(struct efx_nic *efx); 306extern int efx_nic_init_interrupt(struct efx_nic *efx);
195extern void efx_nic_enable_interrupts(struct efx_nic *efx); 307extern void efx_nic_enable_interrupts(struct efx_nic *efx);
196extern void efx_nic_generate_test_event(struct efx_channel *channel); 308extern void efx_nic_generate_test_event(struct efx_channel *channel);
197extern void efx_nic_generate_fill_event(struct efx_channel *channel);
198extern void efx_nic_generate_interrupt(struct efx_nic *efx); 309extern void efx_nic_generate_interrupt(struct efx_nic *efx);
199extern void efx_nic_disable_interrupts(struct efx_nic *efx); 310extern void efx_nic_disable_interrupts(struct efx_nic *efx);
200extern void efx_nic_fini_interrupt(struct efx_nic *efx); 311extern void efx_nic_fini_interrupt(struct efx_nic *efx);
@@ -202,15 +313,14 @@ extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
202extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); 313extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
203extern void falcon_irq_ack_a1(struct efx_nic *efx); 314extern void falcon_irq_ack_a1(struct efx_nic *efx);
204 315
205#define EFX_IRQ_MOD_RESOLUTION 5
206#define EFX_IRQ_MOD_MAX 0x1000
207
208/* Global Resources */ 316/* Global Resources */
209extern int efx_nic_flush_queues(struct efx_nic *efx); 317extern int efx_nic_flush_queues(struct efx_nic *efx);
210extern void falcon_start_nic_stats(struct efx_nic *efx); 318extern void falcon_start_nic_stats(struct efx_nic *efx);
211extern void falcon_stop_nic_stats(struct efx_nic *efx); 319extern void falcon_stop_nic_stats(struct efx_nic *efx);
212extern void falcon_setup_xaui(struct efx_nic *efx); 320extern void falcon_setup_xaui(struct efx_nic *efx);
213extern int falcon_reset_xaui(struct efx_nic *efx); 321extern int falcon_reset_xaui(struct efx_nic *efx);
322extern void
323efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
214extern void efx_nic_init_common(struct efx_nic *efx); 324extern void efx_nic_init_common(struct efx_nic *efx);
215extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); 325extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
216 326
@@ -264,8 +374,8 @@ extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
264#define MAC_DATA_LBN 0 374#define MAC_DATA_LBN 0
265#define MAC_DATA_WIDTH 32 375#define MAC_DATA_WIDTH 32
266 376
267extern void efx_nic_generate_event(struct efx_channel *channel, 377extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
268 efx_qword_t *event); 378 efx_qword_t *event);
269 379
270extern void falcon_poll_xmac(struct efx_nic *efx); 380extern void falcon_poll_xmac(struct efx_nic *efx);
271 381
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 7ad97e397406..8a7caf88ffb6 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -47,7 +47,7 @@
47#define PMA_PMD_FTX_STATIC_LBN 13 47#define PMA_PMD_FTX_STATIC_LBN 13
48#define PMA_PMD_VEND1_REG 0xc001 48#define PMA_PMD_VEND1_REG 0xc001
49#define PMA_PMD_VEND1_LBTXD_LBN 15 49#define PMA_PMD_VEND1_LBTXD_LBN 15
50#define PCS_VEND1_REG 0xc000 50#define PCS_VEND1_REG 0xc000
51#define PCS_VEND1_LBTXD_LBN 5 51#define PCS_VEND1_LBTXD_LBN 5
52 52
53void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode) 53void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
@@ -453,9 +453,9 @@ const struct efx_phy_operations falcon_qt202x_phy_ops = {
453 .probe = qt202x_phy_probe, 453 .probe = qt202x_phy_probe,
454 .init = qt202x_phy_init, 454 .init = qt202x_phy_init,
455 .reconfigure = qt202x_phy_reconfigure, 455 .reconfigure = qt202x_phy_reconfigure,
456 .poll = qt202x_phy_poll, 456 .poll = qt202x_phy_poll,
457 .fini = efx_port_dummy_op_void, 457 .fini = efx_port_dummy_op_void,
458 .remove = qt202x_phy_remove, 458 .remove = qt202x_phy_remove,
459 .get_settings = qt202x_phy_get_settings, 459 .get_settings = qt202x_phy_get_settings,
460 .set_settings = efx_mdio_set_settings, 460 .set_settings = efx_mdio_set_settings,
461 .test_alive = efx_mdio_test_alive, 461 .test_alive = efx_mdio_test_alive,
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h
index cc2c86b76a7b..ade4c4dc56ca 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/regs.h
@@ -2446,8 +2446,8 @@
2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
2449#define FRF_CZ_RMFT_DEST_MAC_LBN 16 2449#define FRF_CZ_RMFT_DEST_MAC_LBN 12
2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44 2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0 2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0
2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
2453 2453
@@ -2523,8 +2523,8 @@
2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
2526#define FRF_CZ_TMFT_SRC_MAC_LBN 16 2526#define FRF_CZ_TMFT_SRC_MAC_LBN 12
2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44 2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0 2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0
2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
2530 2530
@@ -2895,17 +2895,17 @@
2895 2895
2896/* RX_MAC_FILTER_TBL0 */ 2896/* RX_MAC_FILTER_TBL0 */
2897/* RMFT_DEST_MAC is wider than 32 bits */ 2897/* RMFT_DEST_MAC is wider than 32 bits */
2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12 2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44 2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16 2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
2902 2902
2903/* TX_MAC_FILTER_TBL0 */ 2903/* TX_MAC_FILTER_TBL0 */
2904/* TMFT_SRC_MAC is wider than 32 bits */ 2904/* TMFT_SRC_MAC is wider than 32 bits */
2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12 2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
2909 2909
2910/* TX_PACE_TBL */ 2910/* TX_PACE_TBL */
2911/* Values >20 are documented as reserved, but will result in a queue going 2911/* Values >20 are documented as reserved, but will result in a queue going
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index fc52fca74193..1ba290d0c21c 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -98,8 +98,8 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
98 /* Offset is always within one page, so we don't need to consider 98 /* Offset is always within one page, so we don't need to consider
99 * the page order. 99 * the page order.
100 */ 100 */
101 return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) + 101 return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
102 efx->type->rx_buffer_hash_size); 102 efx->type->rx_buffer_hash_size;
103} 103}
104static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 104static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
105{ 105{
@@ -108,11 +108,10 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
108 108
109static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) 109static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
110{ 110{
111 if (buf->is_page) 111 if (buf->flags & EFX_RX_BUF_PAGE)
112 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); 112 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
113 else 113 else
114 return ((u8 *)buf->u.skb->data + 114 return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
115 efx->type->rx_buffer_hash_size);
116} 115}
117 116
118static inline u32 efx_rx_buf_hash(const u8 *eh) 117static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -122,10 +121,10 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
122 return __le32_to_cpup((const __le32 *)(eh - 4)); 121 return __le32_to_cpup((const __le32 *)(eh - 4));
123#else 122#else
124 const u8 *data = eh - 4; 123 const u8 *data = eh - 4;
125 return ((u32)data[0] | 124 return (u32)data[0] |
126 (u32)data[1] << 8 | 125 (u32)data[1] << 8 |
127 (u32)data[2] << 16 | 126 (u32)data[2] << 16 |
128 (u32)data[3] << 24); 127 (u32)data[3] << 24;
129#endif 128#endif
130} 129}
131 130
@@ -159,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
159 /* Adjust the SKB for padding */ 158 /* Adjust the SKB for padding */
160 skb_reserve(skb, NET_IP_ALIGN); 159 skb_reserve(skb, NET_IP_ALIGN);
161 rx_buf->len = skb_len - NET_IP_ALIGN; 160 rx_buf->len = skb_len - NET_IP_ALIGN;
162 rx_buf->is_page = false; 161 rx_buf->flags = 0;
163 162
164 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 163 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
165 skb->data, rx_buf->len, 164 skb->data, rx_buf->len,
@@ -227,7 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
227 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 226 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
228 rx_buf->u.page = page; 227 rx_buf->u.page = page;
229 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 228 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
230 rx_buf->is_page = true; 229 rx_buf->flags = EFX_RX_BUF_PAGE;
231 ++rx_queue->added_count; 230 ++rx_queue->added_count;
232 ++rx_queue->alloc_page_count; 231 ++rx_queue->alloc_page_count;
233 ++state->refcnt; 232 ++state->refcnt;
@@ -248,7 +247,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
248static void efx_unmap_rx_buffer(struct efx_nic *efx, 247static void efx_unmap_rx_buffer(struct efx_nic *efx,
249 struct efx_rx_buffer *rx_buf) 248 struct efx_rx_buffer *rx_buf)
250{ 249{
251 if (rx_buf->is_page && rx_buf->u.page) { 250 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
252 struct efx_rx_page_state *state; 251 struct efx_rx_page_state *state;
253 252
254 state = page_address(rx_buf->u.page); 253 state = page_address(rx_buf->u.page);
@@ -258,7 +257,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
258 efx_rx_buf_size(efx), 257 efx_rx_buf_size(efx),
259 PCI_DMA_FROMDEVICE); 258 PCI_DMA_FROMDEVICE);
260 } 259 }
261 } else if (!rx_buf->is_page && rx_buf->u.skb) { 260 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
262 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 261 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
263 rx_buf->len, PCI_DMA_FROMDEVICE); 262 rx_buf->len, PCI_DMA_FROMDEVICE);
264 } 263 }
@@ -267,10 +266,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
267static void efx_free_rx_buffer(struct efx_nic *efx, 266static void efx_free_rx_buffer(struct efx_nic *efx,
268 struct efx_rx_buffer *rx_buf) 267 struct efx_rx_buffer *rx_buf)
269{ 268{
270 if (rx_buf->is_page && rx_buf->u.page) { 269 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
271 __free_pages(rx_buf->u.page, efx->rx_buffer_order); 270 __free_pages(rx_buf->u.page, efx->rx_buffer_order);
272 rx_buf->u.page = NULL; 271 rx_buf->u.page = NULL;
273 } else if (!rx_buf->is_page && rx_buf->u.skb) { 272 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
274 dev_kfree_skb_any(rx_buf->u.skb); 273 dev_kfree_skb_any(rx_buf->u.skb);
275 rx_buf->u.skb = NULL; 274 rx_buf->u.skb = NULL;
276 } 275 }
@@ -310,7 +309,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
310 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 309 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
311 new_buf->u.page = rx_buf->u.page; 310 new_buf->u.page = rx_buf->u.page;
312 new_buf->len = rx_buf->len; 311 new_buf->len = rx_buf->len;
313 new_buf->is_page = true; 312 new_buf->flags = EFX_RX_BUF_PAGE;
314 ++rx_queue->added_count; 313 ++rx_queue->added_count;
315} 314}
316 315
@@ -324,7 +323,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
324 struct efx_rx_buffer *new_buf; 323 struct efx_rx_buffer *new_buf;
325 unsigned index; 324 unsigned index;
326 325
327 if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 326 rx_buf->flags &= EFX_RX_BUF_PAGE;
327
328 if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
329 efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
328 page_count(rx_buf->u.page) == 1) 330 page_count(rx_buf->u.page) == 1)
329 efx_resurrect_rx_buffer(rx_queue, rx_buf); 331 efx_resurrect_rx_buffer(rx_queue, rx_buf);
330 332
@@ -402,17 +404,15 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
402void efx_rx_slow_fill(unsigned long context) 404void efx_rx_slow_fill(unsigned long context)
403{ 405{
404 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; 406 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
405 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
406 407
407 /* Post an event to cause NAPI to run and refill the queue */ 408 /* Post an event to cause NAPI to run and refill the queue */
408 efx_nic_generate_fill_event(channel); 409 efx_nic_generate_fill_event(rx_queue);
409 ++rx_queue->slow_fill_count; 410 ++rx_queue->slow_fill_count;
410} 411}
411 412
412static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 413static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
413 struct efx_rx_buffer *rx_buf, 414 struct efx_rx_buffer *rx_buf,
414 int len, bool *discard, 415 int len, bool *leak_packet)
415 bool *leak_packet)
416{ 416{
417 struct efx_nic *efx = rx_queue->efx; 417 struct efx_nic *efx = rx_queue->efx;
418 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 418 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -423,7 +423,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
423 /* The packet must be discarded, but this is only a fatal error 423 /* The packet must be discarded, but this is only a fatal error
424 * if the caller indicated it was 424 * if the caller indicated it was
425 */ 425 */
426 *discard = true; 426 rx_buf->flags |= EFX_RX_PKT_DISCARD;
427 427
428 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { 428 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
429 if (net_ratelimit()) 429 if (net_ratelimit())
@@ -436,7 +436,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
436 * data at the end of the skb will be trashed. So 436 * data at the end of the skb will be trashed. So
437 * we have no choice but to leak the fragment. 437 * we have no choice but to leak the fragment.
438 */ 438 */
439 *leak_packet = !rx_buf->is_page; 439 *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
440 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 440 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
441 } else { 441 } else {
442 if (net_ratelimit()) 442 if (net_ratelimit())
@@ -456,13 +456,13 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
456 */ 456 */
457static void efx_rx_packet_gro(struct efx_channel *channel, 457static void efx_rx_packet_gro(struct efx_channel *channel,
458 struct efx_rx_buffer *rx_buf, 458 struct efx_rx_buffer *rx_buf,
459 const u8 *eh, bool checksummed) 459 const u8 *eh)
460{ 460{
461 struct napi_struct *napi = &channel->napi_str; 461 struct napi_struct *napi = &channel->napi_str;
462 gro_result_t gro_result; 462 gro_result_t gro_result;
463 463
464 /* Pass the skb/page into the GRO engine */ 464 /* Pass the skb/page into the GRO engine */
465 if (rx_buf->is_page) { 465 if (rx_buf->flags & EFX_RX_BUF_PAGE) {
466 struct efx_nic *efx = channel->efx; 466 struct efx_nic *efx = channel->efx;
467 struct page *page = rx_buf->u.page; 467 struct page *page = rx_buf->u.page;
468 struct sk_buff *skb; 468 struct sk_buff *skb;
@@ -484,8 +484,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
484 skb->len = rx_buf->len; 484 skb->len = rx_buf->len;
485 skb->data_len = rx_buf->len; 485 skb->data_len = rx_buf->len;
486 skb->truesize += rx_buf->len; 486 skb->truesize += rx_buf->len;
487 skb->ip_summed = 487 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
488 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 488 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
489 489
490 skb_record_rx_queue(skb, channel->channel); 490 skb_record_rx_queue(skb, channel->channel);
491 491
@@ -493,7 +493,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
493 } else { 493 } else {
494 struct sk_buff *skb = rx_buf->u.skb; 494 struct sk_buff *skb = rx_buf->u.skb;
495 495
496 EFX_BUG_ON_PARANOID(!checksummed); 496 EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
497 rx_buf->u.skb = NULL; 497 rx_buf->u.skb = NULL;
498 skb->ip_summed = CHECKSUM_UNNECESSARY; 498 skb->ip_summed = CHECKSUM_UNNECESSARY;
499 499
@@ -509,7 +509,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
509} 509}
510 510
511void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 511void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
512 unsigned int len, bool checksummed, bool discard) 512 unsigned int len, u16 flags)
513{ 513{
514 struct efx_nic *efx = rx_queue->efx; 514 struct efx_nic *efx = rx_queue->efx;
515 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 515 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
@@ -517,6 +517,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
517 bool leak_packet = false; 517 bool leak_packet = false;
518 518
519 rx_buf = efx_rx_buffer(rx_queue, index); 519 rx_buf = efx_rx_buffer(rx_queue, index);
520 rx_buf->flags |= flags;
520 521
521 /* This allows the refill path to post another buffer. 522 /* This allows the refill path to post another buffer.
522 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 523 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -525,18 +526,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
525 rx_queue->removed_count++; 526 rx_queue->removed_count++;
526 527
527 /* Validate the length encoded in the event vs the descriptor pushed */ 528 /* Validate the length encoded in the event vs the descriptor pushed */
528 efx_rx_packet__check_len(rx_queue, rx_buf, len, 529 efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
529 &discard, &leak_packet);
530 530
531 netif_vdbg(efx, rx_status, efx->net_dev, 531 netif_vdbg(efx, rx_status, efx->net_dev,
532 "RX queue %d received id %x at %llx+%x %s%s\n", 532 "RX queue %d received id %x at %llx+%x %s%s\n",
533 efx_rx_queue_index(rx_queue), index, 533 efx_rx_queue_index(rx_queue), index,
534 (unsigned long long)rx_buf->dma_addr, len, 534 (unsigned long long)rx_buf->dma_addr, len,
535 (checksummed ? " [SUMMED]" : ""), 535 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
536 (discard ? " [DISCARD]" : "")); 536 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
537 537
538 /* Discard packet, if instructed to do so */ 538 /* Discard packet, if instructed to do so */
539 if (unlikely(discard)) { 539 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
540 if (unlikely(leak_packet)) 540 if (unlikely(leak_packet))
541 channel->n_skbuff_leaks++; 541 channel->n_skbuff_leaks++;
542 else 542 else
@@ -563,18 +563,33 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
563 rx_buf->len = len - efx->type->rx_buffer_hash_size; 563 rx_buf->len = len - efx->type->rx_buffer_hash_size;
564out: 564out:
565 if (channel->rx_pkt) 565 if (channel->rx_pkt)
566 __efx_rx_packet(channel, 566 __efx_rx_packet(channel, channel->rx_pkt);
567 channel->rx_pkt, channel->rx_pkt_csummed);
568 channel->rx_pkt = rx_buf; 567 channel->rx_pkt = rx_buf;
569 channel->rx_pkt_csummed = checksummed; 568}
569
570static void efx_rx_deliver(struct efx_channel *channel,
571 struct efx_rx_buffer *rx_buf)
572{
573 struct sk_buff *skb;
574
575 /* We now own the SKB */
576 skb = rx_buf->u.skb;
577 rx_buf->u.skb = NULL;
578
579 /* Set the SKB flags */
580 skb_checksum_none_assert(skb);
581
582 /* Pass the packet up */
583 netif_receive_skb(skb);
584
585 /* Update allocation strategy method */
586 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
570} 587}
571 588
572/* Handle a received packet. Second half: Touches packet payload. */ 589/* Handle a received packet. Second half: Touches packet payload. */
573void __efx_rx_packet(struct efx_channel *channel, 590void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
574 struct efx_rx_buffer *rx_buf, bool checksummed)
575{ 591{
576 struct efx_nic *efx = channel->efx; 592 struct efx_nic *efx = channel->efx;
577 struct sk_buff *skb;
578 u8 *eh = efx_rx_buf_eh(efx, rx_buf); 593 u8 *eh = efx_rx_buf_eh(efx, rx_buf);
579 594
580 /* If we're in loopback test, then pass the packet directly to the 595 /* If we're in loopback test, then pass the packet directly to the
@@ -586,8 +601,8 @@ void __efx_rx_packet(struct efx_channel *channel,
586 return; 601 return;
587 } 602 }
588 603
589 if (!rx_buf->is_page) { 604 if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
590 skb = rx_buf->u.skb; 605 struct sk_buff *skb = rx_buf->u.skb;
591 606
592 prefetch(skb_shinfo(skb)); 607 prefetch(skb_shinfo(skb));
593 608
@@ -605,25 +620,12 @@ void __efx_rx_packet(struct efx_channel *channel,
605 } 620 }
606 621
607 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 622 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
608 checksummed = false; 623 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
609
610 if (likely(checksummed || rx_buf->is_page)) {
611 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
612 return;
613 }
614 624
615 /* We now own the SKB */ 625 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
616 skb = rx_buf->u.skb; 626 efx_rx_packet_gro(channel, rx_buf, eh);
617 rx_buf->u.skb = NULL; 627 else
618 628 efx_rx_deliver(channel, rx_buf);
619 /* Set the SKB flags */
620 skb_checksum_none_assert(skb);
621
622 /* Pass the packet up */
623 netif_receive_skb(skb);
624
625 /* Update allocation strategy method */
626 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
627} 629}
628 630
629void efx_rx_strategy(struct efx_channel *channel) 631void efx_rx_strategy(struct efx_channel *channel)
@@ -703,6 +705,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
703 rx_queue->fast_fill_limit = limit; 705 rx_queue->fast_fill_limit = limit;
704 706
705 /* Set up RX descriptor ring */ 707 /* Set up RX descriptor ring */
708 rx_queue->enabled = true;
706 efx_nic_init_rx(rx_queue); 709 efx_nic_init_rx(rx_queue);
707} 710}
708 711
@@ -714,6 +717,9 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
714 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 717 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
715 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 718 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
716 719
720 /* A flush failure might have left rx_queue->enabled */
721 rx_queue->enabled = false;
722
717 del_timer_sync(&rx_queue->slow_fill); 723 del_timer_sync(&rx_queue->slow_fill);
718 efx_nic_fini_rx(rx_queue); 724 efx_nic_fini_rx(rx_queue);
719 725
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 52edd24fcde3..febe2a9e6211 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -19,7 +19,6 @@
19#include <linux/udp.h> 19#include <linux/udp.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <asm/io.h>
23#include "net_driver.h" 22#include "net_driver.h"
24#include "efx.h" 23#include "efx.h"
25#include "nic.h" 24#include "nic.h"
@@ -50,7 +49,7 @@ static const char payload_msg[] =
50 49
51/* Interrupt mode names */ 50/* Interrupt mode names */
52static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; 51static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
53static const char *efx_interrupt_mode_names[] = { 52static const char *const efx_interrupt_mode_names[] = {
54 [EFX_INT_MODE_MSIX] = "MSI-X", 53 [EFX_INT_MODE_MSIX] = "MSI-X",
55 [EFX_INT_MODE_MSI] = "MSI", 54 [EFX_INT_MODE_MSI] = "MSI",
56 [EFX_INT_MODE_LEGACY] = "legacy", 55 [EFX_INT_MODE_LEGACY] = "legacy",
@@ -131,6 +130,8 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
131static int efx_test_interrupts(struct efx_nic *efx, 130static int efx_test_interrupts(struct efx_nic *efx,
132 struct efx_self_tests *tests) 131 struct efx_self_tests *tests)
133{ 132{
133 int cpu;
134
134 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 135 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
135 tests->interrupt = -1; 136 tests->interrupt = -1;
136 137
@@ -143,7 +144,8 @@ static int efx_test_interrupts(struct efx_nic *efx,
143 /* Wait for arrival of test interrupt. */ 144 /* Wait for arrival of test interrupt. */
144 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); 145 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
145 schedule_timeout_uninterruptible(HZ / 10); 146 schedule_timeout_uninterruptible(HZ / 10);
146 if (efx->last_irq_cpu >= 0) 147 cpu = ACCESS_ONCE(efx->last_irq_cpu);
148 if (cpu >= 0)
147 goto success; 149 goto success;
148 150
149 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); 151 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
@@ -151,8 +153,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
151 153
152 success: 154 success:
153 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", 155 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
154 INT_MODE(efx), 156 INT_MODE(efx), cpu);
155 efx->last_irq_cpu);
156 tests->interrupt = 1; 157 tests->interrupt = 1;
157 return 0; 158 return 0;
158} 159}
@@ -162,56 +163,57 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
162 struct efx_self_tests *tests) 163 struct efx_self_tests *tests)
163{ 164{
164 struct efx_nic *efx = channel->efx; 165 struct efx_nic *efx = channel->efx;
165 unsigned int read_ptr, count; 166 unsigned int read_ptr;
166 167 bool napi_ran, dma_seen, int_seen;
167 tests->eventq_dma[channel->channel] = -1;
168 tests->eventq_int[channel->channel] = -1;
169 tests->eventq_poll[channel->channel] = -1;
170 168
171 read_ptr = channel->eventq_read_ptr; 169 read_ptr = channel->eventq_read_ptr;
172 channel->efx->last_irq_cpu = -1; 170 channel->last_irq_cpu = -1;
173 smp_wmb(); 171 smp_wmb();
174 172
175 efx_nic_generate_test_event(channel); 173 efx_nic_generate_test_event(channel);
176 174
177 /* Wait for arrival of interrupt */ 175 /* Wait for arrival of interrupt. NAPI processing may or may
178 count = 0; 176 * not complete in time, but we can cope in any case.
179 do { 177 */
180 schedule_timeout_uninterruptible(HZ / 100); 178 msleep(10);
181 179 napi_disable(&channel->napi_str);
182 if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr) 180 if (channel->eventq_read_ptr != read_ptr) {
183 goto eventq_ok; 181 napi_ran = true;
184 } while (++count < 2); 182 dma_seen = true;
185 183 int_seen = true;
186 netif_err(efx, drv, efx->net_dev, 184 } else {
187 "channel %d timed out waiting for event queue\n", 185 napi_ran = false;
188 channel->channel); 186 dma_seen = efx_nic_event_present(channel);
189 187 int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
190 /* See if interrupt arrived */
191 if (channel->efx->last_irq_cpu >= 0) {
192 netif_err(efx, drv, efx->net_dev,
193 "channel %d saw interrupt on CPU%d "
194 "during event queue test\n", channel->channel,
195 raw_smp_processor_id());
196 tests->eventq_int[channel->channel] = 1;
197 } 188 }
189 napi_enable(&channel->napi_str);
190 efx_nic_eventq_read_ack(channel);
191
192 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
193 tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
198 194
199 /* Check to see if event was received even if interrupt wasn't */ 195 if (dma_seen && int_seen) {
200 if (efx_nic_event_present(channel)) { 196 netif_dbg(efx, drv, efx->net_dev,
197 "channel %d event queue passed (with%s NAPI)\n",
198 channel->channel, napi_ran ? "" : "out");
199 return 0;
200 } else {
201 /* Report failure and whether either interrupt or DMA worked */
201 netif_err(efx, drv, efx->net_dev, 202 netif_err(efx, drv, efx->net_dev,
202 "channel %d event was generated, but " 203 "channel %d timed out waiting for event queue\n",
203 "failed to trigger an interrupt\n", channel->channel); 204 channel->channel);
204 tests->eventq_dma[channel->channel] = 1; 205 if (int_seen)
206 netif_err(efx, drv, efx->net_dev,
207 "channel %d saw interrupt "
208 "during event queue test\n",
209 channel->channel);
210 if (dma_seen)
211 netif_err(efx, drv, efx->net_dev,
212 "channel %d event was generated, but "
213 "failed to trigger an interrupt\n",
214 channel->channel);
215 return -ETIMEDOUT;
205 } 216 }
206
207 return -ETIMEDOUT;
208 eventq_ok:
209 netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
210 channel->channel);
211 tests->eventq_dma[channel->channel] = 1;
212 tests->eventq_int[channel->channel] = 1;
213 tests->eventq_poll[channel->channel] = 1;
214 return 0;
215} 217}
216 218
217static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, 219static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
@@ -316,7 +318,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
316 return; 318 return;
317 319
318 err: 320 err:
319#ifdef EFX_ENABLE_DEBUG 321#ifdef DEBUG
320 if (atomic_read(&state->rx_bad) == 0) { 322 if (atomic_read(&state->rx_bad) == 0) {
321 netif_err(efx, drv, efx->net_dev, "received packet:\n"); 323 netif_err(efx, drv, efx->net_dev, "received packet:\n");
322 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 324 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
@@ -395,11 +397,9 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
395 * interrupt handler. */ 397 * interrupt handler. */
396 smp_wmb(); 398 smp_wmb();
397 399
398 if (efx_dev_registered(efx)) 400 netif_tx_lock_bh(efx->net_dev);
399 netif_tx_lock_bh(efx->net_dev);
400 rc = efx_enqueue_skb(tx_queue, skb); 401 rc = efx_enqueue_skb(tx_queue, skb);
401 if (efx_dev_registered(efx)) 402 netif_tx_unlock_bh(efx->net_dev);
402 netif_tx_unlock_bh(efx->net_dev);
403 403
404 if (rc != NETDEV_TX_OK) { 404 if (rc != NETDEV_TX_OK) {
405 netif_err(efx, drv, efx->net_dev, 405 netif_err(efx, drv, efx->net_dev,
@@ -440,20 +440,18 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
440 int tx_done = 0, rx_good, rx_bad; 440 int tx_done = 0, rx_good, rx_bad;
441 int i, rc = 0; 441 int i, rc = 0;
442 442
443 if (efx_dev_registered(efx)) 443 netif_tx_lock_bh(efx->net_dev);
444 netif_tx_lock_bh(efx->net_dev);
445 444
446 /* Count the number of tx completions, and decrement the refcnt. Any 445 /* Count the number of tx completions, and decrement the refcnt. Any
447 * skbs not already completed will be free'd when the queue is flushed */ 446 * skbs not already completed will be free'd when the queue is flushed */
448 for (i=0; i < state->packet_count; i++) { 447 for (i = 0; i < state->packet_count; i++) {
449 skb = state->skbs[i]; 448 skb = state->skbs[i];
450 if (skb && !skb_shared(skb)) 449 if (skb && !skb_shared(skb))
451 ++tx_done; 450 ++tx_done;
452 dev_kfree_skb_any(skb); 451 dev_kfree_skb_any(skb);
453 } 452 }
454 453
455 if (efx_dev_registered(efx)) 454 netif_tx_unlock_bh(efx->net_dev);
456 netif_tx_unlock_bh(efx->net_dev);
457 455
458 /* Check TX completion and received packet counts */ 456 /* Check TX completion and received packet counts */
459 rx_good = atomic_read(&state->rx_good); 457 rx_good = atomic_read(&state->rx_good);
@@ -570,7 +568,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
570 mutex_lock(&efx->mac_lock); 568 mutex_lock(&efx->mac_lock);
571 link_up = link_state->up; 569 link_up = link_state->up;
572 if (link_up) 570 if (link_up)
573 link_up = !efx->mac_op->check_fault(efx); 571 link_up = !efx->type->check_mac_fault(efx);
574 mutex_unlock(&efx->mac_lock); 572 mutex_unlock(&efx->mac_lock);
575 573
576 if (link_up) { 574 if (link_up) {
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index dba5456e70f3..87abe2a53846 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -37,7 +37,6 @@ struct efx_self_tests {
37 int interrupt; 37 int interrupt;
38 int eventq_dma[EFX_MAX_CHANNELS]; 38 int eventq_dma[EFX_MAX_CHANNELS];
39 int eventq_int[EFX_MAX_CHANNELS]; 39 int eventq_int[EFX_MAX_CHANNELS];
40 int eventq_poll[EFX_MAX_CHANNELS];
41 /* offline tests */ 40 /* offline tests */
42 int registers; 41 int registers;
43 int phy_ext[EFX_MAX_PHY_TESTS]; 42 int phy_ext[EFX_MAX_PHY_TESTS];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4d5d619feaa6..7bea79017a05 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -18,7 +18,6 @@
18#include "bitfield.h" 18#include "bitfield.h"
19#include "efx.h" 19#include "efx.h"
20#include "nic.h" 20#include "nic.h"
21#include "mac.h"
22#include "spi.h" 21#include "spi.h"
23#include "regs.h" 22#include "regs.h"
24#include "io.h" 23#include "io.h"
@@ -36,8 +35,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
36{ 35{
37 efx_dword_t timer_cmd; 36 efx_dword_t timer_cmd;
38 37
39 BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
40
41 if (channel->irq_moderation) 38 if (channel->irq_moderation)
42 EFX_POPULATE_DWORD_2(timer_cmd, 39 EFX_POPULATE_DWORD_2(timer_cmd,
43 FRF_CZ_TC_TIMER_MODE, 40 FRF_CZ_TC_TIMER_MODE,
@@ -53,15 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
53 channel->channel); 50 channel->channel);
54} 51}
55 52
56static void siena_push_multicast_hash(struct efx_nic *efx)
57{
58 WARN_ON(!mutex_is_locked(&efx->mac_lock));
59
60 efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
61 efx->multicast_hash.byte, sizeof(efx->multicast_hash),
62 NULL, 0, NULL);
63}
64
65static int siena_mdio_write(struct net_device *net_dev, 53static int siena_mdio_write(struct net_device *net_dev,
66 int prtad, int devad, u16 addr, u16 value) 54 int prtad, int devad, u16 addr, u16 value)
67{ 55{
@@ -226,7 +214,24 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
226 214
227static int siena_probe_nvconfig(struct efx_nic *efx) 215static int siena_probe_nvconfig(struct efx_nic *efx)
228{ 216{
229 return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL); 217 u32 caps = 0;
218 int rc;
219
220 rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
221
222 efx->timer_quantum_ns =
223 (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
224 3072 : 6144; /* 768 cycles */
225 return rc;
226}
227
228static void siena_dimension_resources(struct efx_nic *efx)
229{
230 /* Each port has a small block of internal SRAM dedicated to
231 * the buffer table and descriptor caches. In theory we can
232 * map both blocks to one port, but we don't.
233 */
234 efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
230} 235}
231 236
232static int siena_probe_nic(struct efx_nic *efx) 237static int siena_probe_nic(struct efx_nic *efx)
@@ -304,6 +309,12 @@ static int siena_probe_nic(struct efx_nic *efx)
304 goto fail5; 309 goto fail5;
305 } 310 }
306 311
312 rc = efx_mcdi_mon_probe(efx);
313 if (rc)
314 goto fail5;
315
316 efx_sriov_probe(efx);
317
307 return 0; 318 return 0;
308 319
309fail5: 320fail5:
@@ -391,6 +402,8 @@ static int siena_init_nic(struct efx_nic *efx)
391 402
392static void siena_remove_nic(struct efx_nic *efx) 403static void siena_remove_nic(struct efx_nic *efx)
393{ 404{
405 efx_mcdi_mon_remove(efx);
406
394 efx_nic_free_buffer(efx, &efx->irq_status); 407 efx_nic_free_buffer(efx, &efx->irq_status);
395 408
396 siena_reset_hw(efx, RESET_TYPE_ALL); 409 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -617,6 +630,7 @@ const struct efx_nic_type siena_a0_nic_type = {
617 .probe = siena_probe_nic, 630 .probe = siena_probe_nic,
618 .remove = siena_remove_nic, 631 .remove = siena_remove_nic,
619 .init = siena_init_nic, 632 .init = siena_init_nic,
633 .dimension_resources = siena_dimension_resources,
620 .fini = efx_port_dummy_op_void, 634 .fini = efx_port_dummy_op_void,
621 .monitor = NULL, 635 .monitor = NULL,
622 .map_reset_reason = siena_map_reset_reason, 636 .map_reset_reason = siena_map_reset_reason,
@@ -630,14 +644,14 @@ const struct efx_nic_type siena_a0_nic_type = {
630 .stop_stats = siena_stop_nic_stats, 644 .stop_stats = siena_stop_nic_stats,
631 .set_id_led = efx_mcdi_set_id_led, 645 .set_id_led = efx_mcdi_set_id_led,
632 .push_irq_moderation = siena_push_irq_moderation, 646 .push_irq_moderation = siena_push_irq_moderation,
633 .push_multicast_hash = siena_push_multicast_hash, 647 .reconfigure_mac = efx_mcdi_mac_reconfigure,
648 .check_mac_fault = efx_mcdi_mac_check_fault,
634 .reconfigure_port = efx_mcdi_phy_reconfigure, 649 .reconfigure_port = efx_mcdi_phy_reconfigure,
635 .get_wol = siena_get_wol, 650 .get_wol = siena_get_wol,
636 .set_wol = siena_set_wol, 651 .set_wol = siena_set_wol,
637 .resume_wol = siena_init_wol, 652 .resume_wol = siena_init_wol,
638 .test_registers = siena_test_registers, 653 .test_registers = siena_test_registers,
639 .test_nvram = efx_mcdi_nvram_test_all, 654 .test_nvram = efx_mcdi_nvram_test_all,
640 .default_mac_ops = &efx_mcdi_mac_operations,
641 655
642 .revision = EFX_REV_SIENA_A0, 656 .revision = EFX_REV_SIENA_A0,
643 .mem_map_size = (FR_CZ_MC_TREG_SMEM + 657 .mem_map_size = (FR_CZ_MC_TREG_SMEM +
@@ -654,8 +668,7 @@ const struct efx_nic_type siena_a0_nic_type = {
654 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 668 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
655 * interrupt handler only supports 32 669 * interrupt handler only supports 32
656 * channels */ 670 * channels */
657 .tx_dc_base = 0x88000, 671 .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
658 .rx_dc_base = 0x68000,
659 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 672 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
660 NETIF_F_RXHASH | NETIF_F_NTUPLE), 673 NETIF_F_RXHASH | NETIF_F_NTUPLE),
661}; 674};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
new file mode 100644
index 000000000000..80976e84eee6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -0,0 +1,1643 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2010-2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9#include <linux/pci.h>
10#include <linux/module.h>
11#include "net_driver.h"
12#include "efx.h"
13#include "nic.h"
14#include "io.h"
15#include "mcdi.h"
16#include "filter.h"
17#include "mcdi_pcol.h"
18#include "regs.h"
19#include "vfdi.h"
20
21/* Number of longs required to track all the VIs in a VF */
22#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
23
24/**
25 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
26 * @VF_TX_FILTER_OFF: Disabled
27 * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
28 * 2 TX queues allowed per VF.
29 * @VF_TX_FILTER_ON: Enabled
30 */
31enum efx_vf_tx_filter_mode {
32 VF_TX_FILTER_OFF,
33 VF_TX_FILTER_AUTO,
34 VF_TX_FILTER_ON,
35};
36
37/**
38 * struct efx_vf - Back-end resource and protocol state for a PCI VF
39 * @efx: The Efx NIC owning this VF
40 * @pci_rid: The PCI requester ID for this VF
41 * @pci_name: The PCI name (formatted address) of this VF
42 * @index: Index of VF within its port and PF.
43 * @req: VFDI incoming request work item. Incoming USR_EV events are received
44 * by the NAPI handler, but must be handled by executing MCDI requests
45 * inside a work item.
46 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
47 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
48 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
49 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
50 * @status_lock
51 * @busy: VFDI request queued to be processed or being processed. Receiving
52 * a VFDI request when @busy is set is an error condition.
53 * @buf: Incoming VFDI requests are DMA from the VF into this buffer.
54 * @buftbl_base: Buffer table entries for this VF start at this index.
55 * @rx_filtering: Receive filtering has been requested by the VF driver.
56 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
57 * @rx_filter_qid: VF relative qid for RX filter requested by VF.
58 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
59 * @tx_filter_mode: Transmit MAC filtering mode.
60 * @tx_filter_id: Transmit MAC filter ID.
61 * @addr: The MAC address and outer vlan tag of the VF.
62 * @status_addr: VF DMA address of page for &struct vfdi_status updates.
63 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
64 * @peer_page_addrs and @peer_page_count from simultaneous
65 * updates by the VM and consumption by
66 * efx_sriov_update_vf_addr()
67 * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 68 * @peer_page_count: Number of entries in @peer_page_addrs.
69 * @evq0_addrs: Array of guest pages backing evq0.
70 * @evq0_count: Number of entries in @evq0_addrs.
71 * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
72 * to wait for flush completions.
73 * @txq_lock: Mutex for TX queue allocation.
74 * @txq_mask: Mask of initialized transmit queues.
75 * @txq_count: Number of initialized transmit queues.
76 * @rxq_mask: Mask of initialized receive queues.
77 * @rxq_count: Number of initialized receive queues.
 78 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
79 * due to flush failure.
80 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
81 * @reset_work: Work item to schedule a VF reset.
82 */
/* Member semantics are described in the kernel-doc comment above. */
struct efx_vf {
	struct efx_nic *efx;
	unsigned int pci_rid;
	char pci_name[13]; /* dddd:bb:dd.f */
	unsigned int index;
	struct work_struct req;
	u64 req_addr;
	int req_type;
	unsigned req_seqno;
	unsigned msg_seqno;
	bool busy;
	struct efx_buffer buf;
	unsigned buftbl_base;
	bool rx_filtering;
	enum efx_filter_flags rx_filter_flags;
	unsigned rx_filter_qid;
	int rx_filter_id;	/* -1 when no RX filter is installed */
	enum efx_vf_tx_filter_mode tx_filter_mode;
	int tx_filter_id;	/* -1 when no TX filter is installed */
	struct vfdi_endpoint addr;
	u64 status_addr;
	struct mutex status_lock;
	u64 *peer_page_addrs;
	unsigned peer_page_count;
	/* One entry per EFX_BUF_SIZE page backing the VF's event queue 0 */
	u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
		       EFX_BUF_SIZE];
	unsigned evq0_count;
	wait_queue_head_t flush_waitq;
	struct mutex txq_lock;
	unsigned long txq_mask[VI_MASK_LENGTH];
	unsigned txq_count;
	unsigned long rxq_mask[VI_MASK_LENGTH];
	unsigned rxq_count;
	unsigned long rxq_retry_mask[VI_MASK_LENGTH];
	atomic_t rxq_retry_count;
	struct work_struct reset_work;
};
120
/* A single source-to-destination copy request for MC_CMD_MEMCPY.
 * If @from_buf is non-NULL the data is copied inline from that host
 * buffer instead of being DMAd from @from_rid/@from_addr.
 */
struct efx_memcpy_req {
	unsigned int from_rid;	/* PCI requester ID of the source */
	void *from_buf;		/* optional inline source buffer */
	u64 from_addr;		/* source DMA address (used if !from_buf) */
	unsigned int to_rid;	/* PCI requester ID of the destination */
	u64 to_addr;		/* destination DMA address */
	unsigned length;	/* number of bytes to copy */
};
129
130/**
131 * struct efx_local_addr - A MAC address on the vswitch without a VF.
132 *
133 * Siena does not have a switch, so VFs can't transmit data to each
134 * other. Instead the VFs must be made aware of the local addresses
135 * on the vswitch, so that they can arrange for an alternative
136 * software datapath to be used.
137 *
138 * @link: List head for insertion into efx->local_addr_list.
139 * @addr: Ethernet address
140 */
/* See kernel-doc comment above for the rationale. */
struct efx_local_addr {
	struct list_head link;	/* entry in efx->local_addr_list */
	u8 addr[ETH_ALEN];	/* Ethernet address */
};
145
146/**
147 * struct efx_endpoint_page - Page of vfdi_endpoint structures
148 *
149 * @link: List head for insertion into efx->local_page_list.
150 * @ptr: Pointer to page.
151 * @addr: DMA address of page.
152 */
/* See kernel-doc comment above. */
struct efx_endpoint_page {
	struct list_head link;	/* entry in efx->local_page_list */
	void *ptr;		/* pointer to the page */
	dma_addr_t addr;	/* DMA address of the page */
};
158
159/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
160#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \
161 ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
162#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \
163 (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
164 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
165#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \
166 (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
167 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
168
169#define EFX_FIELD_MASK(_field) \
170 ((1 << _field ## _WIDTH) - 1)
171
172/* VFs can only use this many transmit channels */
173static unsigned int vf_max_tx_channels = 2;
174module_param(vf_max_tx_channels, uint, 0444);
175MODULE_PARM_DESC(vf_max_tx_channels,
176 "Limit the number of TX channels VFs can use");
177
178static int max_vfs = -1;
179module_param(max_vfs, int, 0444);
180MODULE_PARM_DESC(max_vfs,
181 "Reduce the number of VFs initialized by the driver");
182
183/* Workqueue used by VFDI communication. We can't use the global
184 * workqueue because it may be running the VF driver's probe()
185 * routine, which will be blocked there waiting for a VFDI response.
186 */
187static struct workqueue_struct *vfdi_workqueue;
188
189static unsigned abs_index(struct efx_vf *vf, unsigned index)
190{
191 return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
192}
193
/* Enable or disable SR-IOV support on the MC, reserving efx->vf_count
 * VFs starting at VI index EFX_VI_BASE.  On success, optionally returns
 * the VI scale and the total number of VFs granted by the firmware.
 * Returns 0 or a negative errno.
 */
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
			 unsigned *vi_scale_out, unsigned *vf_total_out)
{
	u8 inbuf[MC_CMD_SRIOV_IN_LEN];
	u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
	unsigned vi_scale, vf_total;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);

	rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
			  outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SRIOV_OUT_LEN)
		return -EIO;	/* truncated response */

	vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
	vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
	/* We cannot track more VIs per VF than VI_MASK_LENGTH allows */
	if (vi_scale > EFX_VI_SCALE_MAX)
		return -EOPNOTSUPP;

	if (vi_scale_out)
		*vi_scale_out = vi_scale;
	if (vf_total_out)
		*vf_total_out = vf_total;

	return 0;
}
226
/* Enable or disable delivery of USR_EV events (the VFDI doorbell
 * mechanism), directing them to the PF's VFDI channel.  Note the
 * hardware field is a *disable* flag, hence the inversion.
 */
static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
236
/* Execute an array of copy requests via the MC_CMD_MEMCPY MCDI command.
 * Each request copies either from an arbitrary PCIe requester/address
 * pair, or from a host buffer whose contents are inlined into the MCDI
 * payload.  Returns 0 or a negative errno.
 */
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
			    unsigned int count)
{
	u8 *inbuf, *record;
	unsigned int used;
	u32 from_rid, from_hi, from_lo;
	int rc;

	mb(); /* Finish writing source/reading dest before DMA starts */

	used = MC_CMD_MEMCPY_IN_LEN(count);
	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
		return -ENOBUFS;

	/* Allocate room for the largest request */
	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
	if (inbuf == NULL)
		return -ENOMEM;

	record = inbuf;
	MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
	while (count-- > 0) {
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
			       (u32)req->to_addr);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
			       (u32)(req->to_addr >> 32));
		if (req->from_buf == NULL) {
			/* Device-to-device copy: source given by RID/addr */
			from_rid = req->from_rid;
			from_lo = (u32)req->from_addr;
			from_hi = (u32)(req->from_addr >> 32);
		} else {
			/* Inline data is appended after the fixed-size
			 * records and addressed by its byte offset into
			 * the SDU (RID_INLINE).
			 */
			if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
				rc = -ENOBUFS;
				goto out;
			}

			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
			from_lo = used;
			from_hi = 0;
			memcpy(inbuf + used, req->from_buf, req->length);
			used += req->length;
		}

		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
			       from_lo);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
			       from_hi);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);

		++req;
		record += MC_CMD_MEMCPY_IN_RECORD_LEN;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
	kfree(inbuf);

	mb(); /* Don't write source/read dest before DMA is complete */

	return rc;
}
302
/* The TX filter is entirely controlled by this driver, and is modified
 * underneath the feet of the VF
 */
static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	/* Remove any filter installed by a previous call */
	if (vf->tx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->tx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
			  vf->pci_name, vf->tx_filter_id);
		vf->tx_filter_id = -1;
	}

	/* No MAC address assigned yet: nothing to match on */
	if (is_zero_ether_addr(vf->addr.mac_addr))
		return;

	/* Turn on TX filtering automatically if not explicitly
	 * enabled or disabled.
	 */
	if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
		vf->tx_filter_mode = VF_TX_FILTER_ON;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_tx(&filter, abs_index(vf, 0));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to migrate tx filter for vf %s\n",
			   vf->pci_name);
	} else {
		/* Insert returns the assigned filter ID on success */
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
			  vf->pci_name, rc);
		vf->tx_filter_id = rc;
	}
}
348
/* The RX filter is managed here on behalf of the VF driver */
static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	/* Remove any filter installed by a previous call */
	if (vf->rx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->rx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
			  vf->pci_name, vf->rx_filter_id);
		vf->rx_filter_id = -1;
	}

	/* Only filter when the VF asked for it and has a MAC address */
	if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
		return;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
			   vf->rx_filter_flags,
			   abs_index(vf, vf->rx_filter_qid));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to insert rx filter for vf %s\n",
			   vf->pci_name);
	} else {
		/* Insert returns the assigned filter ID on success */
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
			  vf->pci_name, rc);
		vf->rx_filter_id = rc;
	}
}
388
389static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
390{
391 efx_sriov_reset_tx_filter(vf);
392 efx_sriov_reset_rx_filter(vf);
393 queue_work(vfdi_workqueue, &vf->efx->peer_work);
394}
395
/* Push the peer list to this VF. The caller must hold status_lock to interlock
 * with VFDI requests, and they must be serialised against manipulation of
 * local_page_list, either by acquiring local_lock or by running from
 * efx_sriov_peer_work()
 */
static void __efx_sriov_push_vf_status(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_status *status = efx->vfdi_status.addr;
	struct efx_memcpy_req copy[4];
	struct efx_endpoint_page *epp;
	unsigned int pos, count;
	unsigned data_offset;
	efx_qword_t event;

	WARN_ON(!mutex_is_locked(&vf->status_lock));
	WARN_ON(!vf->status_addr);

	status->local = vf->addr;
	/* Bump both generation counters; the VF can detect a torn update
	 * by comparing generation_start against generation_end.
	 */
	status->generation_end = ++status->generation_start;

	memset(copy, '\0', sizeof(copy));
	/* Write generation_start */
	copy[0].from_buf = &status->generation_start;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						     generation_start);
	copy[0].length = sizeof(status->generation_start);
	/* DMA the rest of the structure (excluding the generations). This
	 * assumes that the non-generation portion of vfdi_status is in
	 * one chunk starting at the version member.
	 */
	data_offset = offsetof(struct vfdi_status, version);
	copy[1].from_rid = efx->pci_dev->devfn;
	copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->status_addr + data_offset;
	copy[1].length = status->length - data_offset;

	/* Copy the peer pages */
	pos = 2;
	count = 0;
	list_for_each_entry(epp, &efx->local_page_list, link) {
		if (count == vf->peer_page_count) {
			/* The VF driver will know they need to provide more
			 * pages because peer_addr_count is too large.
			 */
			break;
		}
		copy[pos].from_buf = NULL;
		copy[pos].from_rid = efx->pci_dev->devfn;
		copy[pos].from_addr = epp->addr;
		copy[pos].to_rid = vf->pci_rid;
		copy[pos].to_addr = vf->peer_page_addrs[count];
		copy[pos].length = EFX_PAGE_SIZE;

		/* Flush a full batch of copy requests */
		if (++pos == ARRAY_SIZE(copy)) {
			efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
			pos = 0;
		}
		++count;
	}

	/* Write generation_end */
	copy[pos].from_buf = &status->generation_end;
	copy[pos].to_rid = vf->pci_rid;
	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						       generation_end);
	copy[pos].length = sizeof(status->generation_end);
	efx_sriov_memcpy(efx, copy, pos + 1);

	/* Notify the guest */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
			   &event);
}
476
477static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
478 u64 *addr, unsigned count)
479{
480 efx_qword_t buf;
481 unsigned pos;
482
483 for (pos = 0; pos < count; ++pos) {
484 EFX_POPULATE_QWORD_3(buf,
485 FRF_AZ_BUF_ADR_REGION, 0,
486 FRF_AZ_BUF_ADR_FBUF,
487 addr ? addr[pos] >> 12 : 0,
488 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
489 efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
490 &buf, offset + pos);
491 }
492}
493
494static bool bad_vf_index(struct efx_nic *efx, unsigned index)
495{
496 return index >= efx_vf_size(efx);
497}
498
499static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
500{
501 unsigned max_buf_count = max_entry_count *
502 sizeof(efx_qword_t) / EFX_BUF_SIZE;
503
504 return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
505}
506
507/* Check that VI specified by per-port index belongs to a VF.
508 * Optionally set VF index and VI index within the VF.
509 */
510static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
511 struct efx_vf **vf_out, unsigned *rel_index_out)
512{
513 unsigned vf_i;
514
515 if (abs_index < EFX_VI_BASE)
516 return true;
517 vf_i = (abs_index - EFX_VI_BASE) * efx_vf_size(efx);
518 if (vf_i >= efx->vf_init_count)
519 return true;
520
521 if (vf_out)
522 *vf_out = efx->vf + vf_i;
523 if (rel_index_out)
524 *rel_index_out = abs_index % efx_vf_size(efx);
525 return false;
526}
527
/* VFDI_OP_INIT_EVQ: program the buffer table and the timer/event-queue
 * registers for one VF event queue.  The backing pages of evq0 are
 * cached so the queue can be re-created by efx_sriov_reset_vf().
 */
static int efx_vfdi_init_evq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_evq = req->u.init_evq.index;
	unsigned buf_count = req->u.init_evq.buf_count;
	unsigned abs_evq = abs_index(vf, vf_evq);
	unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
	efx_oword_t reg;

	/* Validate the untrusted, VF-supplied parameters */
	if (bad_vf_index(efx, vf_evq) ||
	    bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
				  vf->pci_name, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(buf_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);

	/* Remember evq0's backing pages for post-reset notification */
	if (vf_evq == 0) {
		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
		       buf_count * sizeof(u64));
		vf->evq0_count = buf_count;
	}

	return VFDI_RC_SUCCESS;
}
568
/* VFDI_OP_INIT_RXQ: program the buffer table and the RX descriptor
 * queue registers for one VF receive queue.
 */
static int efx_vfdi_init_rxq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.init_rxq.index;
	unsigned vf_evq = req->u.init_rxq.evq;
	unsigned buf_count = req->u.init_rxq.buf_count;
	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
	unsigned label;
	efx_oword_t reg;

	/* Validate the untrusted, VF-supplied parameters */
	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_rxq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}
	/* NOTE(review): __test_and_set_bit() returns the *previous* bit
	 * value, so rxq_count is only incremented when the queue was
	 * already marked initialized.  Verify this polarity against the
	 * flush handlers that decrement rxq_count.
	 */
	if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
		++vf->rxq_count;
	efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);

	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_RX_DESCQ_LABEL, label,
			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_AZ_RX_DESCQ_JUMBO,
			     !!(req->u.init_rxq.flags &
				VFDI_RXQ_FLAG_SCATTER_EN),
			     FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
			 abs_index(vf, vf_rxq));

	return VFDI_RC_SUCCESS;
}
608
/* VFDI_OP_INIT_TXQ: program the buffer table and the TX descriptor
 * queue registers for one VF transmit queue.  TX MAC filtering is
 * enabled on the queue when tx_filter_mode is VF_TX_FILTER_ON.
 */
static int efx_vfdi_init_txq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_txq = req->u.init_txq.index;
	unsigned vf_evq = req->u.init_txq.evq;
	unsigned buf_count = req->u.init_txq.buf_count;
	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
	unsigned label, eth_filt_en;
	efx_oword_t reg;

	/* Validate the untrusted, VF-supplied parameters; TX queues are
	 * additionally capped by the vf_max_tx_channels module parameter.
	 */
	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
	    vf_txq >= vf_max_tx_channels ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_txq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	/* NOTE(review): __test_and_set_bit() returns the *previous* bit
	 * value, so txq_count is only incremented when the queue was
	 * already marked initialized.  Verify this polarity against the
	 * flush handlers that decrement txq_count.
	 */
	mutex_lock(&vf->txq_lock);
	if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
		++vf->txq_count;
	mutex_unlock(&vf->txq_lock);
	efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);

	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;

	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_8(reg,
			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
			     FRF_AZ_TX_DESCQ_EN, 1,
			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_TX_DESCQ_LABEL, label,
			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
			 abs_index(vf, vf_txq));

	return VFDI_RC_SUCCESS;
}
654
/* Returns true when efx_vfdi_fini_all_queues should wake: either every
 * queue has flushed, or at least one RX flush failed and must be
 * retried (rxq_retry_count is non-zero).
 */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
{
	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
	smp_mb();

	return (!vf->txq_count && !vf->rxq_count) ||
		atomic_read(&vf->rxq_retry_count);
}
664
665static void efx_vfdi_flush_clear(struct efx_vf *vf)
666{
667 memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
668 vf->txq_count = 0;
669 memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
670 vf->rxq_count = 0;
671 memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
672 atomic_set(&vf->rxq_retry_count, 0);
673}
674
/* VFDI_OP_FINI_ALL_QUEUES: flush and then disable every TX/RX/event
 * queue belonging to this VF.  Flow control is disabled for the
 * duration (refcounted via efx->fc_disable).  Returns 0 on success,
 * VFDI_RC_ENOMEM or VFDI_RC_ETIMEDOUT on failure; the queues are
 * disabled regardless of the outcome.
 */
static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	efx_oword_t reg;
	unsigned count = efx_vf_size(efx);
	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
	unsigned timeout = HZ;
	unsigned index, rxqs_count;
	__le32 *rxqs;
	int rc;

	rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
	if (rxqs == NULL)
		return VFDI_RC_ENOMEM;

	/* Disable flow control while flushing */
	rtnl_lock();
	if (efx->fc_disable++ == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Flush all the initialized queues */
	rxqs_count = 0;
	for (index = 0; index < count; ++index) {
		if (test_bit(index, vf->txq_mask)) {
			/* TX flushes are issued directly via a register */
			EFX_POPULATE_OWORD_2(reg,
					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
					     FRF_AZ_TX_FLUSH_DESCQ,
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
		/* RX flushes are batched into one MCDI request below */
		if (test_bit(index, vf->rxq_mask))
			rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
	}

	atomic_set(&vf->rxq_retry_count, 0);
	/* Retry failed RX flushes until everything has flushed or the
	 * overall HZ-jiffy budget is exhausted.
	 */
	while (timeout && (vf->rxq_count || vf->txq_count)) {
		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
				  rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
		WARN_ON(rc < 0);

		timeout = wait_event_timeout(vf->flush_waitq,
					     efx_vfdi_flush_wake(vf),
					     timeout);
		/* Collect the queues whose flush failed, for the retry */
		rxqs_count = 0;
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
				rxqs[rxqs_count++] =
					cpu_to_le32(vf_offset + index);
			}
		}
	}

	/* Re-enable flow control once no caller needs it disabled */
	rtnl_lock();
	if (--efx->fc_disable == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Irrespective of success/failure, fini the queues */
	EFX_ZERO_OWORD(reg);
	for (index = 0; index < count; ++index) {
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
				 vf_offset + index);
	}
	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
	kfree(rxqs);
	efx_vfdi_flush_clear(vf);

	/* evq0 is gone; don't replay it after a reset */
	vf->evq0_count = 0;

	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
754
/* VFDI_OP_INSERT_FILTER: request an RX MAC filter for one of the VF's
 * receive queues.  Only one filter per VF is supported; the actual
 * hardware insertion is done by efx_sriov_reset_rx_filter(), which
 * only installs a filter once the VF has a non-zero MAC address.
 */
static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.mac_filter.rxq;
	unsigned flags;

	/* Reject out-of-range queues and duplicate filter requests */
	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
				  "flags 0x%x\n", vf->pci_name, vf_rxq,
				  req->u.mac_filter.flags);
		return VFDI_RC_EINVAL;
	}

	/* Translate VFDI filter flags into efx filter flags */
	flags = 0;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	vf->rx_filter_flags = flags;
	vf->rx_filter_qid = vf_rxq;
	vf->rx_filtering = true;

	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &efx->peer_work);

	return VFDI_RC_SUCCESS;
}
785
786static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
787{
788 vf->rx_filtering = false;
789 efx_sriov_reset_rx_filter(vf);
790 queue_work(vfdi_workqueue, &vf->efx->peer_work);
791
792 return VFDI_RC_SUCCESS;
793}
794
/* VFDI_OP_SET_STATUS_PAGE: record the VF addresses to which status
 * updates and peer-list pages should be DMAd, then push the current
 * status immediately.
 */
static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	u64 page_count = req->u.set_status_page.peer_page_count;
	/* Maximum number of peer-page addresses that fit in the request
	 * page after the fixed part of the request.
	 */
	u64 max_page_count =
		(EFX_PAGE_SIZE -
		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
		/ sizeof(req->u.set_status_page.peer_page_addr[0]);

	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
				  vf->pci_name);
		return VFDI_RC_EINVAL;
	}

	/* NOTE(review): local_lock is taken before status_lock here;
	 * confirm every other site uses the same lock order.
	 */
	mutex_lock(&efx->local_lock);
	mutex_lock(&vf->status_lock);
	vf->status_addr = req->u.set_status_page.dma_addr;

	kfree(vf->peer_page_addrs);
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (page_count) {
		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
					      GFP_KERNEL);
		/* On allocation failure we fall back to zero peer pages
		 * rather than failing the whole request.
		 */
		if (vf->peer_page_addrs) {
			memcpy(vf->peer_page_addrs,
			       req->u.set_status_page.peer_page_addr,
			       page_count * sizeof(u64));
			vf->peer_page_count = page_count;
		}
	}

	__efx_sriov_push_vf_status(vf);
	mutex_unlock(&vf->status_lock);
	mutex_unlock(&efx->local_lock);

	return VFDI_RC_SUCCESS;
}
838
839static int efx_vfdi_clear_status_page(struct efx_vf *vf)
840{
841 mutex_lock(&vf->status_lock);
842 vf->status_addr = 0;
843 mutex_unlock(&vf->status_lock);
844
845 return VFDI_RC_SUCCESS;
846}
847
/* Handler for a single VFDI operation; returns a VFDI_RC_* code that
 * is written back to the requesting VF.
 */
typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);

/* Dispatch table indexed by the request's op field.  Unlisted entries
 * are NULL and are rejected by efx_sriov_vfdi().
 */
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
860
/* Work item bound to struct efx_vf::req.  Fetches the VFDI request page
 * from VF memory, dispatches it through vfdi_ops[], then DMAs the
 * return code and the VFDI_OP_RESPONSE marker back into the VF's
 * request page.
 */
static void efx_sriov_vfdi(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, req);
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	struct efx_memcpy_req copy[2];
	int rc;

	/* Copy this page into the local address space */
	memset(copy, '\0', sizeof(copy));
	copy[0].from_rid = vf->pci_rid;
	copy[0].from_addr = vf->req_addr;
	copy[0].to_rid = efx->pci_dev->devfn;
	copy[0].to_addr = vf->buf.dma_addr;
	copy[0].length = EFX_PAGE_SIZE;
	rc = efx_sriov_memcpy(efx, copy, 1);
	if (rc) {
		/* If we can't get the request, we can't reply to the caller */
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
				  vf->pci_name, -rc);
		vf->busy = false;
		return;
	}

	/* Dispatch through the op table; NULL slots are unsupported ops */
	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
		rc = vfdi_ops[req->op](vf);
		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "vfdi request %d from %s ok\n",
				  req->op, vf->pci_name);
		}
	} else {
		netif_dbg(efx, hw, efx->net_dev,
			  "ERROR: Unrecognised request %d from VF %s addr "
			  "%llx\n", req->op, vf->pci_name,
			  (unsigned long long)vf->req_addr);
		rc = VFDI_RC_EOPNOTSUPP;
	}

	/* Allow subsequent VF requests */
	vf->busy = false;
	smp_wmb();

	/* Respond to the request */
	req->rc = rc;
	req->op = VFDI_OP_RESPONSE;

	memset(copy, '\0', sizeof(copy));
	copy[0].from_buf = &req->rc;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
	copy[0].length = sizeof(req->rc);
	copy[1].from_buf = &req->op;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
	copy[1].length = sizeof(req->op);

	/* Errors sending the response are deliberately ignored */
	(void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
922
923
924
/* After a reset the event queues inside the guests no longer exist. Fill the
 * event ring in guest memory with VFDI reset events, then (re-initialise) the
 * event queue to raise an interrupt. The guest driver will then recover.
 */
static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
{
	struct efx_nic *efx = vf->efx;
	struct efx_memcpy_req copy_req[4];
	efx_qword_t event;
	unsigned int pos, count, k, buftbl, abs_evq;
	efx_oword_t reg;
	efx_dword_t ptr;
	int rc;

	/* @buffer is scratch space used as the DMA source; it must be
	 * exactly one EFX_PAGE_SIZE page.
	 */
	BUG_ON(buffer->len != EFX_PAGE_SIZE);

	/* Nothing to do if the VF never initialised evq0 */
	if (!vf->evq0_count)
		return;
	BUG_ON(vf->evq0_count & (vf->evq0_count - 1)); /* power of two */

	mutex_lock(&vf->status_lock);
	/* Build one page filled with identical VFDI reset events... */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, vf->msg_seqno,
			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
	vf->msg_seqno++;
	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
		memcpy(buffer->addr + pos, &event, sizeof(event));

	/* ...and DMA it into every page backing evq0, in batches of up
	 * to ARRAY_SIZE(copy_req) copies per MCDI call.
	 */
	for (pos = 0; pos < vf->evq0_count; pos += count) {
		count = min_t(unsigned, vf->evq0_count - pos,
			      ARRAY_SIZE(copy_req));
		for (k = 0; k < count; k++) {
			copy_req[k].from_buf = NULL;
			copy_req[k].from_rid = efx->pci_dev->devfn;
			copy_req[k].from_addr = buffer->dma_addr;
			copy_req[k].to_rid = vf->pci_rid;
			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
			copy_req[k].length = EFX_PAGE_SIZE;
		}
		rc = efx_sriov_memcpy(efx, copy_req, count);
		if (rc) {
			if (net_ratelimit())
				netif_err(efx, hw, efx->net_dev,
					  "ERROR: Unable to notify %s of reset"
					  ": %d\n", vf->pci_name, -rc);
			break;
		}
	}

	/* Reinitialise, arm and trigger evq0 */
	abs_evq = abs_index(vf, 0);
	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
	efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
	efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);

	mutex_unlock(&vf->status_lock);
}
995
996static void efx_sriov_reset_vf_work(struct work_struct *work)
997{
998 struct efx_vf *vf = container_of(work, struct efx_vf, req);
999 struct efx_nic *efx = vf->efx;
1000 struct efx_buffer buf;
1001
1002 if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
1003 efx_sriov_reset_vf(vf, &buf);
1004 efx_nic_free_buffer(efx, &buf);
1005 }
1006}
1007
1008static void efx_sriov_handle_no_channel(struct efx_nic *efx)
1009{
1010 netif_err(efx, drv, efx->net_dev,
1011 "ERROR: IOV requires MSI-X and 1 additional interrupt"
1012 "vector. IOV disabled\n");
1013 efx->vf_count = 0;
1014}
1015
1016static int efx_sriov_probe_channel(struct efx_channel *channel)
1017{
1018 channel->efx->vfdi_channel = channel;
1019 return 0;
1020}
1021
1022static void
1023efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
1024{
1025 snprintf(buf, len, "%s-iov", channel->efx->name);
1026}
1027
/* Properties of the dedicated extra channel used for VFDI events */
static const struct efx_channel_type efx_sriov_channel_type = {
	.handle_no_channel	= efx_sriov_handle_no_channel,
	.pre_probe		= efx_sriov_probe_channel,
	.get_name		= efx_sriov_get_channel_name,
	/* no copy operation; channel must not be reallocated */
	.keep_eventq		= true,
};
1035
/* Probe for SR-IOV support and reserve the VFDI channel.  Queries the
 * firmware for the available VF count, clamps it to the max_vfs module
 * parameter, and registers the extra channel type.
 */
void efx_sriov_probe(struct efx_nic *efx)
{
	unsigned count;

	/* max_vfs == 0 means SR-IOV is administratively disabled */
	if (!max_vfs)
		return;

	if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
		return;
	if (count > 0 && count > max_vfs)
		count = max_vfs;

	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
	efx->vf_count = count;

	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
}
1053
/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by %local_lock. Drop that lock
 * and then broadcast the address list to every VF.
 */
static void efx_sriov_peer_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
	struct efx_vf *vf;
	struct efx_local_addr *local_addr;
	struct vfdi_endpoint *peer;
	struct efx_endpoint_page *epp;
	struct list_head pages;
	unsigned int peer_space;
	unsigned int peer_count;
	unsigned int pos;

	mutex_lock(&efx->local_lock);

	/* Move the existing peer pages off %local_page_list */
	INIT_LIST_HEAD(&pages);
	list_splice_tail_init(&efx->local_page_list, &pages);

	/* Populate the VF addresses starting from entry 1 (entry 0 is
	 * the PF address)
	 */
	peer = vfdi_status->peers + 1;
	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
	peer_count = 1;
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		/* Only VFs that are receive-filtering and have a MAC
		 * address are visible as peers.
		 */
		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
			*peer++ = vf->addr;
			++peer_count;
			--peer_space;
			BUG_ON(peer_space == 0);
		}
		mutex_unlock(&vf->status_lock);
	}

	/* Fill the remaining addresses */
	list_for_each_entry(local_addr, &efx->local_addr_list, link) {
		memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
		peer->tci = 0;
		++peer;
		++peer_count;
		if (--peer_space == 0) {
			/* Current page is full: reuse an old endpoint
			 * page if possible, else allocate a fresh one.
			 * On allocation failure the remaining addresses
			 * are silently dropped (best effort).
			 */
			if (list_empty(&pages)) {
				epp = kmalloc(sizeof(*epp), GFP_KERNEL);
				if (!epp)
					break;
				epp->ptr = dma_alloc_coherent(
					&efx->pci_dev->dev, EFX_PAGE_SIZE,
					&epp->addr, GFP_KERNEL);
				if (!epp->ptr) {
					kfree(epp);
					break;
				}
			} else {
				epp = list_first_entry(
					&pages, struct efx_endpoint_page, link);
				list_del(&epp->link);
			}

			list_add_tail(&epp->link, &efx->local_page_list);
			peer = (struct vfdi_endpoint *)epp->ptr;
			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
		}
	}
	vfdi_status->peer_count = peer_count;
	mutex_unlock(&efx->local_lock);

	/* Free any now unused endpoint pages */
	while (!list_empty(&pages)) {
		epp = list_first_entry(
			&pages, struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}

	/* Finally, push the pages */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->status_addr)
			__efx_sriov_push_vf_status(vf);
		mutex_unlock(&vf->status_lock);
	}
}
1148
1149static void efx_sriov_free_local(struct efx_nic *efx)
1150{
1151 struct efx_local_addr *local_addr;
1152 struct efx_endpoint_page *epp;
1153
1154 while (!list_empty(&efx->local_addr_list)) {
1155 local_addr = list_first_entry(&efx->local_addr_list,
1156 struct efx_local_addr, link);
1157 list_del(&local_addr->link);
1158 kfree(local_addr);
1159 }
1160
1161 while (!list_empty(&efx->local_page_list)) {
1162 epp = list_first_entry(&efx->local_page_list,
1163 struct efx_endpoint_page, link);
1164 list_del(&epp->link);
1165 dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
1166 epp->ptr, epp->addr);
1167 kfree(epp);
1168 }
1169}
1170
1171static int efx_sriov_vf_alloc(struct efx_nic *efx)
1172{
1173 unsigned index;
1174 struct efx_vf *vf;
1175
1176 efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
1177 if (!efx->vf)
1178 return -ENOMEM;
1179
1180 for (index = 0; index < efx->vf_count; ++index) {
1181 vf = efx->vf + index;
1182
1183 vf->efx = efx;
1184 vf->index = index;
1185 vf->rx_filter_id = -1;
1186 vf->tx_filter_mode = VF_TX_FILTER_AUTO;
1187 vf->tx_filter_id = -1;
1188 INIT_WORK(&vf->req, efx_sriov_vfdi);
1189 INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
1190 init_waitqueue_head(&vf->flush_waitq);
1191 mutex_init(&vf->status_lock);
1192 mutex_init(&vf->txq_lock);
1193 }
1194
1195 return 0;
1196}
1197
1198static void efx_sriov_vfs_fini(struct efx_nic *efx)
1199{
1200 struct efx_vf *vf;
1201 unsigned int pos;
1202
1203 for (pos = 0; pos < efx->vf_count; ++pos) {
1204 vf = efx->vf + pos;
1205
1206 efx_nic_free_buffer(efx, &vf->buf);
1207 kfree(vf->peer_page_addrs);
1208 vf->peer_page_addrs = NULL;
1209 vf->peer_page_count = 0;
1210
1211 vf->evq0_count = 0;
1212 }
1213}
1214
/* Per-VF initialisation: derive each VF's PCI routing ID from the
 * SR-IOV capability (first VF offset and stride), reserve its slice of
 * the buffer table, and allocate the page used to fetch its VFDI
 * requests.  On failure all VFs are torn down again.
 */
static int efx_sriov_vfs_init(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	unsigned index, devfn, sriov, buftbl_base;
	u16 offset, stride;
	struct efx_vf *vf;
	int rc;

	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov)
		return -ENOENT;

	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

	buftbl_base = efx->vf_buftbl_base;
	devfn = pci_dev->devfn + offset;
	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		/* Reserve buffer entries */
		vf->buftbl_base = buftbl_base;
		buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);

		vf->pci_rid = devfn;
		/* Human-readable PCI address for log messages; assumes
		 * the VFs share the PF's bus number -- TODO confirm
		 */
		snprintf(vf->pci_name, sizeof(vf->pci_name),
			 "%04x:%02x:%02x.%d",
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		/* Page into which this VF's VFDI requests are DMAd */
		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
		if (rc)
			goto fail;

		devfn += stride;
	}

	return 0;

fail:
	efx_sriov_vfs_fini(efx);
	return rc;
}
1258
/* Bring up SR-IOV: enable it in firmware, build the status page that is
 * pushed to VFs, allocate per-VF state and finally enable the PCI
 * SR-IOV capability.  Every step is unwound in reverse on failure.
 * Returns 0 on success or a negative error code.
 */
int efx_sriov_init(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct vfdi_status *vfdi_status;
	int rc;

	/* Ensure there's room for vf_channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
	/* Ensure that VI_BASE is aligned on VI_SCALE */
	BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));

	if (efx->vf_count == 0)
		return 0;

	/* Enable SR-IOV in firmware */
	rc = efx_sriov_cmd(efx, true, NULL, NULL);
	if (rc)
		goto fail_cmd;

	/* DMA-visible status page advertised to every VF */
	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
	memset(vfdi_status, 0, sizeof(*vfdi_status));
	vfdi_status->version = 1;
	vfdi_status->length = sizeof(*vfdi_status);
	vfdi_status->max_tx_channels = vf_max_tx_channels;
	vfdi_status->vi_scale = efx->vi_scale;
	vfdi_status->rss_rxq_count = efx->rss_spread;
	vfdi_status->peer_count = 1 + efx->vf_count;
	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;

	rc = efx_sriov_vf_alloc(efx);
	if (rc)
		goto fail_alloc;

	mutex_init(&efx->local_lock);
	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
	INIT_LIST_HEAD(&efx->local_addr_list);
	INIT_LIST_HEAD(&efx->local_page_list);

	rc = efx_sriov_vfs_init(efx);
	if (rc)
		goto fail_vfs;

	/* Hold the RTNL lock while publishing the PF MAC address and
	 * the initialised-VF count, so they cannot change under us.
	 */
	rtnl_lock();
	memcpy(vfdi_status->peers[0].mac_addr,
	       net_dev->dev_addr, ETH_ALEN);
	efx->vf_init_count = efx->vf_count;
	rtnl_unlock();

	/* Enable delivery of VF user events */
	efx_sriov_usrev(efx, true);

	/* At this point we must be ready to accept VFDI requests */

	rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
	if (rc)
		goto fail_pci;

	netif_info(efx, probe, net_dev,
		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
		   efx->vf_count, efx_vf_size(efx));
	return 0;

	/* Error unwind: strict reverse order of the steps above */
fail_pci:
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();
	efx_sriov_vfs_fini(efx);
fail_vfs:
	cancel_work_sync(&efx->peer_work);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
fail_alloc:
	efx_nic_free_buffer(efx, &efx->vfdi_status);
fail_status:
	efx_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
	return rc;
}
1339
/* Tear down everything created by efx_sriov_init().  A no-op when
 * initialisation never completed (vf_init_count == 0).
 */
void efx_sriov_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	if (efx->vf_init_count == 0)
		return;

	/* Disable all interfaces to reconfiguration */
	BUG_ON(efx->vfdi_channel->enabled);
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();

	/* Flush all reconfiguration work */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;
		cancel_work_sync(&vf->req);
		cancel_work_sync(&vf->reset_work);
	}
	cancel_work_sync(&efx->peer_work);

	pci_disable_sriov(efx->pci_dev);

	/* Tear down back-end state */
	efx_sriov_vfs_fini(efx);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
	efx_nic_free_buffer(efx, &efx->vfdi_status);
	efx_sriov_cmd(efx, false, NULL, NULL);
}
1372
/* Handle a USR_EV event on the VFDI channel.  A VF delivers the 64-bit
 * address of its request page as four sequenced 16-bit words
 * (REQ_WORD0..REQ_WORD3); once complete, processing is handed off to
 * the per-VF work item.  Out-of-sequence events reset the state machine.
 */
void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_vf *vf;
	unsigned qid, seq, type, data;

	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);

	/* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
	BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
	seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
	type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
	data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);

	netif_vdbg(efx, hw, efx->net_dev,
		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
		   qid, seq, type, data);

	if (map_vi_index(efx, qid, &vf, NULL))
		return;
	/* A request is already being processed for this VF */
	if (vf->busy)
		goto error;

	if (type == VFDI_EV_TYPE_REQ_WORD0) {
		/* Resynchronise */
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->req_seqno = seq + 1;
		vf->req_addr = 0;
	} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
		goto error;

	switch (vf->req_type) {
	case VFDI_EV_TYPE_REQ_WORD0:
	case VFDI_EV_TYPE_REQ_WORD1:
	case VFDI_EV_TYPE_REQ_WORD2:
		/* Accumulate 16 address bits per event; WORD0..WORD2
		 * shift by 0, 16 and 32 bits (req_type << 4).
		 */
		vf->req_addr |= (u64)data << (vf->req_type << 4);
		++vf->req_type;
		return;

	case VFDI_EV_TYPE_REQ_WORD3:
		/* Address complete: mark the VF busy and process it */
		vf->req_addr |= (u64)data << 48;
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->busy = true;
		queue_work(vfdi_workqueue, &vf->req);
		return;
	}

error:
	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "ERROR: Screaming VFDI request from %s\n",
			  vf->pci_name);
	/* Reset the request and sequence number */
	vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
	vf->req_seqno = seq + 1;
}
1429
1430void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
1431{
1432 struct efx_vf *vf;
1433
1434 if (vf_i > efx->vf_init_count)
1435 return;
1436 vf = efx->vf + vf_i;
1437 netif_info(efx, hw, efx->net_dev,
1438 "FLR on VF %s\n", vf->pci_name);
1439
1440 vf->status_addr = 0;
1441 efx_vfdi_remove_all_filters(vf);
1442 efx_vfdi_flush_clear(vf);
1443
1444 vf->evq0_count = 0;
1445}
1446
1447void efx_sriov_mac_address_changed(struct efx_nic *efx)
1448{
1449 struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
1450
1451 if (!efx->vf_init_count)
1452 return;
1453 memcpy(vfdi_status->peers[0].mac_addr,
1454 efx->net_dev->dev_addr, ETH_ALEN);
1455 queue_work(vfdi_workqueue, &efx->peer_work);
1456}
1457
1458void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1459{
1460 struct efx_vf *vf;
1461 unsigned queue, qid;
1462
1463 queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1464 if (map_vi_index(efx, queue, &vf, &qid))
1465 return;
1466 /* Ignore flush completions triggered by an FLR */
1467 if (!test_bit(qid, vf->txq_mask))
1468 return;
1469
1470 __clear_bit(qid, vf->txq_mask);
1471 --vf->txq_count;
1472
1473 if (efx_vfdi_flush_wake(vf))
1474 wake_up(&vf->flush_waitq);
1475}
1476
1477void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1478{
1479 struct efx_vf *vf;
1480 unsigned ev_failed, queue, qid;
1481
1482 queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1483 ev_failed = EFX_QWORD_FIELD(*event,
1484 FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1485 if (map_vi_index(efx, queue, &vf, &qid))
1486 return;
1487 if (!test_bit(qid, vf->rxq_mask))
1488 return;
1489
1490 if (ev_failed) {
1491 set_bit(qid, vf->rxq_retry_mask);
1492 atomic_inc(&vf->rxq_retry_count);
1493 } else {
1494 __clear_bit(qid, vf->rxq_mask);
1495 --vf->rxq_count;
1496 }
1497 if (efx_vfdi_flush_wake(vf))
1498 wake_up(&vf->flush_waitq);
1499}
1500
1501/* Called from napi. Schedule the reset work item */
1502void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
1503{
1504 struct efx_vf *vf;
1505 unsigned int rel;
1506
1507 if (map_vi_index(efx, dmaq, &vf, &rel))
1508 return;
1509
1510 if (net_ratelimit())
1511 netif_err(efx, hw, efx->net_dev,
1512 "VF %d DMA Q %d reports descriptor fetch error.\n",
1513 vf->index, rel);
1514 queue_work(vfdi_workqueue, &vf->reset_work);
1515}
1516
/* Reset all VFs */
void efx_sriov_reset(struct efx_nic *efx)
{
	unsigned int vf_i;
	struct efx_buffer buf;
	struct efx_vf *vf;

	ASSERT_RTNL();

	if (efx->vf_init_count == 0)
		return;

	/* Re-enable user-event delivery and SR-IOV in firmware */
	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);

	/* Scratch page shared by all efx_sriov_reset_vf() calls; without
	 * it no VF can be notified, so just give up.
	 */
	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
		vf = efx->vf + vf_i;
		efx_sriov_reset_vf(vf, &buf);
	}

	efx_nic_free_buffer(efx, &buf);
}
1542
1543int efx_init_sriov(void)
1544{
1545 /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
1546 * efx_sriov_peer_work() spend almost all their time sleeping for
1547 * MCDI to complete anyway
1548 */
1549 vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
1550 if (!vfdi_workqueue)
1551 return -ENOMEM;
1552
1553 return 0;
1554}
1555
/* Module-unload counterpart of efx_init_sriov() */
void efx_fini_sriov(void)
{
	destroy_workqueue(vfdi_workqueue);
}
1560
1561int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
1562{
1563 struct efx_nic *efx = netdev_priv(net_dev);
1564 struct efx_vf *vf;
1565
1566 if (vf_i >= efx->vf_init_count)
1567 return -EINVAL;
1568 vf = efx->vf + vf_i;
1569
1570 mutex_lock(&vf->status_lock);
1571 memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
1572 __efx_sriov_update_vf_addr(vf);
1573 mutex_unlock(&vf->status_lock);
1574
1575 return 0;
1576}
1577
1578int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
1579 u16 vlan, u8 qos)
1580{
1581 struct efx_nic *efx = netdev_priv(net_dev);
1582 struct efx_vf *vf;
1583 u16 tci;
1584
1585 if (vf_i >= efx->vf_init_count)
1586 return -EINVAL;
1587 vf = efx->vf + vf_i;
1588
1589 mutex_lock(&vf->status_lock);
1590 tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
1591 vf->addr.tci = htons(tci);
1592 __efx_sriov_update_vf_addr(vf);
1593 mutex_unlock(&vf->status_lock);
1594
1595 return 0;
1596}
1597
1598int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
1599 bool spoofchk)
1600{
1601 struct efx_nic *efx = netdev_priv(net_dev);
1602 struct efx_vf *vf;
1603 int rc;
1604
1605 if (vf_i >= efx->vf_init_count)
1606 return -EINVAL;
1607 vf = efx->vf + vf_i;
1608
1609 mutex_lock(&vf->txq_lock);
1610 if (vf->txq_count == 0) {
1611 vf->tx_filter_mode =
1612 spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
1613 rc = 0;
1614 } else {
1615 /* This cannot be changed while TX queues are running */
1616 rc = -EBUSY;
1617 }
1618 mutex_unlock(&vf->txq_lock);
1619 return rc;
1620}
1621
1622int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1623 struct ifla_vf_info *ivi)
1624{
1625 struct efx_nic *efx = netdev_priv(net_dev);
1626 struct efx_vf *vf;
1627 u16 tci;
1628
1629 if (vf_i >= efx->vf_init_count)
1630 return -EINVAL;
1631 vf = efx->vf + vf_i;
1632
1633 ivi->vf = vf_i;
1634 memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
1635 ivi->tx_rate = 0;
1636 tci = ntohs(vf->addr.tci);
1637 ivi->vlan = tci & VLAN_VID_MASK;
1638 ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
1639 ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;
1640
1641 return 0;
1642}
1643
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
index 71f2e3ebe1c7..5431a1bbff5c 100644
--- a/drivers/net/ethernet/sfc/spi.h
+++ b/drivers/net/ethernet/sfc/spi.h
@@ -68,7 +68,7 @@ static inline bool efx_spi_present(const struct efx_spi_device *spi)
68 68
69int falcon_spi_cmd(struct efx_nic *efx, 69int falcon_spi_cmd(struct efx_nic *efx,
70 const struct efx_spi_device *spi, unsigned int command, 70 const struct efx_spi_device *spi, unsigned int command,
71 int address, const void* in, void *out, size_t len); 71 int address, const void *in, void *out, size_t len);
72int falcon_spi_wait_write(struct efx_nic *efx, 72int falcon_spi_wait_write(struct efx_nic *efx,
73 const struct efx_spi_device *spi); 73 const struct efx_spi_device *spi);
74int falcon_spi_read(struct efx_nic *efx, 74int falcon_spi_read(struct efx_nic *efx,
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index 7b0fd89e7b85..d37cb5017129 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -121,7 +121,7 @@
121#define GPHY_XCONTROL_REG 49152 121#define GPHY_XCONTROL_REG 49152
122#define GPHY_ISOLATE_LBN 10 122#define GPHY_ISOLATE_LBN 10
123#define GPHY_ISOLATE_WIDTH 1 123#define GPHY_ISOLATE_WIDTH 1
124#define GPHY_DUPLEX_LBN 8 124#define GPHY_DUPLEX_LBN 8
125#define GPHY_DUPLEX_WIDTH 1 125#define GPHY_DUPLEX_WIDTH 1
126#define GPHY_LOOPBACK_NEAR_LBN 14 126#define GPHY_LOOPBACK_NEAR_LBN 14
127#define GPHY_LOOPBACK_NEAR_WIDTH 1 127#define GPHY_LOOPBACK_NEAR_WIDTH 1
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 72f0fbc73b1a..94d0365b31cd 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -110,7 +110,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
110 * little benefit from using descriptors that cross those 110 * little benefit from using descriptors that cross those
111 * boundaries and we keep things simple by not doing so. 111 * boundaries and we keep things simple by not doing so.
112 */ 112 */
113 unsigned len = (~dma_addr & 0xfff) + 1; 113 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
114 114
115 /* Work around hardware bug for unaligned buffers. */ 115 /* Work around hardware bug for unaligned buffers. */
116 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) 116 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
@@ -339,7 +339,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
339 * OS to free the skb. 339 * OS to free the skb.
340 */ 340 */
341netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, 341netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
342 struct net_device *net_dev) 342 struct net_device *net_dev)
343{ 343{
344 struct efx_nic *efx = netdev_priv(net_dev); 344 struct efx_nic *efx = netdev_priv(net_dev);
345 struct efx_tx_queue *tx_queue; 345 struct efx_tx_queue *tx_queue;
@@ -446,10 +446,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
446 likely(efx->port_enabled) && 446 likely(efx->port_enabled) &&
447 likely(netif_device_present(efx->net_dev))) { 447 likely(netif_device_present(efx->net_dev))) {
448 fill_level = tx_queue->insert_count - tx_queue->read_count; 448 fill_level = tx_queue->insert_count - tx_queue->read_count;
449 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 449 if (fill_level < EFX_TXQ_THRESHOLD(efx))
450 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
451 netif_tx_wake_queue(tx_queue->core_txq); 450 netif_tx_wake_queue(tx_queue->core_txq);
452 }
453 } 451 }
454 452
455 /* Check whether the hardware queue is now empty */ 453 /* Check whether the hardware queue is now empty */
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 7c21b334a75b..29bb3f9941c0 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -512,7 +512,7 @@ static bool txc43128_phy_poll(struct efx_nic *efx)
512 return efx->link_state.up != was_up; 512 return efx->link_state.up != was_up;
513} 513}
514 514
515static const char *txc43128_test_names[] = { 515static const char *const txc43128_test_names[] = {
516 "bist" 516 "bist"
517}; 517};
518 518
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
new file mode 100644
index 000000000000..225557caaf5a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -0,0 +1,255 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2010-2012 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9#ifndef _VFDI_H
10#define _VFDI_H
11
12/**
13 * DOC: Virtual Function Driver Interface
14 *
15 * This file contains software structures used to form a two way
16 * communication channel between the VF driver and the PF driver,
17 * named Virtual Function Driver Interface (VFDI).
18 *
19 * For the purposes of VFDI, a page is a memory region with size and
20 * alignment of 4K. All addresses are DMA addresses to be used within
21 * the domain of the relevant VF.
22 *
23 * The only hardware-defined channels for a VF driver to communicate
24 * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
25 * registers). Writing to these registers generates an event with
26 * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
27 * and USER_EV_REG_VALUE set to the value written. The PF driver may
28 * direct or disable delivery of these events by setting
29 * %FR_CZ_USR_EV_CFG.
30 *
31 * The PF driver can send arbitrary events to arbitrary event queues.
32 * However, for consistency, VFDI events from the PF are defined to
33 * follow the same form and be sent to the first event queue assigned
34 * to the VF while that queue is enabled by the VF driver.
35 *
36 * The general form of the variable bits of VFDI events is:
37 *
38 * 0 16 24 31
39 * | DATA | TYPE | SEQ |
40 *
41 * SEQ is a sequence number which should be incremented by 1 (modulo
42 * 256) for each event. The sequence numbers used in each direction
43 * are independent.
44 *
45 * The VF submits requests of type &struct vfdi_req by sending the
46 * address of the request (ADDR) in a series of 4 events:
47 *
48 * 0 16 24 31
49 * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
50 * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
51 * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
52 * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
53 *
54 * The address must be page-aligned. After receiving such a valid
55 * series of events, the PF driver will attempt to read the request
56 * and write a response to the same address. In case of an invalid
57 * sequence of events or a DMA error, there will be no response.
58 *
59 * The VF driver may request that the PF driver writes status
60 * information into its domain asynchronously. After writing the
61 * status, the PF driver will send an event of the form:
62 *
63 * 0 16 24 31
64 * | reserved | VFDI_EV_TYPE_STATUS | SEQ |
65 *
66 * In case the VF must be reset for any reason, the PF driver will
67 * send an event of the form:
68 *
69 * 0 16 24 31
70 * | reserved | VFDI_EV_TYPE_RESET | SEQ |
71 *
72 * It is then the responsibility of the VF driver to request
73 * reinitialisation of its queues.
74 */
75#define VFDI_EV_SEQ_LBN 24
76#define VFDI_EV_SEQ_WIDTH 8
77#define VFDI_EV_TYPE_LBN 16
78#define VFDI_EV_TYPE_WIDTH 8
79#define VFDI_EV_TYPE_REQ_WORD0 0
80#define VFDI_EV_TYPE_REQ_WORD1 1
81#define VFDI_EV_TYPE_REQ_WORD2 2
82#define VFDI_EV_TYPE_REQ_WORD3 3
83#define VFDI_EV_TYPE_STATUS 4
84#define VFDI_EV_TYPE_RESET 5
85#define VFDI_EV_DATA_LBN 0
86#define VFDI_EV_DATA_WIDTH 16
87
/* An Ethernet endpoint as exchanged between PF and VF drivers:
 * MAC address plus outer VLAN tag (big-endian TCI).
 */
struct vfdi_endpoint {
	u8 mac_addr[ETH_ALEN];
	__be16 tci;
};
92
/**
 * enum vfdi_op - VFDI operation enumeration
 * @VFDI_OP_RESPONSE: Indicates a response to the request.
 * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
 * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
 * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
 * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
 *	finalize the SRAM entries.
 * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
 * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
 * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
 *	from PF and write the initial status.
 * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
 *	updates from PF.
 */
enum vfdi_op {
	VFDI_OP_RESPONSE = 0,
	VFDI_OP_INIT_EVQ = 1,
	VFDI_OP_INIT_RXQ = 2,
	VFDI_OP_INIT_TXQ = 3,
	VFDI_OP_FINI_ALL_QUEUES = 4,
	VFDI_OP_INSERT_FILTER = 5,
	VFDI_OP_REMOVE_ALL_FILTERS = 6,
	VFDI_OP_SET_STATUS_PAGE = 7,
	VFDI_OP_CLEAR_STATUS_PAGE = 8,
	VFDI_OP_LIMIT,
};
120
121/* Response codes for VFDI operations. Other values may be used in future. */
122#define VFDI_RC_SUCCESS 0
123#define VFDI_RC_ENOMEM (-12)
124#define VFDI_RC_EINVAL (-22)
125#define VFDI_RC_EOPNOTSUPP (-95)
126#define VFDI_RC_ETIMEDOUT (-110)
127
128/**
129 * struct vfdi_req - Request from VF driver to PF driver
130 * @op: Operation code or response indicator, taken from &enum vfdi_op.
131 * @rc: Response code. Set to 0 on success or a negative error code on failure.
132 * @u.init_evq.index: Index of event queue to create.
133 * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
134 * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
135 * address of each page backing the event queue.
136 * @u.init_rxq.index: Index of receive queue to create.
137 * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
138 * @u.init_rxq.evq: Instance of event queue to target receive events at.
139 * @u.init_rxq.label: Label used in receive events.
140 * @u.init_rxq.flags: Unused.
141 * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
142 * address of each page backing the receive queue.
143 * @u.init_txq.index: Index of transmit queue to create.
144 * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
145 * @u.init_txq.evq: Instance of event queue to target transmit completion
146 * events at.
147 * @u.init_txq.label: Label used in transmit completion events.
148 * @u.init_txq.flags: Checksum offload flags.
149 * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
150 * address of each page backing the transmit queue.
 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
 *	all traffic at this receive queue.
153 * @u.mac_filter.flags: MAC filter flags.
154 * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
155 * This address must be page-aligned and the PF may write up to a
156 * whole page (allowing for extension of the structure).
157 * @u.set_status_page.peer_page_count: Number of additional pages the VF
158 * has provided into which peer addresses may be DMAd.
159 * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
160 * If the number of peers exceeds 256, then the VF must provide
161 * additional pages in this array. The PF will then DMA up to
162 * 512 vfdi_endpoint structures into each page. These addresses
163 * must be page-aligned.
164 */
/* Request/response block DMAd between VF and PF; this layout is a fixed
 * interface shared with VF drivers — see the kernel-doc comment above
 * for per-field documentation.
 */
struct vfdi_req {
	u32 op;			/* enum vfdi_op; VFDI_OP_RESPONSE on reply */
	u32 reserved1;
	s32 rc;			/* VFDI_RC_* result, written by the PF */
	u32 reserved2;
	union {
		struct {
			u32 index;
			u32 buf_count;
			u64 addr[];
		} init_evq;
		struct {
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_RXQ_FLAG_SCATTER_EN 1
			u32 reserved;
			u64 addr[];
		} init_rxq;
		struct {
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
			u32 reserved;
			u64 addr[];
		} init_txq;
		struct {
			u32 rxq;
			u32 flags;
#define VFDI_MAC_FILTER_FLAG_RSS 1
#define VFDI_MAC_FILTER_FLAG_SCATTER 2
		} mac_filter;
		struct {
			u64 dma_addr;
			u64 peer_page_count;
			u64 peer_page_addr[];
		} set_status_page;
	} u;
};
210
211/**
212 * struct vfdi_status - Status provided by PF driver to VF driver
213 * @generation_start: A generation count DMA'd to VF *before* the
214 * rest of the structure.
215 * @generation_end: A generation count DMA'd to VF *after* the
216 * rest of the structure.
217 * @version: Version of this structure; currently set to 1. Later
218 * versions must either be layout-compatible or only be sent to VFs
219 * that specifically request them.
220 * @length: Total length of this structure including embedded tables
221 * @vi_scale: log2 the number of VIs available on this VF. This quantity
222 * is used by the hardware for register decoding.
223 * @max_tx_channels: The maximum number of transmit queues the VF can use.
224 * @rss_rxq_count: The number of receive queues present in the shared RSS
225 * indirection table.
226 * @peer_count: Total number of peers in the complete peer list. If larger
227 * than ARRAY_SIZE(%peers), then the VF must provide sufficient
228 * additional pages each of which is filled with vfdi_endpoint structures.
229 * @local: The MAC address and outer VLAN tag of *this* VF
230 * @peers: Table of peer addresses. The @tci fields in these structures
231 * are currently unused and must be ignored. Additional peers are
232 * written into any additional pages provided by the VF.
233 * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
234 * for interrupt moderation timers, in nanoseconds. This member is only
235 * present if @length is sufficiently large.
236 */
struct vfdi_status {
	u32 generation_start;	/* Generation count DMA'd *before* the rest */
	u32 generation_end;	/* Generation count DMA'd *after* the rest */
	u32 version;		/* Structure version; currently 1 */
	u32 length;		/* Total length including embedded tables */
	u8 vi_scale;		/* log2 of the number of VIs on this VF */
	u8 max_tx_channels;	/* Maximum transmit queues the VF may use */
	u8 rss_rxq_count;	/* RX queues in the shared RSS indirection table */
	u8 reserved1;
	u16 peer_count;		/* Total peers; may exceed ARRAY_SIZE(peers) */
	u16 reserved2;
	struct vfdi_endpoint local;	/* MAC address and outer VLAN tag of this VF */
	struct vfdi_endpoint peers[256];	/* Peer table; tci fields currently unused */

	/* Members below here extend version 1 of this structure */
	u32 timer_quantum_ns;	/* Interrupt moderation timer quantum, ns;
				 * only present if @length is large enough.
				 */
};
254
255#endif