aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/sfc
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-12-09 01:14:38 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-12-09 01:14:38 -0500
commitbcd6acd51f3d4d1ada201e9bc5c40a31d6d80c71 (patch)
tree2f6dffd2d3e4dd67355a224de7e7a960335a92fd /drivers/net/sfc
parent11c34c7deaeeebcee342cbc35e1bb2a6711b2431 (diff)
parent3ff6a468b45b5dfeb0e903e56f4eb27d34b2437c (diff)
Merge commit 'origin/master' into next
Conflicts: include/linux/kvm.h
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--drivers/net/sfc/Kconfig13
-rw-r--r--drivers/net/sfc/Makefile7
-rw-r--r--drivers/net/sfc/bitfield.h17
-rw-r--r--drivers/net/sfc/boards.c328
-rw-r--r--drivers/net/sfc/boards.h28
-rw-r--r--drivers/net/sfc/efx.c793
-rw-r--r--drivers/net/sfc/efx.h65
-rw-r--r--drivers/net/sfc/enum.h116
-rw-r--r--drivers/net/sfc/ethtool.c222
-rw-r--r--drivers/net/sfc/ethtool.h27
-rw-r--r--drivers/net/sfc/falcon.c2829
-rw-r--r--drivers/net/sfc/falcon.h145
-rw-r--r--drivers/net/sfc/falcon_boards.c752
-rw-r--r--drivers/net/sfc/falcon_gmac.c123
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h1333
-rw-r--r--drivers/net/sfc/falcon_io.h258
-rw-r--r--drivers/net/sfc/falcon_xmac.c278
-rw-r--r--drivers/net/sfc/gmii.h60
-rw-r--r--drivers/net/sfc/io.h256
-rw-r--r--drivers/net/sfc/mac.h6
-rw-r--r--drivers/net/sfc/mcdi.c1112
-rw-r--r--drivers/net/sfc/mcdi.h130
-rw-r--r--drivers/net/sfc/mcdi_mac.c152
-rw-r--r--drivers/net/sfc/mcdi_pcol.h1578
-rw-r--r--drivers/net/sfc/mcdi_phy.c597
-rw-r--r--drivers/net/sfc/mdio_10g.c144
-rw-r--r--drivers/net/sfc/mdio_10g.h6
-rw-r--r--drivers/net/sfc/mtd.c559
-rw-r--r--drivers/net/sfc/net_driver.h302
-rw-r--r--drivers/net/sfc/nic.c1583
-rw-r--r--drivers/net/sfc/nic.h261
-rw-r--r--drivers/net/sfc/phy.h27
-rw-r--r--drivers/net/sfc/qt202x_phy.c (renamed from drivers/net/sfc/xfp_phy.c)132
-rw-r--r--drivers/net/sfc/regs.h3168
-rw-r--r--drivers/net/sfc/rx.c82
-rw-r--r--drivers/net/sfc/rx.h26
-rw-r--r--drivers/net/sfc/selftest.c146
-rw-r--r--drivers/net/sfc/sfe4001.c435
-rw-r--r--drivers/net/sfc/siena.c604
-rw-r--r--drivers/net/sfc/spi.h18
-rw-r--r--drivers/net/sfc/tenxpress.c223
-rw-r--r--drivers/net/sfc/tx.c184
-rw-r--r--drivers/net/sfc/tx.h25
-rw-r--r--drivers/net/sfc/workarounds.h20
44 files changed, 13068 insertions, 6102 deletions
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index 260aafaac235..a65c98638398 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -1,5 +1,5 @@
1config SFC 1config SFC
2 tristate "Solarflare Solarstorm SFC4000 support" 2 tristate "Solarflare Solarstorm SFC4000/SFC9000-family support"
3 depends on PCI && INET 3 depends on PCI && INET
4 select MDIO 4 select MDIO
5 select CRC32 5 select CRC32
@@ -7,15 +7,16 @@ config SFC
7 select I2C_ALGOBIT 7 select I2C_ALGOBIT
8 help 8 help
9 This driver supports 10-gigabit Ethernet cards based on 9 This driver supports 10-gigabit Ethernet cards based on
10 the Solarflare Communications Solarstorm SFC4000 controller. 10 the Solarflare Communications Solarstorm SFC4000 and
11 SFC9000-family controllers.
11 12
12 To compile this driver as a module, choose M here. The module 13 To compile this driver as a module, choose M here. The module
13 will be called sfc. 14 will be called sfc.
14config SFC_MTD 15config SFC_MTD
15 bool "Solarflare Solarstorm SFC4000 flash MTD support" 16 bool "Solarflare Solarstorm SFC4000/SFC9000-family MTD support"
16 depends on SFC && MTD && !(SFC=y && MTD=m) 17 depends on SFC && MTD && !(SFC=y && MTD=m)
17 default y 18 default y
18 help 19 help
19 This exposes the on-board flash memory as an MTD device (e.g. 20 This exposes the on-board flash memory as MTD devices (e.g.
20 /dev/mtd1). This makes it possible to upload new boot code 21 /dev/mtd1). This makes it possible to upload new firmware
21 to the NIC. 22 to the NIC.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be3cb13..1047b19c60a5 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,7 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ 1sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o \
2 falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ 2 falcon_gmac.o falcon_xmac.o mcdi_mac.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o
4sfc-$(CONFIG_SFC_MTD) += mtd.o 5sfc-$(CONFIG_SFC_MTD) += mtd.o
5 6
6obj-$(CONFIG_SFC) += sfc.o 7obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c267b9..098ac2ad757d 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -37,6 +37,8 @@
37#define EFX_DWORD_2_WIDTH 32 37#define EFX_DWORD_2_WIDTH 32
38#define EFX_DWORD_3_LBN 96 38#define EFX_DWORD_3_LBN 96
39#define EFX_DWORD_3_WIDTH 32 39#define EFX_DWORD_3_WIDTH 32
40#define EFX_QWORD_0_LBN 0
41#define EFX_QWORD_0_WIDTH 64
40 42
41/* Specified attribute (e.g. LBN) of the specified field */ 43/* Specified attribute (e.g. LBN) of the specified field */
42#define EFX_VAL(field, attribute) field ## _ ## attribute 44#define EFX_VAL(field, attribute) field ## _ ## attribute
@@ -520,19 +522,6 @@ typedef union efx_oword {
520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 522#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
521#endif 523#endif
522 524
523#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
524 if (falcon_rev(efx) >= FALCON_REV_B0) { \
525 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
526 } else { \
527 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
528 } \
529} while (0)
530
531#define EFX_QWORD_FIELD_VER(efx, qword, field) \
532 (falcon_rev(efx) >= FALCON_REV_B0 ? \
533 EFX_QWORD_FIELD((qword), field##_B0) : \
534 EFX_QWORD_FIELD((qword), field##_A1))
535
536/* Used to avoid compiler warnings about shift range exceeding width 525/* Used to avoid compiler warnings about shift range exceeding width
537 * of the data types when dma_addr_t is only 32 bits wide. 526 * of the data types when dma_addr_t is only 32 bits wide.
538 */ 527 */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c891b7..000000000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14#include "workarounds.h"
15
16/* Macros for unpacking the board revision */
17/* The revision info is in host byte order. */
18#define BOARD_TYPE(_rev) (_rev >> 8)
19#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
20#define BOARD_MINOR(_rev) (_rev & 0xf)
21
22/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
23#define BLINK_INTERVAL (HZ/2)
24
25static void blink_led_timer(unsigned long context)
26{
27 struct efx_nic *efx = (struct efx_nic *)context;
28 struct efx_blinker *bl = &efx->board_info.blinker;
29 efx->board_info.set_id_led(efx, bl->state);
30 bl->state = !bl->state;
31 if (bl->resubmit)
32 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
33}
34
35static void board_blink(struct efx_nic *efx, bool blink)
36{
37 struct efx_blinker *blinker = &efx->board_info.blinker;
38
39 /* The rtnl mutex serialises all ethtool ioctls, so
40 * nothing special needs doing here. */
41 if (blink) {
42 blinker->resubmit = true;
43 blinker->state = false;
44 setup_timer(&blinker->timer, blink_led_timer,
45 (unsigned long)efx);
46 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
47 } else {
48 blinker->resubmit = false;
49 if (blinker->timer.function)
50 del_timer_sync(&blinker->timer);
51 efx->board_info.init_leds(efx);
52 }
53}
54
55/*****************************************************************************
56 * Support for LM87 sensor chip used on several boards
57 */
58#define LM87_REG_ALARMS1 0x41
59#define LM87_REG_ALARMS2 0x42
60#define LM87_IN_LIMITS(nr, _min, _max) \
61 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
62#define LM87_AIN_LIMITS(nr, _min, _max) \
63 0x3B + (nr), _max, 0x1A + (nr), _min
64#define LM87_TEMP_INT_LIMITS(_min, _max) \
65 0x39, _max, 0x3A, _min
66#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
67 0x37, _max, 0x38, _min
68
69#define LM87_ALARM_TEMP_INT 0x10
70#define LM87_ALARM_TEMP_EXT1 0x20
71
72#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
73
74static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
75 const u8 *reg_values)
76{
77 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
78 int rc;
79
80 if (!client)
81 return -EIO;
82
83 while (*reg_values) {
84 u8 reg = *reg_values++;
85 u8 value = *reg_values++;
86 rc = i2c_smbus_write_byte_data(client, reg, value);
87 if (rc)
88 goto err;
89 }
90
91 efx->board_info.hwmon_client = client;
92 return 0;
93
94err:
95 i2c_unregister_device(client);
96 return rc;
97}
98
99static void efx_fini_lm87(struct efx_nic *efx)
100{
101 i2c_unregister_device(efx->board_info.hwmon_client);
102}
103
104static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
105{
106 struct i2c_client *client = efx->board_info.hwmon_client;
107 s32 alarms1, alarms2;
108
109 /* If link is up then do not monitor temperature */
110 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
111 return 0;
112
113 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
114 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
115 if (alarms1 < 0)
116 return alarms1;
117 if (alarms2 < 0)
118 return alarms2;
119 alarms1 &= mask;
120 alarms2 &= mask >> 8;
121 if (alarms1 || alarms2) {
122 EFX_ERR(efx,
123 "LM87 detected a hardware failure (status %02x:%02x)"
124 "%s%s\n",
125 alarms1, alarms2,
126 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
127 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
128 return -ERANGE;
129 }
130
131 return 0;
132}
133
134#else /* !CONFIG_SENSORS_LM87 */
135
136static inline int
137efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
138 const u8 *reg_values)
139{
140 return 0;
141}
142static inline void efx_fini_lm87(struct efx_nic *efx)
143{
144}
145static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
146{
147 return 0;
148}
149
150#endif /* CONFIG_SENSORS_LM87 */
151
152/*****************************************************************************
153 * Support for the SFE4002
154 *
155 */
156static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
157
158static const u8 sfe4002_lm87_regs[] = {
159 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
160 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
161 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
162 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
163 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
164 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
165 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
166 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
167 LM87_TEMP_INT_LIMITS(10, 60), /* board */
168 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
169 0
170};
171
172static struct i2c_board_info sfe4002_hwmon_info = {
173 I2C_BOARD_INFO("lm87", 0x2e),
174 .platform_data = &sfe4002_lm87_channel,
175};
176
177/****************************************************************************/
178/* LED allocations. Note that on rev A0 boards the schematic and the reality
179 * differ: red and green are swapped. Below is the fixed (A1) layout (there
180 * are only 3 A0 boards in existence, so no real reason to make this
181 * conditional).
182 */
183#define SFE4002_FAULT_LED (2) /* Red */
184#define SFE4002_RX_LED (0) /* Green */
185#define SFE4002_TX_LED (1) /* Amber */
186
187static void sfe4002_init_leds(struct efx_nic *efx)
188{
189 /* Set the TX and RX LEDs to reflect status and activity, and the
190 * fault LED off */
191 xfp_set_led(efx, SFE4002_TX_LED,
192 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
193 xfp_set_led(efx, SFE4002_RX_LED,
194 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
195 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
196}
197
198static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
199{
200 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
201 QUAKE_LED_OFF);
202}
203
204static int sfe4002_check_hw(struct efx_nic *efx)
205{
206 /* A0 board rev. 4002s report a temperature fault the whole time
207 * (bad sensor) so we mask it out. */
208 unsigned alarm_mask =
209 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
210 ~LM87_ALARM_TEMP_EXT1 : ~0;
211
212 return efx_check_lm87(efx, alarm_mask);
213}
214
215static int sfe4002_init(struct efx_nic *efx)
216{
217 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
218 if (rc)
219 return rc;
220 efx->board_info.monitor = sfe4002_check_hw;
221 efx->board_info.init_leds = sfe4002_init_leds;
222 efx->board_info.set_id_led = sfe4002_set_id_led;
223 efx->board_info.blink = board_blink;
224 efx->board_info.fini = efx_fini_lm87;
225 return 0;
226}
227
228/*****************************************************************************
229 * Support for the SFN4112F
230 *
231 */
232static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
233
234static const u8 sfn4112f_lm87_regs[] = {
235 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
236 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
237 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
238 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
239 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
240 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
241 LM87_TEMP_INT_LIMITS(10, 60), /* board */
242 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
243 0
244};
245
246static struct i2c_board_info sfn4112f_hwmon_info = {
247 I2C_BOARD_INFO("lm87", 0x2e),
248 .platform_data = &sfn4112f_lm87_channel,
249};
250
251#define SFN4112F_ACT_LED 0
252#define SFN4112F_LINK_LED 1
253
254static void sfn4112f_init_leds(struct efx_nic *efx)
255{
256 xfp_set_led(efx, SFN4112F_ACT_LED,
257 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
258 xfp_set_led(efx, SFN4112F_LINK_LED,
259 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
260}
261
262static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
263{
264 xfp_set_led(efx, SFN4112F_LINK_LED,
265 state ? QUAKE_LED_ON : QUAKE_LED_OFF);
266}
267
268static int sfn4112f_check_hw(struct efx_nic *efx)
269{
270 /* Mask out unused sensors */
271 return efx_check_lm87(efx, ~0x48);
272}
273
274static int sfn4112f_init(struct efx_nic *efx)
275{
276 int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
277 if (rc)
278 return rc;
279 efx->board_info.monitor = sfn4112f_check_hw;
280 efx->board_info.init_leds = sfn4112f_init_leds;
281 efx->board_info.set_id_led = sfn4112f_set_id_led;
282 efx->board_info.blink = board_blink;
283 efx->board_info.fini = efx_fini_lm87;
284 return 0;
285}
286
287/* This will get expanded as board-specific details get moved out of the
288 * PHY drivers. */
289struct efx_board_data {
290 enum efx_board_type type;
291 const char *ref_model;
292 const char *gen_type;
293 int (*init) (struct efx_nic *nic);
294};
295
296
297static struct efx_board_data board_data[] = {
298 { EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
299 { EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
300 { EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
301 sfn4111t_init },
302 { EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
303 sfn4112f_init },
304};
305
306void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
307{
308 struct efx_board_data *data = NULL;
309 int i;
310
311 efx->board_info.type = BOARD_TYPE(revision_info);
312 efx->board_info.major = BOARD_MAJOR(revision_info);
313 efx->board_info.minor = BOARD_MINOR(revision_info);
314
315 for (i = 0; i < ARRAY_SIZE(board_data); i++)
316 if (board_data[i].type == efx->board_info.type)
317 data = &board_data[i];
318
319 if (data) {
320 EFX_INFO(efx, "board is %s rev %c%d\n",
321 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
322 ? data->ref_model : data->gen_type,
323 'A' + efx->board_info.major, efx->board_info.minor);
324 efx->board_info.init = data->init;
325 } else {
326 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
327 }
328}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de0e080..000000000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_SFE4001 = 1,
16 EFX_BOARD_SFE4002 = 2,
17 EFX_BOARD_SFN4111T = 0x51,
18 EFX_BOARD_SFN4112F = 0x52,
19};
20
21extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
22
23/* SFE4001 (10GBASE-T) */
24extern int sfe4001_init(struct efx_nic *efx);
25/* SFN4111T (100/1000/10GBASE-T) */
26extern int sfn4111t_init(struct efx_nic *efx);
27
28#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..f983e3b507cc 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc. 4 * Copyright 2005-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -21,12 +21,73 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include "net_driver.h" 23#include "net_driver.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h" 24#include "efx.h"
28#include "mdio_10g.h" 25#include "mdio_10g.h"
29#include "falcon.h" 26#include "nic.h"
27
28#include "mcdi.h"
29
30/**************************************************************************
31 *
32 * Type name strings
33 *
34 **************************************************************************
35 */
36
37/* Loopback mode names (see LOOPBACK_MODE()) */
38const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
39const char *efx_loopback_mode_names[] = {
40 [LOOPBACK_NONE] = "NONE",
41 [LOOPBACK_DATA] = "DATAPATH",
42 [LOOPBACK_GMAC] = "GMAC",
43 [LOOPBACK_XGMII] = "XGMII",
44 [LOOPBACK_XGXS] = "XGXS",
45 [LOOPBACK_XAUI] = "XAUI",
46 [LOOPBACK_GMII] = "GMII",
47 [LOOPBACK_SGMII] = "SGMII",
48 [LOOPBACK_XGBR] = "XGBR",
49 [LOOPBACK_XFI] = "XFI",
50 [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
51 [LOOPBACK_GMII_FAR] = "GMII_FAR",
52 [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
53 [LOOPBACK_XFI_FAR] = "XFI_FAR",
54 [LOOPBACK_GPHY] = "GPHY",
55 [LOOPBACK_PHYXS] = "PHYXS",
56 [LOOPBACK_PCS] = "PCS",
57 [LOOPBACK_PMAPMD] = "PMA/PMD",
58 [LOOPBACK_XPORT] = "XPORT",
59 [LOOPBACK_XGMII_WS] = "XGMII_WS",
60 [LOOPBACK_XAUI_WS] = "XAUI_WS",
61 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
62 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
63 [LOOPBACK_GMII_WS] = "GMII_WS",
64 [LOOPBACK_XFI_WS] = "XFI_WS",
65 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
66 [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
67};
68
69/* Interrupt mode names (see INT_MODE())) */
70const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
71const char *efx_interrupt_mode_names[] = {
72 [EFX_INT_MODE_MSIX] = "MSI-X",
73 [EFX_INT_MODE_MSI] = "MSI",
74 [EFX_INT_MODE_LEGACY] = "legacy",
75};
76
77const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
78const char *efx_reset_type_names[] = {
79 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
80 [RESET_TYPE_ALL] = "ALL",
81 [RESET_TYPE_WORLD] = "WORLD",
82 [RESET_TYPE_DISABLE] = "DISABLE",
83 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
84 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
85 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
86 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
87 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
88 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
89 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
90};
30 91
31#define EFX_MAX_MTU (9 * 1024) 92#define EFX_MAX_MTU (9 * 1024)
32 93
@@ -145,7 +206,8 @@ static void efx_fini_channels(struct efx_nic *efx);
145 206
146#define EFX_ASSERT_RESET_SERIALISED(efx) \ 207#define EFX_ASSERT_RESET_SERIALISED(efx) \
147 do { \ 208 do { \
148 if (efx->state == STATE_RUNNING) \ 209 if ((efx->state == STATE_RUNNING) || \
210 (efx->state == STATE_DISABLED)) \
149 ASSERT_RTNL(); \ 211 ASSERT_RTNL(); \
150 } while (0) 212 } while (0)
151 213
@@ -171,7 +233,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
171 !channel->enabled)) 233 !channel->enabled))
172 return 0; 234 return 0;
173 235
174 rx_packets = falcon_process_eventq(channel, rx_quota); 236 rx_packets = efx_nic_process_eventq(channel, rx_quota);
175 if (rx_packets == 0) 237 if (rx_packets == 0)
176 return 0; 238 return 0;
177 239
@@ -203,7 +265,7 @@ static inline void efx_channel_processed(struct efx_channel *channel)
203 channel->work_pending = false; 265 channel->work_pending = false;
204 smp_wmb(); 266 smp_wmb();
205 267
206 falcon_eventq_read_ack(channel); 268 efx_nic_eventq_read_ack(channel);
207} 269}
208 270
209/* NAPI poll handler 271/* NAPI poll handler
@@ -228,26 +290,20 @@ static int efx_poll(struct napi_struct *napi, int budget)
228 if (channel->used_flags & EFX_USED_BY_RX && 290 if (channel->used_flags & EFX_USED_BY_RX &&
229 efx->irq_rx_adaptive && 291 efx->irq_rx_adaptive &&
230 unlikely(++channel->irq_count == 1000)) { 292 unlikely(++channel->irq_count == 1000)) {
231 unsigned old_irq_moderation = channel->irq_moderation;
232
233 if (unlikely(channel->irq_mod_score < 293 if (unlikely(channel->irq_mod_score <
234 irq_adapt_low_thresh)) { 294 irq_adapt_low_thresh)) {
235 channel->irq_moderation = 295 if (channel->irq_moderation > 1) {
236 max_t(int, 296 channel->irq_moderation -= 1;
237 channel->irq_moderation - 297 efx->type->push_irq_moderation(channel);
238 FALCON_IRQ_MOD_RESOLUTION, 298 }
239 FALCON_IRQ_MOD_RESOLUTION);
240 } else if (unlikely(channel->irq_mod_score > 299 } else if (unlikely(channel->irq_mod_score >
241 irq_adapt_high_thresh)) { 300 irq_adapt_high_thresh)) {
242 channel->irq_moderation = 301 if (channel->irq_moderation <
243 min(channel->irq_moderation + 302 efx->irq_rx_moderation) {
244 FALCON_IRQ_MOD_RESOLUTION, 303 channel->irq_moderation += 1;
245 efx->irq_rx_moderation); 304 efx->type->push_irq_moderation(channel);
305 }
246 } 306 }
247
248 if (channel->irq_moderation != old_irq_moderation)
249 falcon_set_int_moderation(channel);
250
251 channel->irq_count = 0; 307 channel->irq_count = 0;
252 channel->irq_mod_score = 0; 308 channel->irq_mod_score = 0;
253 } 309 }
@@ -280,7 +336,7 @@ void efx_process_channel_now(struct efx_channel *channel)
280 BUG_ON(!channel->enabled); 336 BUG_ON(!channel->enabled);
281 337
282 /* Disable interrupts and wait for ISRs to complete */ 338 /* Disable interrupts and wait for ISRs to complete */
283 falcon_disable_interrupts(efx); 339 efx_nic_disable_interrupts(efx);
284 if (efx->legacy_irq) 340 if (efx->legacy_irq)
285 synchronize_irq(efx->legacy_irq); 341 synchronize_irq(efx->legacy_irq);
286 if (channel->irq) 342 if (channel->irq)
@@ -290,14 +346,14 @@ void efx_process_channel_now(struct efx_channel *channel)
290 napi_disable(&channel->napi_str); 346 napi_disable(&channel->napi_str);
291 347
292 /* Poll the channel */ 348 /* Poll the channel */
293 efx_process_channel(channel, efx->type->evq_size); 349 efx_process_channel(channel, EFX_EVQ_SIZE);
294 350
295 /* Ack the eventq. This may cause an interrupt to be generated 351 /* Ack the eventq. This may cause an interrupt to be generated
296 * when they are reenabled */ 352 * when they are reenabled */
297 efx_channel_processed(channel); 353 efx_channel_processed(channel);
298 354
299 napi_enable(&channel->napi_str); 355 napi_enable(&channel->napi_str);
300 falcon_enable_interrupts(efx); 356 efx_nic_enable_interrupts(efx);
301} 357}
302 358
303/* Create event queue 359/* Create event queue
@@ -309,7 +365,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
309{ 365{
310 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); 366 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
311 367
312 return falcon_probe_eventq(channel); 368 return efx_nic_probe_eventq(channel);
313} 369}
314 370
315/* Prepare channel's event queue */ 371/* Prepare channel's event queue */
@@ -319,21 +375,21 @@ static void efx_init_eventq(struct efx_channel *channel)
319 375
320 channel->eventq_read_ptr = 0; 376 channel->eventq_read_ptr = 0;
321 377
322 falcon_init_eventq(channel); 378 efx_nic_init_eventq(channel);
323} 379}
324 380
325static void efx_fini_eventq(struct efx_channel *channel) 381static void efx_fini_eventq(struct efx_channel *channel)
326{ 382{
327 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); 383 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
328 384
329 falcon_fini_eventq(channel); 385 efx_nic_fini_eventq(channel);
330} 386}
331 387
332static void efx_remove_eventq(struct efx_channel *channel) 388static void efx_remove_eventq(struct efx_channel *channel)
333{ 389{
334 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); 390 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
335 391
336 falcon_remove_eventq(channel); 392 efx_nic_remove_eventq(channel);
337} 393}
338 394
339/************************************************************************** 395/**************************************************************************
@@ -499,7 +555,7 @@ static void efx_fini_channels(struct efx_nic *efx)
499 EFX_ASSERT_RESET_SERIALISED(efx); 555 EFX_ASSERT_RESET_SERIALISED(efx);
500 BUG_ON(efx->port_enabled); 556 BUG_ON(efx->port_enabled);
501 557
502 rc = falcon_flush_queues(efx); 558 rc = efx_nic_flush_queues(efx);
503 if (rc) 559 if (rc)
504 EFX_ERR(efx, "failed to flush queues\n"); 560 EFX_ERR(efx, "failed to flush queues\n");
505 else 561 else
@@ -547,8 +603,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
547 * netif_carrier_on/off) of the link status, and also maintains the 603 * netif_carrier_on/off) of the link status, and also maintains the
548 * link status's stop on the port's TX queue. 604 * link status's stop on the port's TX queue.
549 */ 605 */
550static void efx_link_status_changed(struct efx_nic *efx) 606void efx_link_status_changed(struct efx_nic *efx)
551{ 607{
608 struct efx_link_state *link_state = &efx->link_state;
609
552 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 610 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
553 * that no events are triggered between unregister_netdev() and the 611 * that no events are triggered between unregister_netdev() and the
554 * driver unloading. A more general condition is that NETDEV_CHANGE 612 * driver unloading. A more general condition is that NETDEV_CHANGE
@@ -561,19 +619,19 @@ static void efx_link_status_changed(struct efx_nic *efx)
561 return; 619 return;
562 } 620 }
563 621
564 if (efx->link_up != netif_carrier_ok(efx->net_dev)) { 622 if (link_state->up != netif_carrier_ok(efx->net_dev)) {
565 efx->n_link_state_changes++; 623 efx->n_link_state_changes++;
566 624
567 if (efx->link_up) 625 if (link_state->up)
568 netif_carrier_on(efx->net_dev); 626 netif_carrier_on(efx->net_dev);
569 else 627 else
570 netif_carrier_off(efx->net_dev); 628 netif_carrier_off(efx->net_dev);
571 } 629 }
572 630
573 /* Status message for kernel log */ 631 /* Status message for kernel log */
574 if (efx->link_up) { 632 if (link_state->up) {
575 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", 633 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
576 efx->link_speed, efx->link_fd ? "full" : "half", 634 link_state->speed, link_state->fd ? "full" : "half",
577 efx->net_dev->mtu, 635 efx->net_dev->mtu,
578 (efx->promiscuous ? " [PROMISC]" : "")); 636 (efx->promiscuous ? " [PROMISC]" : ""));
579 } else { 637 } else {
@@ -582,16 +640,49 @@ static void efx_link_status_changed(struct efx_nic *efx)
582 640
583} 641}
584 642
643void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
644{
645 efx->link_advertising = advertising;
646 if (advertising) {
647 if (advertising & ADVERTISED_Pause)
648 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
649 else
650 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
651 if (advertising & ADVERTISED_Asym_Pause)
652 efx->wanted_fc ^= EFX_FC_TX;
653 }
654}
655
656void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc)
657{
658 efx->wanted_fc = wanted_fc;
659 if (efx->link_advertising) {
660 if (wanted_fc & EFX_FC_RX)
661 efx->link_advertising |= (ADVERTISED_Pause |
662 ADVERTISED_Asym_Pause);
663 else
664 efx->link_advertising &= ~(ADVERTISED_Pause |
665 ADVERTISED_Asym_Pause);
666 if (wanted_fc & EFX_FC_TX)
667 efx->link_advertising ^= ADVERTISED_Asym_Pause;
668 }
669}
670
585static void efx_fini_port(struct efx_nic *efx); 671static void efx_fini_port(struct efx_nic *efx);
586 672
587/* This call reinitialises the MAC to pick up new PHY settings. The 673/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
588 * caller must hold the mac_lock */ 674 * the MAC appropriately. All other PHY configuration changes are pushed
589void __efx_reconfigure_port(struct efx_nic *efx) 675 * through phy_op->set_settings(), and pushed asynchronously to the MAC
676 * through efx_monitor().
677 *
678 * Callers must hold the mac_lock
679 */
680int __efx_reconfigure_port(struct efx_nic *efx)
590{ 681{
591 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 682 enum efx_phy_mode phy_mode;
683 int rc;
592 684
593 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n", 685 WARN_ON(!mutex_is_locked(&efx->mac_lock));
594 raw_smp_processor_id());
595 686
596 /* Serialise the promiscuous flag with efx_set_multicast_list. */ 687 /* Serialise the promiscuous flag with efx_set_multicast_list. */
597 if (efx_dev_registered(efx)) { 688 if (efx_dev_registered(efx)) {
@@ -599,61 +690,48 @@ void __efx_reconfigure_port(struct efx_nic *efx)
599 netif_addr_unlock_bh(efx->net_dev); 690 netif_addr_unlock_bh(efx->net_dev);
600 } 691 }
601 692
602 falcon_deconfigure_mac_wrapper(efx); 693 /* Disable PHY transmit in mac level loopbacks */
603 694 phy_mode = efx->phy_mode;
604 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
605 if (LOOPBACK_INTERNAL(efx)) 695 if (LOOPBACK_INTERNAL(efx))
606 efx->phy_mode |= PHY_MODE_TX_DISABLED; 696 efx->phy_mode |= PHY_MODE_TX_DISABLED;
607 else 697 else
608 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; 698 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
609 efx->phy_op->reconfigure(efx);
610 699
611 if (falcon_switch_mac(efx)) 700 rc = efx->type->reconfigure_port(efx);
612 goto fail;
613 701
614 efx->mac_op->reconfigure(efx); 702 if (rc)
615 703 efx->phy_mode = phy_mode;
616 /* Inform kernel of loss/gain of carrier */
617 efx_link_status_changed(efx);
618 return;
619 704
620fail: 705 return rc;
621 EFX_ERR(efx, "failed to reconfigure MAC\n");
622 efx->port_enabled = false;
623 efx_fini_port(efx);
624} 706}
625 707
626/* Reinitialise the MAC to pick up new PHY settings, even if the port is 708/* Reinitialise the MAC to pick up new PHY settings, even if the port is
627 * disabled. */ 709 * disabled. */
628void efx_reconfigure_port(struct efx_nic *efx) 710int efx_reconfigure_port(struct efx_nic *efx)
629{ 711{
712 int rc;
713
630 EFX_ASSERT_RESET_SERIALISED(efx); 714 EFX_ASSERT_RESET_SERIALISED(efx);
631 715
632 mutex_lock(&efx->mac_lock); 716 mutex_lock(&efx->mac_lock);
633 __efx_reconfigure_port(efx); 717 rc = __efx_reconfigure_port(efx);
634 mutex_unlock(&efx->mac_lock); 718 mutex_unlock(&efx->mac_lock);
635}
636
637/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
638 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
639 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
640static void efx_phy_work(struct work_struct *data)
641{
642 struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);
643 719
644 mutex_lock(&efx->mac_lock); 720 return rc;
645 if (efx->port_enabled)
646 __efx_reconfigure_port(efx);
647 mutex_unlock(&efx->mac_lock);
648} 721}
649 722
723/* Asynchronous work item for changing MAC promiscuity and multicast
724 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
725 * MAC directly. */
650static void efx_mac_work(struct work_struct *data) 726static void efx_mac_work(struct work_struct *data)
651{ 727{
652 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 728 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
653 729
654 mutex_lock(&efx->mac_lock); 730 mutex_lock(&efx->mac_lock);
655 if (efx->port_enabled) 731 if (efx->port_enabled) {
656 efx->mac_op->irq(efx); 732 efx->type->push_multicast_hash(efx);
733 efx->mac_op->reconfigure(efx);
734 }
657 mutex_unlock(&efx->mac_lock); 735 mutex_unlock(&efx->mac_lock);
658} 736}
659 737
@@ -663,8 +741,8 @@ static int efx_probe_port(struct efx_nic *efx)
663 741
664 EFX_LOG(efx, "create port\n"); 742 EFX_LOG(efx, "create port\n");
665 743
666 /* Connect up MAC/PHY operations table and read MAC address */ 744 /* Connect up MAC/PHY operations table */
667 rc = falcon_probe_port(efx); 745 rc = efx->type->probe_port(efx);
668 if (rc) 746 if (rc)
669 goto err; 747 goto err;
670 748
@@ -699,29 +777,33 @@ static int efx_init_port(struct efx_nic *efx)
699 777
700 EFX_LOG(efx, "init port\n"); 778 EFX_LOG(efx, "init port\n");
701 779
702 rc = efx->phy_op->init(efx);
703 if (rc)
704 return rc;
705 mutex_lock(&efx->mac_lock); 780 mutex_lock(&efx->mac_lock);
706 efx->phy_op->reconfigure(efx); 781
707 rc = falcon_switch_mac(efx); 782 rc = efx->phy_op->init(efx);
708 mutex_unlock(&efx->mac_lock);
709 if (rc) 783 if (rc)
710 goto fail; 784 goto fail1;
711 efx->mac_op->reconfigure(efx);
712 785
713 efx->port_initialized = true; 786 efx->port_initialized = true;
714 efx_stats_enable(efx); 787
788 /* Reconfigure the MAC before creating dma queues (required for
789 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
790 efx->mac_op->reconfigure(efx);
791
792 /* Ensure the PHY advertises the correct flow control settings */
793 rc = efx->phy_op->reconfigure(efx);
794 if (rc)
795 goto fail2;
796
797 mutex_unlock(&efx->mac_lock);
715 return 0; 798 return 0;
716 799
717fail: 800fail2:
718 efx->phy_op->fini(efx); 801 efx->phy_op->fini(efx);
802fail1:
803 mutex_unlock(&efx->mac_lock);
719 return rc; 804 return rc;
720} 805}
721 806
722/* Allow efx_reconfigure_port() to be scheduled, and close the window
723 * between efx_stop_port and efx_flush_all whereby a previously scheduled
724 * efx_phy_work()/efx_mac_work() may have been cancelled */
725static void efx_start_port(struct efx_nic *efx) 807static void efx_start_port(struct efx_nic *efx)
726{ 808{
727 EFX_LOG(efx, "start port\n"); 809 EFX_LOG(efx, "start port\n");
@@ -729,15 +811,16 @@ static void efx_start_port(struct efx_nic *efx)
729 811
730 mutex_lock(&efx->mac_lock); 812 mutex_lock(&efx->mac_lock);
731 efx->port_enabled = true; 813 efx->port_enabled = true;
732 __efx_reconfigure_port(efx); 814
733 efx->mac_op->irq(efx); 815 /* efx_mac_work() might have been scheduled after efx_stop_port(),
816 * and then cancelled by efx_flush_all() */
817 efx->type->push_multicast_hash(efx);
818 efx->mac_op->reconfigure(efx);
819
734 mutex_unlock(&efx->mac_lock); 820 mutex_unlock(&efx->mac_lock);
735} 821}
736 822
737/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing, 823/* Prevent efx_mac_work() and efx_monitor() from working */
738 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
739 * and efx_mac_work may still be scheduled via NAPI processing until
740 * efx_flush_all() is called */
741static void efx_stop_port(struct efx_nic *efx) 824static void efx_stop_port(struct efx_nic *efx)
742{ 825{
743 EFX_LOG(efx, "stop port\n"); 826 EFX_LOG(efx, "stop port\n");
@@ -760,11 +843,10 @@ static void efx_fini_port(struct efx_nic *efx)
760 if (!efx->port_initialized) 843 if (!efx->port_initialized)
761 return; 844 return;
762 845
763 efx_stats_disable(efx);
764 efx->phy_op->fini(efx); 846 efx->phy_op->fini(efx);
765 efx->port_initialized = false; 847 efx->port_initialized = false;
766 848
767 efx->link_up = false; 849 efx->link_state.up = false;
768 efx_link_status_changed(efx); 850 efx_link_status_changed(efx);
769} 851}
770 852
@@ -772,7 +854,7 @@ static void efx_remove_port(struct efx_nic *efx)
772{ 854{
773 EFX_LOG(efx, "destroying port\n"); 855 EFX_LOG(efx, "destroying port\n");
774 856
775 falcon_remove_port(efx); 857 efx->type->remove_port(efx);
776} 858}
777 859
778/************************************************************************** 860/**************************************************************************
@@ -824,9 +906,8 @@ static int efx_init_io(struct efx_nic *efx)
824 goto fail2; 906 goto fail2;
825 } 907 }
826 908
827 efx->membase_phys = pci_resource_start(efx->pci_dev, 909 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
828 efx->type->mem_bar); 910 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
829 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
830 if (rc) { 911 if (rc) {
831 EFX_ERR(efx, "request for memory BAR failed\n"); 912 EFX_ERR(efx, "request for memory BAR failed\n");
832 rc = -EIO; 913 rc = -EIO;
@@ -835,21 +916,20 @@ static int efx_init_io(struct efx_nic *efx)
835 efx->membase = ioremap_nocache(efx->membase_phys, 916 efx->membase = ioremap_nocache(efx->membase_phys,
836 efx->type->mem_map_size); 917 efx->type->mem_map_size);
837 if (!efx->membase) { 918 if (!efx->membase) {
838 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", 919 EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
839 efx->type->mem_bar,
840 (unsigned long long)efx->membase_phys, 920 (unsigned long long)efx->membase_phys,
841 efx->type->mem_map_size); 921 efx->type->mem_map_size);
842 rc = -ENOMEM; 922 rc = -ENOMEM;
843 goto fail4; 923 goto fail4;
844 } 924 }
845 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", 925 EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
846 efx->type->mem_bar, (unsigned long long)efx->membase_phys, 926 (unsigned long long)efx->membase_phys,
847 efx->type->mem_map_size, efx->membase); 927 efx->type->mem_map_size, efx->membase);
848 928
849 return 0; 929 return 0;
850 930
851 fail4: 931 fail4:
852 pci_release_region(efx->pci_dev, efx->type->mem_bar); 932 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
853 fail3: 933 fail3:
854 efx->membase_phys = 0; 934 efx->membase_phys = 0;
855 fail2: 935 fail2:
@@ -868,7 +948,7 @@ static void efx_fini_io(struct efx_nic *efx)
868 } 948 }
869 949
870 if (efx->membase_phys) { 950 if (efx->membase_phys) {
871 pci_release_region(efx->pci_dev, efx->type->mem_bar); 951 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
872 efx->membase_phys = 0; 952 efx->membase_phys = 0;
873 } 953 }
874 954
@@ -1011,7 +1091,7 @@ static int efx_probe_nic(struct efx_nic *efx)
1011 EFX_LOG(efx, "creating NIC\n"); 1091 EFX_LOG(efx, "creating NIC\n");
1012 1092
1013 /* Carry out hardware-type specific initialisation */ 1093 /* Carry out hardware-type specific initialisation */
1014 rc = falcon_probe_nic(efx); 1094 rc = efx->type->probe(efx);
1015 if (rc) 1095 if (rc)
1016 return rc; 1096 return rc;
1017 1097
@@ -1032,7 +1112,7 @@ static void efx_remove_nic(struct efx_nic *efx)
1032 EFX_LOG(efx, "destroying NIC\n"); 1112 EFX_LOG(efx, "destroying NIC\n");
1033 1113
1034 efx_remove_interrupts(efx); 1114 efx_remove_interrupts(efx);
1035 falcon_remove_nic(efx); 1115 efx->type->remove(efx);
1036} 1116}
1037 1117
1038/************************************************************************** 1118/**************************************************************************
@@ -1112,12 +1192,31 @@ static void efx_start_all(struct efx_nic *efx)
1112 efx_for_each_channel(channel, efx) 1192 efx_for_each_channel(channel, efx)
1113 efx_start_channel(channel); 1193 efx_start_channel(channel);
1114 1194
1115 falcon_enable_interrupts(efx); 1195 efx_nic_enable_interrupts(efx);
1116 1196
1117 /* Start hardware monitor if we're in RUNNING */ 1197 /* Switch to event based MCDI completions after enabling interrupts.
1118 if (efx->state == STATE_RUNNING) 1198 * If a reset has been scheduled, then we need to stay in polled mode.
1199 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
1200 * reset_pending [modified from an atomic context], we instead guarantee
1201 * that efx_mcdi_mode_poll() isn't reverted erroneously */
1202 efx_mcdi_mode_event(efx);
1203 if (efx->reset_pending != RESET_TYPE_NONE)
1204 efx_mcdi_mode_poll(efx);
1205
1206 /* Start the hardware monitor if there is one. Otherwise (we're link
1207 * event driven), we have to poll the PHY because after an event queue
1208 * flush, we could have a missed a link state change */
1209 if (efx->type->monitor != NULL) {
1119 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1210 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1120 efx_monitor_interval); 1211 efx_monitor_interval);
1212 } else {
1213 mutex_lock(&efx->mac_lock);
1214 if (efx->phy_op->poll(efx))
1215 efx_link_status_changed(efx);
1216 mutex_unlock(&efx->mac_lock);
1217 }
1218
1219 efx->type->start_stats(efx);
1121} 1220}
1122 1221
1123/* Flush all delayed work. Should only be called when no more delayed work 1222/* Flush all delayed work. Should only be called when no more delayed work
@@ -1136,8 +1235,6 @@ static void efx_flush_all(struct efx_nic *efx)
1136 1235
1137 /* Stop scheduled port reconfigurations */ 1236 /* Stop scheduled port reconfigurations */
1138 cancel_work_sync(&efx->mac_work); 1237 cancel_work_sync(&efx->mac_work);
1139 cancel_work_sync(&efx->phy_work);
1140
1141} 1238}
1142 1239
1143/* Quiesce hardware and software without bringing the link down. 1240/* Quiesce hardware and software without bringing the link down.
@@ -1155,8 +1252,13 @@ static void efx_stop_all(struct efx_nic *efx)
1155 if (!efx->port_enabled) 1252 if (!efx->port_enabled)
1156 return; 1253 return;
1157 1254
1255 efx->type->stop_stats(efx);
1256
1257 /* Switch to MCDI polling on Siena before disabling interrupts */
1258 efx_mcdi_mode_poll(efx);
1259
1158 /* Disable interrupts and wait for ISR to complete */ 1260 /* Disable interrupts and wait for ISR to complete */
1159 falcon_disable_interrupts(efx); 1261 efx_nic_disable_interrupts(efx);
1160 if (efx->legacy_irq) 1262 if (efx->legacy_irq)
1161 synchronize_irq(efx->legacy_irq); 1263 synchronize_irq(efx->legacy_irq);
1162 efx_for_each_channel(channel, efx) { 1264 efx_for_each_channel(channel, efx) {
@@ -1173,15 +1275,9 @@ static void efx_stop_all(struct efx_nic *efx)
1173 * window to loose phy events */ 1275 * window to loose phy events */
1174 efx_stop_port(efx); 1276 efx_stop_port(efx);
1175 1277
1176 /* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */ 1278 /* Flush efx_mac_work(), refill_workqueue, monitor_work */
1177 efx_flush_all(efx); 1279 efx_flush_all(efx);
1178 1280
1179 /* Isolate the MAC from the TX and RX engines, so that queue
1180 * flushes will complete in a timely fashion. */
1181 falcon_deconfigure_mac_wrapper(efx);
1182 msleep(10); /* Let the Rx FIFO drain */
1183 falcon_drain_tx_fifo(efx);
1184
1185 /* Stop the kernel transmit interface late, so the watchdog 1281 /* Stop the kernel transmit interface late, so the watchdog
1186 * timer isn't ticking over the flush */ 1282 * timer isn't ticking over the flush */
1187 if (efx_dev_registered(efx)) { 1283 if (efx_dev_registered(efx)) {
@@ -1201,41 +1297,39 @@ static void efx_remove_all(struct efx_nic *efx)
1201 efx_remove_nic(efx); 1297 efx_remove_nic(efx);
1202} 1298}
1203 1299
1204/* A convinience function to safely flush all the queues */
1205void efx_flush_queues(struct efx_nic *efx)
1206{
1207 EFX_ASSERT_RESET_SERIALISED(efx);
1208
1209 efx_stop_all(efx);
1210
1211 efx_fini_channels(efx);
1212 efx_init_channels(efx);
1213
1214 efx_start_all(efx);
1215}
1216
1217/************************************************************************** 1300/**************************************************************************
1218 * 1301 *
1219 * Interrupt moderation 1302 * Interrupt moderation
1220 * 1303 *
1221 **************************************************************************/ 1304 **************************************************************************/
1222 1305
1306static unsigned irq_mod_ticks(int usecs, int resolution)
1307{
1308 if (usecs <= 0)
1309 return 0; /* cannot receive interrupts ahead of time :-) */
1310 if (usecs < resolution)
1311 return 1; /* never round down to 0 */
1312 return usecs / resolution;
1313}
1314
1223/* Set interrupt moderation parameters */ 1315/* Set interrupt moderation parameters */
1224void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, 1316void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1225 bool rx_adaptive) 1317 bool rx_adaptive)
1226{ 1318{
1227 struct efx_tx_queue *tx_queue; 1319 struct efx_tx_queue *tx_queue;
1228 struct efx_rx_queue *rx_queue; 1320 struct efx_rx_queue *rx_queue;
1321 unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
1322 unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
1229 1323
1230 EFX_ASSERT_RESET_SERIALISED(efx); 1324 EFX_ASSERT_RESET_SERIALISED(efx);
1231 1325
1232 efx_for_each_tx_queue(tx_queue, efx) 1326 efx_for_each_tx_queue(tx_queue, efx)
1233 tx_queue->channel->irq_moderation = tx_usecs; 1327 tx_queue->channel->irq_moderation = tx_ticks;
1234 1328
1235 efx->irq_rx_adaptive = rx_adaptive; 1329 efx->irq_rx_adaptive = rx_adaptive;
1236 efx->irq_rx_moderation = rx_usecs; 1330 efx->irq_rx_moderation = rx_ticks;
1237 efx_for_each_rx_queue(rx_queue, efx) 1331 efx_for_each_rx_queue(rx_queue, efx)
1238 rx_queue->channel->irq_moderation = rx_usecs; 1332 rx_queue->channel->irq_moderation = rx_ticks;
1239} 1333}
1240 1334
1241/************************************************************************** 1335/**************************************************************************
@@ -1250,10 +1344,10 @@ static void efx_monitor(struct work_struct *data)
1250{ 1344{
1251 struct efx_nic *efx = container_of(data, struct efx_nic, 1345 struct efx_nic *efx = container_of(data, struct efx_nic,
1252 monitor_work.work); 1346 monitor_work.work);
1253 int rc;
1254 1347
1255 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", 1348 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1256 raw_smp_processor_id()); 1349 raw_smp_processor_id());
1350 BUG_ON(efx->type->monitor == NULL);
1257 1351
1258 /* If the mac_lock is already held then it is likely a port 1352 /* If the mac_lock is already held then it is likely a port
1259 * reconfiguration is already in place, which will likely do 1353 * reconfiguration is already in place, which will likely do
@@ -1262,15 +1356,7 @@ static void efx_monitor(struct work_struct *data)
1262 goto out_requeue; 1356 goto out_requeue;
1263 if (!efx->port_enabled) 1357 if (!efx->port_enabled)
1264 goto out_unlock; 1358 goto out_unlock;
1265 rc = efx->board_info.monitor(efx); 1359 efx->type->monitor(efx);
1266 if (rc) {
1267 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
1268 (rc == -ERANGE) ? "reported fault" : "failed");
1269 efx->phy_mode |= PHY_MODE_LOW_POWER;
1270 falcon_sim_phy_event(efx);
1271 }
1272 efx->phy_op->poll(efx);
1273 efx->mac_op->poll(efx);
1274 1360
1275out_unlock: 1361out_unlock:
1276 mutex_unlock(&efx->mac_lock); 1362 mutex_unlock(&efx->mac_lock);
@@ -1374,6 +1460,12 @@ static int efx_net_open(struct net_device *net_dev)
1374 return -EIO; 1460 return -EIO;
1375 if (efx->phy_mode & PHY_MODE_SPECIAL) 1461 if (efx->phy_mode & PHY_MODE_SPECIAL)
1376 return -EBUSY; 1462 return -EBUSY;
1463 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
1464 return -EIO;
1465
1466 /* Notify the kernel of the link state polled during driver load,
1467 * before the monitor starts running */
1468 efx_link_status_changed(efx);
1377 1469
1378 efx_start_all(efx); 1470 efx_start_all(efx);
1379 return 0; 1471 return 0;
@@ -1400,20 +1492,6 @@ static int efx_net_stop(struct net_device *net_dev)
1400 return 0; 1492 return 0;
1401} 1493}
1402 1494
1403void efx_stats_disable(struct efx_nic *efx)
1404{
1405 spin_lock(&efx->stats_lock);
1406 ++efx->stats_disable_count;
1407 spin_unlock(&efx->stats_lock);
1408}
1409
1410void efx_stats_enable(struct efx_nic *efx)
1411{
1412 spin_lock(&efx->stats_lock);
1413 --efx->stats_disable_count;
1414 spin_unlock(&efx->stats_lock);
1415}
1416
1417/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1495/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1418static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1496static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1419{ 1497{
@@ -1421,17 +1499,9 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1421 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1499 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1422 struct net_device_stats *stats = &net_dev->stats; 1500 struct net_device_stats *stats = &net_dev->stats;
1423 1501
1424 /* Update stats if possible, but do not wait if another thread 1502 spin_lock_bh(&efx->stats_lock);
1425 * is updating them or if MAC stats fetches are temporarily 1503 efx->type->update_stats(efx);
1426 * disabled; slightly stale stats are acceptable. 1504 spin_unlock_bh(&efx->stats_lock);
1427 */
1428 if (!spin_trylock(&efx->stats_lock))
1429 return stats;
1430 if (!efx->stats_disable_count) {
1431 efx->mac_op->update_stats(efx);
1432 falcon_update_nic_stats(efx);
1433 }
1434 spin_unlock(&efx->stats_lock);
1435 1505
1436 stats->rx_packets = mac_stats->rx_packets; 1506 stats->rx_packets = mac_stats->rx_packets;
1437 stats->tx_packets = mac_stats->tx_packets; 1507 stats->tx_packets = mac_stats->tx_packets;
@@ -1490,7 +1560,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1490 EFX_LOG(efx, "changing MTU to %d\n", new_mtu); 1560 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1491 1561
1492 efx_fini_channels(efx); 1562 efx_fini_channels(efx);
1563
1564 mutex_lock(&efx->mac_lock);
1565 /* Reconfigure the MAC before enabling the dma queues so that
1566 * the RX buffers don't overflow */
1493 net_dev->mtu = new_mtu; 1567 net_dev->mtu = new_mtu;
1568 efx->mac_op->reconfigure(efx);
1569 mutex_unlock(&efx->mac_lock);
1570
1494 efx_init_channels(efx); 1571 efx_init_channels(efx);
1495 1572
1496 efx_start_all(efx); 1573 efx_start_all(efx);
@@ -1514,7 +1591,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1514 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); 1591 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1515 1592
1516 /* Reconfigure the MAC */ 1593 /* Reconfigure the MAC */
1517 efx_reconfigure_port(efx); 1594 mutex_lock(&efx->mac_lock);
1595 efx->mac_op->reconfigure(efx);
1596 mutex_unlock(&efx->mac_lock);
1518 1597
1519 return 0; 1598 return 0;
1520} 1599}
@@ -1525,16 +1604,14 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1525 struct efx_nic *efx = netdev_priv(net_dev); 1604 struct efx_nic *efx = netdev_priv(net_dev);
1526 struct dev_mc_list *mc_list = net_dev->mc_list; 1605 struct dev_mc_list *mc_list = net_dev->mc_list;
1527 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1606 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1528 bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
1529 bool changed = (efx->promiscuous != promiscuous);
1530 u32 crc; 1607 u32 crc;
1531 int bit; 1608 int bit;
1532 int i; 1609 int i;
1533 1610
1534 efx->promiscuous = promiscuous; 1611 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
1535 1612
1536 /* Build multicast hash table */ 1613 /* Build multicast hash table */
1537 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) { 1614 if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1538 memset(mc_hash, 0xff, sizeof(*mc_hash)); 1615 memset(mc_hash, 0xff, sizeof(*mc_hash));
1539 } else { 1616 } else {
1540 memset(mc_hash, 0x00, sizeof(*mc_hash)); 1617 memset(mc_hash, 0x00, sizeof(*mc_hash));
@@ -1544,17 +1621,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1544 set_bit_le(bit, mc_hash->byte); 1621 set_bit_le(bit, mc_hash->byte);
1545 mc_list = mc_list->next; 1622 mc_list = mc_list->next;
1546 } 1623 }
1547 }
1548
1549 if (!efx->port_enabled)
1550 /* Delay pushing settings until efx_start_port() */
1551 return;
1552 1624
1553 if (changed) 1625 /* Broadcast packets go through the multicast hash filter.
1554 queue_work(efx->workqueue, &efx->phy_work); 1626 * ether_crc_le() of the broadcast address is 0xbe2612ff
1627 * so we always add bit 0xff to the mask.
1628 */
1629 set_bit_le(0xff, mc_hash->byte);
1630 }
1555 1631
1556 /* Create and activate new global multicast hash table */ 1632 if (efx->port_enabled)
1557 falcon_set_multicast_hash(efx); 1633 queue_work(efx->workqueue, &efx->mac_work);
1634 /* Otherwise efx_start_port() will do this */
1558} 1635}
1559 1636
1560static const struct net_device_ops efx_netdev_ops = { 1637static const struct net_device_ops efx_netdev_ops = {
@@ -1683,21 +1760,18 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1683 1760
1684/* Tears down the entire software state and most of the hardware state 1761/* Tears down the entire software state and most of the hardware state
1685 * before reset. */ 1762 * before reset. */
1686void efx_reset_down(struct efx_nic *efx, enum reset_type method, 1763void efx_reset_down(struct efx_nic *efx, enum reset_type method)
1687 struct ethtool_cmd *ecmd)
1688{ 1764{
1689 EFX_ASSERT_RESET_SERIALISED(efx); 1765 EFX_ASSERT_RESET_SERIALISED(efx);
1690 1766
1691 efx_stats_disable(efx);
1692 efx_stop_all(efx); 1767 efx_stop_all(efx);
1693 mutex_lock(&efx->mac_lock); 1768 mutex_lock(&efx->mac_lock);
1694 mutex_lock(&efx->spi_lock); 1769 mutex_lock(&efx->spi_lock);
1695 1770
1696 efx->phy_op->get_settings(efx, ecmd);
1697
1698 efx_fini_channels(efx); 1771 efx_fini_channels(efx);
1699 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 1772 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
1700 efx->phy_op->fini(efx); 1773 efx->phy_op->fini(efx);
1774 efx->type->fini(efx);
1701} 1775}
1702 1776
1703/* This function will always ensure that the locks acquired in 1777/* This function will always ensure that the locks acquired in
@@ -1705,79 +1779,67 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method,
1705 * that we were unable to reinitialise the hardware, and the 1779 * that we were unable to reinitialise the hardware, and the
1706 * driver should be disabled. If ok is false, then the rx and tx 1780 * driver should be disabled. If ok is false, then the rx and tx
1707 * engines are not restarted, pending a RESET_DISABLE. */ 1781 * engines are not restarted, pending a RESET_DISABLE. */
1708int efx_reset_up(struct efx_nic *efx, enum reset_type method, 1782int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
1709 struct ethtool_cmd *ecmd, bool ok)
1710{ 1783{
1711 int rc; 1784 int rc;
1712 1785
1713 EFX_ASSERT_RESET_SERIALISED(efx); 1786 EFX_ASSERT_RESET_SERIALISED(efx);
1714 1787
1715 rc = falcon_init_nic(efx); 1788 rc = efx->type->init(efx);
1716 if (rc) { 1789 if (rc) {
1717 EFX_ERR(efx, "failed to initialise NIC\n"); 1790 EFX_ERR(efx, "failed to initialise NIC\n");
1718 ok = false; 1791 goto fail;
1719 } 1792 }
1720 1793
1794 if (!ok)
1795 goto fail;
1796
1721 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { 1797 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
1722 if (ok) { 1798 rc = efx->phy_op->init(efx);
1723 rc = efx->phy_op->init(efx); 1799 if (rc)
1724 if (rc) 1800 goto fail;
1725 ok = false; 1801 if (efx->phy_op->reconfigure(efx))
1726 } 1802 EFX_ERR(efx, "could not restore PHY settings\n");
1727 if (!ok)
1728 efx->port_initialized = false;
1729 } 1803 }
1730 1804
1731 if (ok) { 1805 efx->mac_op->reconfigure(efx);
1732 efx_init_channels(efx);
1733 1806
1734 if (efx->phy_op->set_settings(efx, ecmd)) 1807 efx_init_channels(efx);
1735 EFX_ERR(efx, "could not restore PHY settings\n"); 1808
1736 } 1809 mutex_unlock(&efx->spi_lock);
1810 mutex_unlock(&efx->mac_lock);
1811
1812 efx_start_all(efx);
1813
1814 return 0;
1815
1816fail:
1817 efx->port_initialized = false;
1737 1818
1738 mutex_unlock(&efx->spi_lock); 1819 mutex_unlock(&efx->spi_lock);
1739 mutex_unlock(&efx->mac_lock); 1820 mutex_unlock(&efx->mac_lock);
1740 1821
1741 if (ok) {
1742 efx_start_all(efx);
1743 efx_stats_enable(efx);
1744 }
1745 return rc; 1822 return rc;
1746} 1823}
1747 1824
1748/* Reset the NIC as transparently as possible. Do not reset the PHY 1825/* Reset the NIC using the specified method. Note that the reset may
1749 * Note that the reset may fail, in which case the card will be left 1826 * fail, in which case the card will be left in an unusable state.
1750 * in a most-probably-unusable state.
1751 * 1827 *
1752 * This function will sleep. You cannot reset from within an atomic 1828 * Caller must hold the rtnl_lock.
1753 * state; use efx_schedule_reset() instead.
1754 *
1755 * Grabs the rtnl_lock.
1756 */ 1829 */
1757static int efx_reset(struct efx_nic *efx) 1830int efx_reset(struct efx_nic *efx, enum reset_type method)
1758{ 1831{
1759 struct ethtool_cmd ecmd; 1832 int rc, rc2;
1760 enum reset_type method = efx->reset_pending; 1833 bool disabled;
1761 int rc = 0;
1762 1834
1763 /* Serialise with kernel interfaces */ 1835 EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
1764 rtnl_lock();
1765 1836
1766 /* If we're not RUNNING then don't reset. Leave the reset_pending 1837 efx_reset_down(efx, method);
1767 * flag set so that efx_pci_probe_main will be retried */
1768 if (efx->state != STATE_RUNNING) {
1769 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1770 goto out_unlock;
1771 }
1772 1838
1773 EFX_INFO(efx, "resetting (%d)\n", method); 1839 rc = efx->type->reset(efx, method);
1774
1775 efx_reset_down(efx, method, &ecmd);
1776
1777 rc = falcon_reset_hw(efx, method);
1778 if (rc) { 1840 if (rc) {
1779 EFX_ERR(efx, "failed to reset hardware\n"); 1841 EFX_ERR(efx, "failed to reset hardware\n");
1780 goto out_disable; 1842 goto out;
1781 } 1843 }
1782 1844
1783 /* Allow resets to be rescheduled. */ 1845 /* Allow resets to be rescheduled. */
@@ -1789,25 +1851,22 @@ static int efx_reset(struct efx_nic *efx)
1789 * can respond to requests. */ 1851 * can respond to requests. */
1790 pci_set_master(efx->pci_dev); 1852 pci_set_master(efx->pci_dev);
1791 1853
1854out:
1792 /* Leave device stopped if necessary */ 1855 /* Leave device stopped if necessary */
1793 if (method == RESET_TYPE_DISABLE) { 1856 disabled = rc || method == RESET_TYPE_DISABLE;
1794 efx_reset_up(efx, method, &ecmd, false); 1857 rc2 = efx_reset_up(efx, method, !disabled);
1795 rc = -EIO; 1858 if (rc2) {
1796 } else { 1859 disabled = true;
1797 rc = efx_reset_up(efx, method, &ecmd, true); 1860 if (!rc)
1861 rc = rc2;
1798 } 1862 }
1799 1863
1800out_disable: 1864 if (disabled) {
1801 if (rc) {
1802 EFX_ERR(efx, "has been disabled\n"); 1865 EFX_ERR(efx, "has been disabled\n");
1803 efx->state = STATE_DISABLED; 1866 efx->state = STATE_DISABLED;
1804 dev_close(efx->net_dev);
1805 } else { 1867 } else {
1806 EFX_LOG(efx, "reset complete\n"); 1868 EFX_LOG(efx, "reset complete\n");
1807 } 1869 }
1808
1809out_unlock:
1810 rtnl_unlock();
1811 return rc; 1870 return rc;
1812} 1871}
1813 1872
@@ -1816,9 +1875,19 @@ out_unlock:
1816 */ 1875 */
1817static void efx_reset_work(struct work_struct *data) 1876static void efx_reset_work(struct work_struct *data)
1818{ 1877{
1819 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work); 1878 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
1879
1880 /* If we're not RUNNING then don't reset. Leave the reset_pending
1881 * flag set so that efx_pci_probe_main will be retried */
1882 if (efx->state != STATE_RUNNING) {
1883 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1884 return;
1885 }
1820 1886
1821 efx_reset(nic); 1887 rtnl_lock();
1888 if (efx_reset(efx, efx->reset_pending))
1889 dev_close(efx->net_dev);
1890 rtnl_unlock();
1822} 1891}
1823 1892
1824void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) 1893void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
@@ -1843,18 +1912,24 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1843 case RESET_TYPE_TX_SKIP: 1912 case RESET_TYPE_TX_SKIP:
1844 method = RESET_TYPE_INVISIBLE; 1913 method = RESET_TYPE_INVISIBLE;
1845 break; 1914 break;
1915 case RESET_TYPE_MC_FAILURE:
1846 default: 1916 default:
1847 method = RESET_TYPE_ALL; 1917 method = RESET_TYPE_ALL;
1848 break; 1918 break;
1849 } 1919 }
1850 1920
1851 if (method != type) 1921 if (method != type)
1852 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method); 1922 EFX_LOG(efx, "scheduling %s reset for %s\n",
1923 RESET_TYPE(method), RESET_TYPE(type));
1853 else 1924 else
1854 EFX_LOG(efx, "scheduling reset (%d)\n", method); 1925 EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
1855 1926
1856 efx->reset_pending = method; 1927 efx->reset_pending = method;
1857 1928
1929 /* efx_process_channel() will no longer read events once a
1930 * reset is scheduled. So switch back to poll'd MCDI completions. */
1931 efx_mcdi_mode_poll(efx);
1932
1858 queue_work(reset_workqueue, &efx->reset_work); 1933 queue_work(reset_workqueue, &efx->reset_work);
1859} 1934}
1860 1935
@@ -1867,15 +1942,19 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1867/* PCI device ID table */ 1942/* PCI device ID table */
1868static struct pci_device_id efx_pci_table[] __devinitdata = { 1943static struct pci_device_id efx_pci_table[] __devinitdata = {
1869 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1870 .driver_data = (unsigned long) &falcon_a_nic_type}, 1945 .driver_data = (unsigned long) &falcon_a1_nic_type},
1871 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 1946 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1872 .driver_data = (unsigned long) &falcon_b_nic_type}, 1947 .driver_data = (unsigned long) &falcon_b0_nic_type},
1948 {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
1949 .driver_data = (unsigned long) &siena_a0_nic_type},
1950 {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
1951 .driver_data = (unsigned long) &siena_a0_nic_type},
1873 {0} /* end of list */ 1952 {0} /* end of list */
1874}; 1953};
1875 1954
1876/************************************************************************** 1955/**************************************************************************
1877 * 1956 *
1878 * Dummy PHY/MAC/Board operations 1957 * Dummy PHY/MAC operations
1879 * 1958 *
1880 * Can be used for some unimplemented operations 1959 * Can be used for some unimplemented operations
1881 * Needed so all function pointers are valid and do not have to be tested 1960 * Needed so all function pointers are valid and do not have to be tested
@@ -1887,29 +1966,19 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
1887 return 0; 1966 return 0;
1888} 1967}
1889void efx_port_dummy_op_void(struct efx_nic *efx) {} 1968void efx_port_dummy_op_void(struct efx_nic *efx) {}
1890void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {} 1969void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1891 1970{
1892static struct efx_mac_operations efx_dummy_mac_operations = { 1971}
1893 .reconfigure = efx_port_dummy_op_void, 1972bool efx_port_dummy_op_poll(struct efx_nic *efx)
1894 .poll = efx_port_dummy_op_void, 1973{
1895 .irq = efx_port_dummy_op_void, 1974 return false;
1896}; 1975}
1897 1976
1898static struct efx_phy_operations efx_dummy_phy_operations = { 1977static struct efx_phy_operations efx_dummy_phy_operations = {
1899 .init = efx_port_dummy_op_int, 1978 .init = efx_port_dummy_op_int,
1900 .reconfigure = efx_port_dummy_op_void, 1979 .reconfigure = efx_port_dummy_op_int,
1901 .poll = efx_port_dummy_op_void, 1980 .poll = efx_port_dummy_op_poll,
1902 .fini = efx_port_dummy_op_void, 1981 .fini = efx_port_dummy_op_void,
1903 .clear_interrupt = efx_port_dummy_op_void,
1904};
1905
1906static struct efx_board efx_dummy_board_info = {
1907 .init = efx_port_dummy_op_int,
1908 .init_leds = efx_port_dummy_op_void,
1909 .set_id_led = efx_port_dummy_op_blink,
1910 .monitor = efx_port_dummy_op_int,
1911 .blink = efx_port_dummy_op_blink,
1912 .fini = efx_port_dummy_op_void,
1913}; 1982};
1914 1983
1915/************************************************************************** 1984/**************************************************************************
@@ -1932,26 +2001,26 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1932 /* Initialise common structures */ 2001 /* Initialise common structures */
1933 memset(efx, 0, sizeof(*efx)); 2002 memset(efx, 0, sizeof(*efx));
1934 spin_lock_init(&efx->biu_lock); 2003 spin_lock_init(&efx->biu_lock);
1935 spin_lock_init(&efx->phy_lock); 2004 mutex_init(&efx->mdio_lock);
1936 mutex_init(&efx->spi_lock); 2005 mutex_init(&efx->spi_lock);
2006#ifdef CONFIG_SFC_MTD
2007 INIT_LIST_HEAD(&efx->mtd_list);
2008#endif
1937 INIT_WORK(&efx->reset_work, efx_reset_work); 2009 INIT_WORK(&efx->reset_work, efx_reset_work);
1938 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 2010 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1939 efx->pci_dev = pci_dev; 2011 efx->pci_dev = pci_dev;
1940 efx->state = STATE_INIT; 2012 efx->state = STATE_INIT;
1941 efx->reset_pending = RESET_TYPE_NONE; 2013 efx->reset_pending = RESET_TYPE_NONE;
1942 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2014 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1943 efx->board_info = efx_dummy_board_info;
1944 2015
1945 efx->net_dev = net_dev; 2016 efx->net_dev = net_dev;
1946 efx->rx_checksum_enabled = true; 2017 efx->rx_checksum_enabled = true;
1947 spin_lock_init(&efx->netif_stop_lock); 2018 spin_lock_init(&efx->netif_stop_lock);
1948 spin_lock_init(&efx->stats_lock); 2019 spin_lock_init(&efx->stats_lock);
1949 efx->stats_disable_count = 1;
1950 mutex_init(&efx->mac_lock); 2020 mutex_init(&efx->mac_lock);
1951 efx->mac_op = &efx_dummy_mac_operations; 2021 efx->mac_op = type->default_mac_ops;
1952 efx->phy_op = &efx_dummy_phy_operations; 2022 efx->phy_op = &efx_dummy_phy_operations;
1953 efx->mdio.dev = net_dev; 2023 efx->mdio.dev = net_dev;
1954 INIT_WORK(&efx->phy_work, efx_phy_work);
1955 INIT_WORK(&efx->mac_work, efx_mac_work); 2024 INIT_WORK(&efx->mac_work, efx_mac_work);
1956 atomic_set(&efx->netif_stop_count, 1); 2025 atomic_set(&efx->netif_stop_count, 1);
1957 2026
@@ -1981,17 +2050,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1981 2050
1982 efx->type = type; 2051 efx->type = type;
1983 2052
1984 /* Sanity-check NIC type */
1985 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1986 (efx->type->txd_ring_mask + 1));
1987 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1988 (efx->type->rxd_ring_mask + 1));
1989 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1990 (efx->type->evq_size - 1));
1991 /* As close as we can get to guaranteeing that we don't overflow */ 2053 /* As close as we can get to guaranteeing that we don't overflow */
1992 EFX_BUG_ON_PARANOID(efx->type->evq_size < 2054 BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
1993 (efx->type->txd_ring_mask + 1 + 2055
1994 efx->type->rxd_ring_mask + 1));
1995 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2056 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1996 2057
1997 /* Higher numbered interrupt modes are less capable! */ 2058 /* Higher numbered interrupt modes are less capable! */
@@ -2027,19 +2088,10 @@ static void efx_fini_struct(struct efx_nic *efx)
2027 */ 2088 */
2028static void efx_pci_remove_main(struct efx_nic *efx) 2089static void efx_pci_remove_main(struct efx_nic *efx)
2029{ 2090{
2030 EFX_ASSERT_RESET_SERIALISED(efx); 2091 efx_nic_fini_interrupt(efx);
2031
2032 /* Skip everything if we never obtained a valid membase */
2033 if (!efx->membase)
2034 return;
2035
2036 efx_fini_channels(efx); 2092 efx_fini_channels(efx);
2037 efx_fini_port(efx); 2093 efx_fini_port(efx);
2038 2094 efx->type->fini(efx);
2039 /* Shutdown the board, then the NIC and board state */
2040 efx->board_info.fini(efx);
2041 falcon_fini_interrupt(efx);
2042
2043 efx_fini_napi(efx); 2095 efx_fini_napi(efx);
2044 efx_remove_all(efx); 2096 efx_remove_all(efx);
2045} 2097}
@@ -2063,9 +2115,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2063 /* Allow any queued efx_resets() to complete */ 2115 /* Allow any queued efx_resets() to complete */
2064 rtnl_unlock(); 2116 rtnl_unlock();
2065 2117
2066 if (efx->membase == NULL)
2067 goto out;
2068
2069 efx_unregister_netdev(efx); 2118 efx_unregister_netdev(efx);
2070 2119
2071 efx_mtd_remove(efx); 2120 efx_mtd_remove(efx);
@@ -2078,7 +2127,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2078 2127
2079 efx_pci_remove_main(efx); 2128 efx_pci_remove_main(efx);
2080 2129
2081out:
2082 efx_fini_io(efx); 2130 efx_fini_io(efx);
2083 EFX_LOG(efx, "shutdown successful\n"); 2131 EFX_LOG(efx, "shutdown successful\n");
2084 2132
@@ -2103,39 +2151,31 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2103 if (rc) 2151 if (rc)
2104 goto fail2; 2152 goto fail2;
2105 2153
2106 /* Initialise the board */ 2154 rc = efx->type->init(efx);
2107 rc = efx->board_info.init(efx);
2108 if (rc) {
2109 EFX_ERR(efx, "failed to initialise board\n");
2110 goto fail3;
2111 }
2112
2113 rc = falcon_init_nic(efx);
2114 if (rc) { 2155 if (rc) {
2115 EFX_ERR(efx, "failed to initialise NIC\n"); 2156 EFX_ERR(efx, "failed to initialise NIC\n");
2116 goto fail4; 2157 goto fail3;
2117 } 2158 }
2118 2159
2119 rc = efx_init_port(efx); 2160 rc = efx_init_port(efx);
2120 if (rc) { 2161 if (rc) {
2121 EFX_ERR(efx, "failed to initialise port\n"); 2162 EFX_ERR(efx, "failed to initialise port\n");
2122 goto fail5; 2163 goto fail4;
2123 } 2164 }
2124 2165
2125 efx_init_channels(efx); 2166 efx_init_channels(efx);
2126 2167
2127 rc = falcon_init_interrupt(efx); 2168 rc = efx_nic_init_interrupt(efx);
2128 if (rc) 2169 if (rc)
2129 goto fail6; 2170 goto fail5;
2130 2171
2131 return 0; 2172 return 0;
2132 2173
2133 fail6: 2174 fail5:
2134 efx_fini_channels(efx); 2175 efx_fini_channels(efx);
2135 efx_fini_port(efx); 2176 efx_fini_port(efx);
2136 fail5:
2137 fail4: 2177 fail4:
2138 efx->board_info.fini(efx); 2178 efx->type->fini(efx);
2139 fail3: 2179 fail3:
2140 efx_fini_napi(efx); 2180 efx_fini_napi(efx);
2141 fail2: 2181 fail2:
@@ -2165,9 +2205,11 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2165 net_dev = alloc_etherdev(sizeof(*efx)); 2205 net_dev = alloc_etherdev(sizeof(*efx));
2166 if (!net_dev) 2206 if (!net_dev)
2167 return -ENOMEM; 2207 return -ENOMEM;
2168 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | 2208 net_dev->features |= (type->offload_features | NETIF_F_SG |
2169 NETIF_F_HIGHDMA | NETIF_F_TSO | 2209 NETIF_F_HIGHDMA | NETIF_F_TSO |
2170 NETIF_F_GRO); 2210 NETIF_F_GRO);
2211 if (type->offload_features & NETIF_F_V6_CSUM)
2212 net_dev->features |= NETIF_F_TSO6;
2171 /* Mask for features that also apply to VLAN devices */ 2213 /* Mask for features that also apply to VLAN devices */
2172 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2214 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2173 NETIF_F_HIGHDMA | NETIF_F_TSO); 2215 NETIF_F_HIGHDMA | NETIF_F_TSO);
@@ -2219,18 +2261,19 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2219 goto fail4; 2261 goto fail4;
2220 } 2262 }
2221 2263
2222 /* Switch to the running state before we expose the device to 2264 /* Switch to the running state before we expose the device to the OS,
2223 * the OS. This is to ensure that the initial gathering of 2265 * so that dev_open()|efx_start_all() will actually start the device */
2224 * MAC stats succeeds. */
2225 efx->state = STATE_RUNNING; 2266 efx->state = STATE_RUNNING;
2226 2267
2227 efx_mtd_probe(efx); /* allowed to fail */
2228
2229 rc = efx_register_netdev(efx); 2268 rc = efx_register_netdev(efx);
2230 if (rc) 2269 if (rc)
2231 goto fail5; 2270 goto fail5;
2232 2271
2233 EFX_LOG(efx, "initialisation successful\n"); 2272 EFX_LOG(efx, "initialisation successful\n");
2273
2274 rtnl_lock();
2275 efx_mtd_probe(efx); /* allowed to fail */
2276 rtnl_unlock();
2234 return 0; 2277 return 0;
2235 2278
2236 fail5: 2279 fail5:
@@ -2246,11 +2289,107 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2246 return rc; 2289 return rc;
2247} 2290}
2248 2291
2292static int efx_pm_freeze(struct device *dev)
2293{
2294 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2295
2296 efx->state = STATE_FINI;
2297
2298 netif_device_detach(efx->net_dev);
2299
2300 efx_stop_all(efx);
2301 efx_fini_channels(efx);
2302
2303 return 0;
2304}
2305
2306static int efx_pm_thaw(struct device *dev)
2307{
2308 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2309
2310 efx->state = STATE_INIT;
2311
2312 efx_init_channels(efx);
2313
2314 mutex_lock(&efx->mac_lock);
2315 efx->phy_op->reconfigure(efx);
2316 mutex_unlock(&efx->mac_lock);
2317
2318 efx_start_all(efx);
2319
2320 netif_device_attach(efx->net_dev);
2321
2322 efx->state = STATE_RUNNING;
2323
2324 efx->type->resume_wol(efx);
2325
2326 return 0;
2327}
2328
2329static int efx_pm_poweroff(struct device *dev)
2330{
2331 struct pci_dev *pci_dev = to_pci_dev(dev);
2332 struct efx_nic *efx = pci_get_drvdata(pci_dev);
2333
2334 efx->type->fini(efx);
2335
2336 efx->reset_pending = RESET_TYPE_NONE;
2337
2338 pci_save_state(pci_dev);
2339 return pci_set_power_state(pci_dev, PCI_D3hot);
2340}
2341
2342/* Used for both resume and restore */
2343static int efx_pm_resume(struct device *dev)
2344{
2345 struct pci_dev *pci_dev = to_pci_dev(dev);
2346 struct efx_nic *efx = pci_get_drvdata(pci_dev);
2347 int rc;
2348
2349 rc = pci_set_power_state(pci_dev, PCI_D0);
2350 if (rc)
2351 return rc;
2352 pci_restore_state(pci_dev);
2353 rc = pci_enable_device(pci_dev);
2354 if (rc)
2355 return rc;
2356 pci_set_master(efx->pci_dev);
2357 rc = efx->type->reset(efx, RESET_TYPE_ALL);
2358 if (rc)
2359 return rc;
2360 rc = efx->type->init(efx);
2361 if (rc)
2362 return rc;
2363 efx_pm_thaw(dev);
2364 return 0;
2365}
2366
2367static int efx_pm_suspend(struct device *dev)
2368{
2369 int rc;
2370
2371 efx_pm_freeze(dev);
2372 rc = efx_pm_poweroff(dev);
2373 if (rc)
2374 efx_pm_resume(dev);
2375 return rc;
2376}
2377
2378static struct dev_pm_ops efx_pm_ops = {
2379 .suspend = efx_pm_suspend,
2380 .resume = efx_pm_resume,
2381 .freeze = efx_pm_freeze,
2382 .thaw = efx_pm_thaw,
2383 .poweroff = efx_pm_poweroff,
2384 .restore = efx_pm_resume,
2385};
2386
2249static struct pci_driver efx_pci_driver = { 2387static struct pci_driver efx_pci_driver = {
2250 .name = EFX_DRIVER_NAME, 2388 .name = EFX_DRIVER_NAME,
2251 .id_table = efx_pci_table, 2389 .id_table = efx_pci_table,
2252 .probe = efx_pci_probe, 2390 .probe = efx_pci_probe,
2253 .remove = efx_pci_remove, 2391 .remove = efx_pci_remove,
2392 .driver.pm = &efx_pm_ops,
2254}; 2393};
2255 2394
2256/************************************************************************** 2395/**************************************************************************
@@ -2314,8 +2453,8 @@ static void __exit efx_exit_module(void)
2314module_init(efx_init_module); 2453module_init(efx_init_module);
2315module_exit(efx_exit_module); 2454module_exit(efx_exit_module);
2316 2455
2317MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and " 2456MODULE_AUTHOR("Solarflare Communications and "
2318 "Solarflare Communications"); 2457 "Michael Brown <mbrown@fensystems.co.uk>");
2319MODULE_DESCRIPTION("Solarflare Communications network driver"); 2458MODULE_DESCRIPTION("Solarflare Communications network driver");
2320MODULE_LICENSE("GPL"); 2459MODULE_LICENSE("GPL");
2321MODULE_DEVICE_TABLE(pci, efx_pci_table); 2460MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62f4929..a615ac051530 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -18,35 +18,64 @@
18#define FALCON_A_P_DEVID 0x0703 18#define FALCON_A_P_DEVID 0x0703
19#define FALCON_A_S_DEVID 0x6703 19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710 20#define FALCON_B_P_DEVID 0x0710
21#define BETHPAGE_A_P_DEVID 0x0803
22#define SIENA_A_P_DEVID 0x0813
23
24/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
25#define EFX_MEM_BAR 2
21 26
22/* TX */ 27/* TX */
23extern netdev_tx_t efx_xmit(struct efx_nic *efx, 28extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
24 struct efx_tx_queue *tx_queue, 29extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
25 struct sk_buff *skb); 30extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
31extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
32extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
33extern netdev_tx_t
34efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
35extern netdev_tx_t
36efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
37extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
26extern void efx_stop_queue(struct efx_nic *efx); 38extern void efx_stop_queue(struct efx_nic *efx);
27extern void efx_wake_queue(struct efx_nic *efx); 39extern void efx_wake_queue(struct efx_nic *efx);
40#define EFX_TXQ_SIZE 1024
41#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
28 42
29/* RX */ 43/* RX */
30extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 44extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
45extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
46extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
47extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
48extern void efx_rx_strategy(struct efx_channel *channel);
49extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
50extern void efx_rx_work(struct work_struct *data);
51extern void __efx_rx_packet(struct efx_channel *channel,
52 struct efx_rx_buffer *rx_buf, bool checksummed);
31extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
32 unsigned int len, bool checksummed, bool discard); 54 unsigned int len, bool checksummed, bool discard);
33extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
56#define EFX_RXQ_SIZE 1024
57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
34 58
35/* Channels */ 59/* Channels */
36extern void efx_process_channel_now(struct efx_channel *channel); 60extern void efx_process_channel_now(struct efx_channel *channel);
37extern void efx_flush_queues(struct efx_nic *efx); 61#define EFX_EVQ_SIZE 4096
62#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
38 63
39/* Ports */ 64/* Ports */
40extern void efx_stats_disable(struct efx_nic *efx); 65extern int efx_reconfigure_port(struct efx_nic *efx);
41extern void efx_stats_enable(struct efx_nic *efx); 66extern int __efx_reconfigure_port(struct efx_nic *efx);
42extern void efx_reconfigure_port(struct efx_nic *efx); 67
43extern void __efx_reconfigure_port(struct efx_nic *efx); 68/* Ethtool support */
69extern int efx_ethtool_get_settings(struct net_device *net_dev,
70 struct ethtool_cmd *ecmd);
71extern int efx_ethtool_set_settings(struct net_device *net_dev,
72 struct ethtool_cmd *ecmd);
73extern const struct ethtool_ops efx_ethtool_ops;
44 74
45/* Reset handling */ 75/* Reset handling */
46extern void efx_reset_down(struct efx_nic *efx, enum reset_type method, 76extern int efx_reset(struct efx_nic *efx, enum reset_type method);
47 struct ethtool_cmd *ecmd); 77extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
48extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, 78extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
49 struct ethtool_cmd *ecmd, bool ok);
50 79
51/* Global */ 80/* Global */
52extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
@@ -60,7 +89,9 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *);
60/* Dummy PHY ops for PHY drivers */ 89/* Dummy PHY ops for PHY drivers */
61extern int efx_port_dummy_op_int(struct efx_nic *efx); 90extern int efx_port_dummy_op_int(struct efx_nic *efx);
62extern void efx_port_dummy_op_void(struct efx_nic *efx); 91extern void efx_port_dummy_op_void(struct efx_nic *efx);
63extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink); 92extern void
93efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
94extern bool efx_port_dummy_op_poll(struct efx_nic *efx);
64 95
65/* MTD */ 96/* MTD */
66#ifdef CONFIG_SFC_MTD 97#ifdef CONFIG_SFC_MTD
@@ -84,4 +115,8 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
84 napi_schedule(&channel->napi_str); 115 napi_schedule(&channel->napi_str);
85} 116}
86 117
118extern void efx_link_status_changed(struct efx_nic *efx);
119extern void efx_link_set_advertising(struct efx_nic *efx, u32);
120extern void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type);
121
87#endif /* EFX_EFX_H */ 122#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 60cbc6e1e66b..384cfe3b1be1 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc. 3 * Copyright 2007-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -13,44 +13,101 @@
13/** 13/**
14 * enum efx_loopback_mode - loopback modes 14 * enum efx_loopback_mode - loopback modes
15 * @LOOPBACK_NONE: no loopback 15 * @LOOPBACK_NONE: no loopback
16 * @LOOPBACK_GMAC: loopback within GMAC at unspecified level 16 * @LOOPBACK_DATA: data path loopback
17 * @LOOPBACK_XGMII: loopback within XMAC at XGMII level 17 * @LOOPBACK_GMAC: loopback within GMAC
18 * @LOOPBACK_XGXS: loopback within XMAC at XGXS level 18 * @LOOPBACK_XGMII: loopback after XMAC
19 * @LOOPBACK_XAUI: loopback within XMAC at XAUI level 19 * @LOOPBACK_XGXS: loopback within BPX after XGXS
20 * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
21 * @LOOPBACK_GMII: loopback within BPX after GMAC
22 * @LOOPBACK_SGMII: loopback within BPX within SGMII
23 * @LOOPBACK_XGBR: loopback within BPX within XGBR
24 * @LOOPBACK_XFI: loopback within BPX before XFI serdes
25 * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
26 * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
27 * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
28 * @LOOPBACK_XFI_FAR: loopback after XFI serdes
20 * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level 29 * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
21 * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level 30 * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
22 * @LOOPBACK_PCS: loopback within 10G PHY at PCS level 31 * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
23 * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level 32 * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
24 * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) 33 * @LOOPBACK_XPORT: cross port loopback
34 * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
35 * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
36 * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
37 * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
38 * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
39 * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
40 * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
41 * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
25 */ 42 */
26/* Please keep in order and up-to-date w.r.t the following two #defines */ 43/* Please keep up-to-date w.r.t the following two #defines */
27enum efx_loopback_mode { 44enum efx_loopback_mode {
28 LOOPBACK_NONE = 0, 45 LOOPBACK_NONE = 0,
29 LOOPBACK_GMAC = 1, 46 LOOPBACK_DATA = 1,
30 LOOPBACK_XGMII = 2, 47 LOOPBACK_GMAC = 2,
31 LOOPBACK_XGXS = 3, 48 LOOPBACK_XGMII = 3,
32 LOOPBACK_XAUI = 4, 49 LOOPBACK_XGXS = 4,
33 LOOPBACK_GPHY = 5, 50 LOOPBACK_XAUI = 5,
34 LOOPBACK_PHYXS = 6, 51 LOOPBACK_GMII = 6,
35 LOOPBACK_PCS = 7, 52 LOOPBACK_SGMII = 7,
36 LOOPBACK_PMAPMD = 8, 53 LOOPBACK_XGBR = 8,
37 LOOPBACK_NETWORK = 9, 54 LOOPBACK_XFI = 9,
55 LOOPBACK_XAUI_FAR = 10,
56 LOOPBACK_GMII_FAR = 11,
57 LOOPBACK_SGMII_FAR = 12,
58 LOOPBACK_XFI_FAR = 13,
59 LOOPBACK_GPHY = 14,
60 LOOPBACK_PHYXS = 15,
61 LOOPBACK_PCS = 16,
62 LOOPBACK_PMAPMD = 17,
63 LOOPBACK_XPORT = 18,
64 LOOPBACK_XGMII_WS = 19,
65 LOOPBACK_XAUI_WS = 20,
66 LOOPBACK_XAUI_WS_FAR = 21,
67 LOOPBACK_XAUI_WS_NEAR = 22,
68 LOOPBACK_GMII_WS = 23,
69 LOOPBACK_XFI_WS = 24,
70 LOOPBACK_XFI_WS_FAR = 25,
71 LOOPBACK_PHYXS_WS = 26,
38 LOOPBACK_MAX 72 LOOPBACK_MAX
39}; 73};
40
41#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD 74#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
42 75
43extern const char *efx_loopback_mode_names[];
44#define LOOPBACK_MODE_NAME(mode) \
45 STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
46#define LOOPBACK_MODE(efx) \
47 LOOPBACK_MODE_NAME(efx->loopback_mode)
48
49/* These loopbacks occur within the controller */ 76/* These loopbacks occur within the controller */
50#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_GMAC) | \ 77#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
51 (1 << LOOPBACK_XGMII)| \ 78 (1 << LOOPBACK_GMAC) | \
52 (1 << LOOPBACK_XGXS) | \ 79 (1 << LOOPBACK_XGMII)| \
53 (1 << LOOPBACK_XAUI)) 80 (1 << LOOPBACK_XGXS) | \
81 (1 << LOOPBACK_XAUI) | \
82 (1 << LOOPBACK_GMII) | \
83 (1 << LOOPBACK_SGMII) | \
84 (1 << LOOPBACK_SGMII) | \
85 (1 << LOOPBACK_XGBR) | \
86 (1 << LOOPBACK_XFI) | \
87 (1 << LOOPBACK_XAUI_FAR) | \
88 (1 << LOOPBACK_GMII_FAR) | \
89 (1 << LOOPBACK_SGMII_FAR) | \
90 (1 << LOOPBACK_XFI_FAR) | \
91 (1 << LOOPBACK_XGMII_WS) | \
92 (1 << LOOPBACK_XAUI_WS) | \
93 (1 << LOOPBACK_XAUI_WS_FAR) | \
94 (1 << LOOPBACK_XAUI_WS_NEAR) | \
95 (1 << LOOPBACK_GMII_WS) | \
96 (1 << LOOPBACK_XFI_WS) | \
97 (1 << LOOPBACK_XFI_WS_FAR))
98
99#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
100 (1 << LOOPBACK_XAUI_WS) | \
101 (1 << LOOPBACK_XAUI_WS_FAR) | \
102 (1 << LOOPBACK_XAUI_WS_NEAR) | \
103 (1 << LOOPBACK_GMII_WS) | \
104 (1 << LOOPBACK_XFI_WS) | \
105 (1 << LOOPBACK_XFI_WS_FAR) | \
106 (1 << LOOPBACK_PHYXS_WS))
107
108#define LOOPBACKS_EXTERNAL(_efx) \
109 ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
110 ~(1 << LOOPBACK_NONE))
54 111
55#define LOOPBACK_MASK(_efx) \ 112#define LOOPBACK_MASK(_efx) \
56 (1 << (_efx)->loopback_mode) 113 (1 << (_efx)->loopback_mode)
@@ -58,6 +115,9 @@ extern const char *efx_loopback_mode_names[];
58#define LOOPBACK_INTERNAL(_efx) \ 115#define LOOPBACK_INTERNAL(_efx) \
59 (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) 116 (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
60 117
118#define LOOPBACK_EXTERNAL(_efx) \
119 (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
120
61#define LOOPBACK_CHANGED(_from, _to, _mask) \ 121#define LOOPBACK_CHANGED(_from, _to, _mask) \
62 (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) 122 (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
63 123
@@ -84,6 +144,7 @@ extern const char *efx_loopback_mode_names[];
84 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch 144 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
85 * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch 145 * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
86 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors 146 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
147 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
87 */ 148 */
88enum reset_type { 149enum reset_type {
89 RESET_TYPE_NONE = -1, 150 RESET_TYPE_NONE = -1,
@@ -98,6 +159,7 @@ enum reset_type {
98 RESET_TYPE_RX_DESC_FETCH, 159 RESET_TYPE_RX_DESC_FETCH,
99 RESET_TYPE_TX_DESC_FETCH, 160 RESET_TYPE_TX_DESC_FETCH,
100 RESET_TYPE_TX_SKIP, 161 RESET_TYPE_TX_SKIP,
162 RESET_TYPE_MC_FAILURE,
101 RESET_TYPE_MAX, 163 RESET_TYPE_MAX,
102}; 164};
103 165
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 45018f283ffa..6c0bbed8c477 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -10,30 +10,15 @@
10 10
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/mdio.h>
14#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
15#include "net_driver.h" 14#include "net_driver.h"
16#include "workarounds.h" 15#include "workarounds.h"
17#include "selftest.h" 16#include "selftest.h"
18#include "efx.h" 17#include "efx.h"
19#include "ethtool.h" 18#include "nic.h"
20#include "falcon.h"
21#include "spi.h" 19#include "spi.h"
22#include "mdio_10g.h" 20#include "mdio_10g.h"
23 21
24const char *efx_loopback_mode_names[] = {
25 [LOOPBACK_NONE] = "NONE",
26 [LOOPBACK_GMAC] = "GMAC",
27 [LOOPBACK_XGMII] = "XGMII",
28 [LOOPBACK_XGXS] = "XGXS",
29 [LOOPBACK_XAUI] = "XAUI",
30 [LOOPBACK_GPHY] = "GPHY",
31 [LOOPBACK_PHYXS] = "PHYXS",
32 [LOOPBACK_PCS] = "PCS",
33 [LOOPBACK_PMAPMD] = "PMA/PMD",
34 [LOOPBACK_NETWORK] = "NETWORK",
35};
36
37struct ethtool_string { 22struct ethtool_string {
38 char name[ETH_GSTRING_LEN]; 23 char name[ETH_GSTRING_LEN];
39}; 24};
@@ -167,6 +152,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
167 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 152 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
168 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
169 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), 154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
170 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
171}; 157};
172 158
@@ -187,13 +173,15 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
187{ 173{
188 struct efx_nic *efx = netdev_priv(net_dev); 174 struct efx_nic *efx = netdev_priv(net_dev);
189 175
190 efx->board_info.blink(efx, 1); 176 do {
191 set_current_state(TASK_INTERRUPTIBLE); 177 efx->type->set_id_led(efx, EFX_LED_ON);
192 if (count) 178 schedule_timeout_interruptible(HZ / 2);
193 schedule_timeout(count * HZ); 179
194 else 180 efx->type->set_id_led(efx, EFX_LED_OFF);
195 schedule(); 181 schedule_timeout_interruptible(HZ / 2);
196 efx->board_info.blink(efx, 0); 182 } while (!signal_pending(current) && --count != 0);
183
184 efx->type->set_id_led(efx, EFX_LED_DEFAULT);
197 return 0; 185 return 0;
198} 186}
199 187
@@ -202,6 +190,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
202 struct ethtool_cmd *ecmd) 190 struct ethtool_cmd *ecmd)
203{ 191{
204 struct efx_nic *efx = netdev_priv(net_dev); 192 struct efx_nic *efx = netdev_priv(net_dev);
193 struct efx_link_state *link_state = &efx->link_state;
205 194
206 mutex_lock(&efx->mac_lock); 195 mutex_lock(&efx->mac_lock);
207 efx->phy_op->get_settings(efx, ecmd); 196 efx->phy_op->get_settings(efx, ecmd);
@@ -209,6 +198,13 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
209 198
210 /* Falcon GMAC does not support 1000Mbps HD */ 199 /* Falcon GMAC does not support 1000Mbps HD */
211 ecmd->supported &= ~SUPPORTED_1000baseT_Half; 200 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
201 /* Both MACs support pause frames (bidirectional and respond-only) */
202 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
203
204 if (LOOPBACK_INTERNAL(efx)) {
205 ecmd->speed = link_state->speed;
206 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
207 }
212 208
213 return 0; 209 return 0;
214} 210}
@@ -230,9 +226,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
230 mutex_lock(&efx->mac_lock); 226 mutex_lock(&efx->mac_lock);
231 rc = efx->phy_op->set_settings(efx, ecmd); 227 rc = efx->phy_op->set_settings(efx, ecmd);
232 mutex_unlock(&efx->mac_lock); 228 mutex_unlock(&efx->mac_lock);
233 if (!rc)
234 efx_reconfigure_port(efx);
235
236 return rc; 229 return rc;
237} 230}
238 231
@@ -243,6 +236,9 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
243 236
244 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); 237 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
245 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
240 siena_print_fwver(efx, info->fw_version,
241 sizeof(info->fw_version));
246 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
247} 243}
248 244
@@ -289,7 +285,7 @@ static void efx_fill_test(unsigned int test_index,
289#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue 285#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
290#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue 286#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
291#define EFX_LOOPBACK_NAME(_mode, _counter) \ 287#define EFX_LOOPBACK_NAME(_mode, _counter) \
292 "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode) 288 "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
293 289
294/** 290/**
295 * efx_fill_loopback_test - fill in a block of loopback self-test entries 291 * efx_fill_loopback_test - fill in a block of loopback self-test entries
@@ -372,9 +368,21 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
372 efx_fill_test(n++, strings, data, &tests->registers, 368 efx_fill_test(n++, strings, data, &tests->registers,
373 "core", 0, "registers", NULL); 369 "core", 0, "registers", NULL);
374 370
375 for (i = 0; i < efx->phy_op->num_tests; i++) 371 if (efx->phy_op->run_tests != NULL) {
376 efx_fill_test(n++, strings, data, &tests->phy[i], 372 EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
377 "phy", 0, efx->phy_op->test_names[i], NULL); 373
374 for (i = 0; true; ++i) {
375 const char *name;
376
377 EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
378 name = efx->phy_op->test_name(efx, i);
379 if (name == NULL)
380 break;
381
382 efx_fill_test(n++, strings, data, &tests->phy[i],
383 "phy", 0, name, NULL);
384 }
385 }
378 386
379 /* Loopback tests */ 387 /* Loopback tests */
380 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { 388 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
@@ -463,6 +471,36 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
463 } 471 }
464} 472}
465 473
474static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
475{
476 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
477 unsigned long features;
478
479 features = NETIF_F_TSO;
480 if (efx->type->offload_features & NETIF_F_V6_CSUM)
481 features |= NETIF_F_TSO6;
482
483 if (enable)
484 net_dev->features |= features;
485 else
486 net_dev->features &= ~features;
487
488 return 0;
489}
490
491static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
492{
493 struct efx_nic *efx = netdev_priv(net_dev);
494 unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
495
496 if (enable)
497 net_dev->features |= features;
498 else
499 net_dev->features &= ~features;
500
501 return 0;
502}
503
466static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) 504static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
467{ 505{
468 struct efx_nic *efx = netdev_priv(net_dev); 506 struct efx_nic *efx = netdev_priv(net_dev);
@@ -537,7 +575,7 @@ static u32 efx_ethtool_get_link(struct net_device *net_dev)
537{ 575{
538 struct efx_nic *efx = netdev_priv(net_dev); 576 struct efx_nic *efx = netdev_priv(net_dev);
539 577
540 return efx->link_up; 578 return efx->link_state.up;
541} 579}
542 580
543static int efx_ethtool_get_eeprom_len(struct net_device *net_dev) 581static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
@@ -562,7 +600,8 @@ static int efx_ethtool_get_eeprom(struct net_device *net_dev,
562 rc = mutex_lock_interruptible(&efx->spi_lock); 600 rc = mutex_lock_interruptible(&efx->spi_lock);
563 if (rc) 601 if (rc)
564 return rc; 602 return rc;
565 rc = falcon_spi_read(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, 603 rc = falcon_spi_read(efx, spi,
604 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
566 eeprom->len, &len, buf); 605 eeprom->len, &len, buf);
567 mutex_unlock(&efx->spi_lock); 606 mutex_unlock(&efx->spi_lock);
568 607
@@ -585,7 +624,8 @@ static int efx_ethtool_set_eeprom(struct net_device *net_dev,
585 rc = mutex_lock_interruptible(&efx->spi_lock); 624 rc = mutex_lock_interruptible(&efx->spi_lock);
586 if (rc) 625 if (rc)
587 return rc; 626 return rc;
588 rc = falcon_spi_write(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, 627 rc = falcon_spi_write(efx, spi,
628 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
589 eeprom->len, &len, buf); 629 eeprom->len, &len, buf);
590 mutex_unlock(&efx->spi_lock); 630 mutex_unlock(&efx->spi_lock);
591 631
@@ -618,6 +658,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
618 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; 658 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
619 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; 659 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
620 660
661 coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
662 coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
663
621 return 0; 664 return 0;
622} 665}
623 666
@@ -656,13 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
656 } 699 }
657 700
658 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); 701 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
659
660 /* Reset channel to pick up new moderation value. Note that
661 * this may change the value of the irq_moderation field
662 * (e.g. to allow for hardware timer granularity).
663 */
664 efx_for_each_channel(channel, efx) 702 efx_for_each_channel(channel, efx)
665 falcon_set_int_moderation(channel); 703 efx->type->push_irq_moderation(channel);
666 704
667 return 0; 705 return 0;
668} 706}
@@ -671,8 +709,12 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
671 struct ethtool_pauseparam *pause) 709 struct ethtool_pauseparam *pause)
672{ 710{
673 struct efx_nic *efx = netdev_priv(net_dev); 711 struct efx_nic *efx = netdev_priv(net_dev);
674 enum efx_fc_type wanted_fc; 712 enum efx_fc_type wanted_fc, old_fc;
713 u32 old_adv;
675 bool reset; 714 bool reset;
715 int rc = 0;
716
717 mutex_lock(&efx->mac_lock);
676 718
677 wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | 719 wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
678 (pause->tx_pause ? EFX_FC_TX : 0) | 720 (pause->tx_pause ? EFX_FC_TX : 0) |
@@ -680,14 +722,14 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
680 722
681 if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { 723 if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
682 EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n"); 724 EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n");
683 return -EINVAL; 725 rc = -EINVAL;
726 goto out;
684 } 727 }
685 728
686 if (!(efx->phy_op->mmds & MDIO_DEVS_AN) && 729 if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
687 (wanted_fc & EFX_FC_AUTO)) { 730 EFX_LOG(efx, "Autonegotiation is disabled\n");
688 EFX_LOG(efx, "PHY does not support flow control " 731 rc = -EINVAL;
689 "autonegotiation\n"); 732 goto out;
690 return -EINVAL;
691 } 733 }
692 734
693 /* TX flow control may automatically turn itself off if the 735 /* TX flow control may automatically turn itself off if the
@@ -697,27 +739,40 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
697 * and fix it be cycling transmit flow control on this end. */ 739 * and fix it be cycling transmit flow control on this end. */
698 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); 740 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
699 if (EFX_WORKAROUND_11482(efx) && reset) { 741 if (EFX_WORKAROUND_11482(efx) && reset) {
700 if (falcon_rev(efx) >= FALCON_REV_B0) { 742 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
701 /* Recover by resetting the EM block */ 743 /* Recover by resetting the EM block */
702 if (efx->link_up) 744 falcon_stop_nic_stats(efx);
703 falcon_drain_tx_fifo(efx); 745 falcon_drain_tx_fifo(efx);
746 efx->mac_op->reconfigure(efx);
747 falcon_start_nic_stats(efx);
704 } else { 748 } else {
705 /* Schedule a reset to recover */ 749 /* Schedule a reset to recover */
706 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); 750 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
707 } 751 }
708 } 752 }
709 753
710 /* Try to push the pause parameters */ 754 old_adv = efx->link_advertising;
711 mutex_lock(&efx->mac_lock); 755 old_fc = efx->wanted_fc;
756 efx_link_set_wanted_fc(efx, wanted_fc);
757 if (efx->link_advertising != old_adv ||
758 (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
759 rc = efx->phy_op->reconfigure(efx);
760 if (rc) {
761 EFX_ERR(efx, "Unable to advertise requested flow "
762 "control setting\n");
763 goto out;
764 }
765 }
712 766
713 efx->wanted_fc = wanted_fc; 767 /* Reconfigure the MAC. The PHY *may* generate a link state change event
714 if (efx->phy_op->mmds & MDIO_DEVS_AN) 768 * if the user just changed the advertised capabilities, but there's no
715 mdio45_ethtool_spauseparam_an(&efx->mdio, pause); 769 * harm doing this twice */
716 __efx_reconfigure_port(efx); 770 efx->mac_op->reconfigure(efx);
717 771
772out:
718 mutex_unlock(&efx->mac_lock); 773 mutex_unlock(&efx->mac_lock);
719 774
720 return 0; 775 return rc;
721} 776}
722 777
723static void efx_ethtool_get_pauseparam(struct net_device *net_dev, 778static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
@@ -731,6 +786,50 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
731} 786}
732 787
733 788
789static void efx_ethtool_get_wol(struct net_device *net_dev,
790 struct ethtool_wolinfo *wol)
791{
792 struct efx_nic *efx = netdev_priv(net_dev);
793 return efx->type->get_wol(efx, wol);
794}
795
796
797static int efx_ethtool_set_wol(struct net_device *net_dev,
798 struct ethtool_wolinfo *wol)
799{
800 struct efx_nic *efx = netdev_priv(net_dev);
801 return efx->type->set_wol(efx, wol->wolopts);
802}
803
804extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
805{
806 struct efx_nic *efx = netdev_priv(net_dev);
807 enum reset_type method;
808 enum {
809 ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
810 ETH_RESET_OFFLOAD | ETH_RESET_MAC)
811 };
812
813 /* Check for minimal reset flags */
814 if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE)
815 return -EINVAL;
816 *flags ^= ETH_RESET_EFX_INVISIBLE;
817 method = RESET_TYPE_INVISIBLE;
818
819 if (*flags & ETH_RESET_PHY) {
820 *flags ^= ETH_RESET_PHY;
821 method = RESET_TYPE_ALL;
822 }
823
824 if ((*flags & efx->type->reset_world_flags) ==
825 efx->type->reset_world_flags) {
826 *flags ^= efx->type->reset_world_flags;
827 method = RESET_TYPE_WORLD;
828 }
829
830 return efx_reset(efx, method);
831}
832
734const struct ethtool_ops efx_ethtool_ops = { 833const struct ethtool_ops efx_ethtool_ops = {
735 .get_settings = efx_ethtool_get_settings, 834 .get_settings = efx_ethtool_get_settings,
736 .set_settings = efx_ethtool_set_settings, 835 .set_settings = efx_ethtool_set_settings,
@@ -747,11 +846,13 @@ const struct ethtool_ops efx_ethtool_ops = {
747 .get_rx_csum = efx_ethtool_get_rx_csum, 846 .get_rx_csum = efx_ethtool_get_rx_csum,
748 .set_rx_csum = efx_ethtool_set_rx_csum, 847 .set_rx_csum = efx_ethtool_set_rx_csum,
749 .get_tx_csum = ethtool_op_get_tx_csum, 848 .get_tx_csum = ethtool_op_get_tx_csum,
750 .set_tx_csum = ethtool_op_set_tx_csum, 849 /* Need to enable/disable IPv6 too */
850 .set_tx_csum = efx_ethtool_set_tx_csum,
751 .get_sg = ethtool_op_get_sg, 851 .get_sg = ethtool_op_get_sg,
752 .set_sg = ethtool_op_set_sg, 852 .set_sg = ethtool_op_set_sg,
753 .get_tso = ethtool_op_get_tso, 853 .get_tso = ethtool_op_get_tso,
754 .set_tso = ethtool_op_set_tso, 854 /* Need to enable/disable TSO-IPv6 too */
855 .set_tso = efx_ethtool_set_tso,
755 .get_flags = ethtool_op_get_flags, 856 .get_flags = ethtool_op_get_flags,
756 .set_flags = ethtool_op_set_flags, 857 .set_flags = ethtool_op_set_flags,
757 .get_sset_count = efx_ethtool_get_sset_count, 858 .get_sset_count = efx_ethtool_get_sset_count,
@@ -759,4 +860,7 @@ const struct ethtool_ops efx_ethtool_ops = {
759 .get_strings = efx_ethtool_get_strings, 860 .get_strings = efx_ethtool_get_strings,
760 .phys_id = efx_ethtool_phys_id, 861 .phys_id = efx_ethtool_phys_id,
761 .get_ethtool_stats = efx_ethtool_get_stats, 862 .get_ethtool_stats = efx_ethtool_get_stats,
863 .get_wol = efx_ethtool_get_wol,
864 .set_wol = efx_ethtool_set_wol,
865 .reset = efx_ethtool_reset,
762}; 866};
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
deleted file mode 100644
index 295ead403356..000000000000
--- a/drivers/net/sfc/ethtool.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_ETHTOOL_H
12#define EFX_ETHTOOL_H
13
14#include "net_driver.h"
15
16/*
17 * Ethtool support
18 */
19
20extern int efx_ethtool_get_settings(struct net_device *net_dev,
21 struct ethtool_cmd *ecmd);
22extern int efx_ethtool_set_settings(struct net_device *net_dev,
23 struct ethtool_cmd *ecmd);
24
25extern const struct ethtool_ops efx_ethtool_ops;
26
27#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c049364aec46..17afcd26e870 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -14,66 +14,20 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/i2c.h> 16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h>
18#include <linux/mii.h> 17#include <linux/mii.h>
19#include "net_driver.h" 18#include "net_driver.h"
20#include "bitfield.h" 19#include "bitfield.h"
21#include "efx.h" 20#include "efx.h"
22#include "mac.h" 21#include "mac.h"
23#include "spi.h" 22#include "spi.h"
24#include "falcon.h" 23#include "nic.h"
25#include "falcon_hwdefs.h" 24#include "regs.h"
26#include "falcon_io.h" 25#include "io.h"
27#include "mdio_10g.h" 26#include "mdio_10g.h"
28#include "phy.h" 27#include "phy.h"
29#include "boards.h"
30#include "workarounds.h" 28#include "workarounds.h"
31 29
32/* Falcon hardware control. 30/* Hardware control for SFC4000 (aka Falcon). */
33 * Falcon is the internal codename for the SFC4000 controller that is
34 * present in SFE400X evaluation boards
35 */
36
37/**
38 * struct falcon_nic_data - Falcon NIC state
39 * @next_buffer_table: First available buffer table id
40 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm
42 * @int_error_count: Number of internal errors seen recently
43 * @int_error_expire: Time at which error count will be expired
44 */
45struct falcon_nic_data {
46 unsigned next_buffer_table;
47 struct pci_dev *pci_dev2;
48 struct i2c_algo_bit_data i2c_data;
49
50 unsigned int_error_count;
51 unsigned long int_error_expire;
52};
53
54/**************************************************************************
55 *
56 * Configurable values
57 *
58 **************************************************************************
59 */
60
61static int disable_dma_stats;
62
63/* This is set to 16 for a good reason. In summary, if larger than
64 * 16, the descriptor cache holds more than a default socket
65 * buffer's worth of packets (for UDP we can only have at most one
66 * socket buffer's worth outstanding). This combined with the fact
67 * that we only get 1 TX event per descriptor cache means the NIC
68 * goes idle.
69 */
70#define TX_DC_ENTRIES 16
71#define TX_DC_ENTRIES_ORDER 0
72#define TX_DC_BASE 0x130000
73
74#define RX_DC_ENTRIES 64
75#define RX_DC_ENTRIES_ORDER 2
76#define RX_DC_BASE 0x100000
77 31
78static const unsigned int 32static const unsigned int
79/* "Large" EEPROM device: Atmel AT25640 or similar 33/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -89,104 +43,6 @@ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
89 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) 43 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
90 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); 44 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
91 45
92/* RX FIFO XOFF watermark
93 *
94 * When the amount of the RX FIFO increases used increases past this
95 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
96 * This also has an effect on RX/TX arbitration
97 */
98static int rx_xoff_thresh_bytes = -1;
99module_param(rx_xoff_thresh_bytes, int, 0644);
100MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
101
102/* RX FIFO XON watermark
103 *
104 * When the amount of the RX FIFO used decreases below this
105 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
106 * This also has an effect on RX/TX arbitration
107 */
108static int rx_xon_thresh_bytes = -1;
109module_param(rx_xon_thresh_bytes, int, 0644);
110MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
111
112/* TX descriptor ring size - min 512 max 4k */
113#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
114#define FALCON_TXD_RING_SIZE 1024
115#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
116
117/* RX descriptor ring size - min 512 max 4k */
118#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
119#define FALCON_RXD_RING_SIZE 1024
120#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
121
122/* Event queue size - max 32k */
123#define FALCON_EVQ_ORDER EVQ_SIZE_4K
124#define FALCON_EVQ_SIZE 4096
125#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
126
127/* If FALCON_MAX_INT_ERRORS internal errors occur within
128 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
129 * disable it.
130 */
131#define FALCON_INT_ERROR_EXPIRE 3600
132#define FALCON_MAX_INT_ERRORS 5
133
134/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
135 */
136#define FALCON_FLUSH_INTERVAL 10
137#define FALCON_FLUSH_POLL_COUNT 100
138
139/**************************************************************************
140 *
141 * Falcon constants
142 *
143 **************************************************************************
144 */
145
146/* DMA address mask */
147#define FALCON_DMA_MASK DMA_BIT_MASK(46)
148
149/* TX DMA length mask (13-bit) */
150#define FALCON_TX_DMA_MASK (4096 - 1)
151
152/* Size and alignment of special buffers (4KB) */
153#define FALCON_BUF_SIZE 4096
154
155/* Dummy SRAM size code */
156#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
157
158#define FALCON_IS_DUAL_FUNC(efx) \
159 (falcon_rev(efx) < FALCON_REV_B0)
160
161/**************************************************************************
162 *
163 * Falcon hardware access
164 *
165 **************************************************************************/
166
167/* Read the current event from the event queue */
168static inline efx_qword_t *falcon_event(struct efx_channel *channel,
169 unsigned int index)
170{
171 return (((efx_qword_t *) (channel->eventq.addr)) + index);
172}
173
174/* See if an event is present
175 *
176 * We check both the high and low dword of the event for all ones. We
177 * wrote all ones when we cleared the event, and no valid event can
178 * have all ones in either its high or low dwords. This approach is
179 * robust against reordering.
180 *
181 * Note that using a single 64-bit comparison is incorrect; even
182 * though the CPU read will be atomic, the DMA write may not be.
183 */
184static inline int falcon_event_present(efx_qword_t *event)
185{
186 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
187 EFX_DWORD_IS_ALL_ONES(event->dword[1])));
188}
189
190/************************************************************************** 46/**************************************************************************
191 * 47 *
192 * I2C bus - this is a bit-bashing interface using GPIO pins 48 * I2C bus - this is a bit-bashing interface using GPIO pins
@@ -200,9 +56,9 @@ static void falcon_setsda(void *data, int state)
200 struct efx_nic *efx = (struct efx_nic *)data; 56 struct efx_nic *efx = (struct efx_nic *)data;
201 efx_oword_t reg; 57 efx_oword_t reg;
202 58
203 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 59 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
204 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state); 60 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
205 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 61 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
206} 62}
207 63
208static void falcon_setscl(void *data, int state) 64static void falcon_setscl(void *data, int state)
@@ -210,9 +66,9 @@ static void falcon_setscl(void *data, int state)
210 struct efx_nic *efx = (struct efx_nic *)data; 66 struct efx_nic *efx = (struct efx_nic *)data;
211 efx_oword_t reg; 67 efx_oword_t reg;
212 68
213 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 69 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
214 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state); 70 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
215 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 71 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
216} 72}
217 73
218static int falcon_getsda(void *data) 74static int falcon_getsda(void *data)
@@ -220,8 +76,8 @@ static int falcon_getsda(void *data)
220 struct efx_nic *efx = (struct efx_nic *)data; 76 struct efx_nic *efx = (struct efx_nic *)data;
221 efx_oword_t reg; 77 efx_oword_t reg;
222 78
223 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 79 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
224 return EFX_OWORD_FIELD(reg, GPIO3_IN); 80 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
225} 81}
226 82
227static int falcon_getscl(void *data) 83static int falcon_getscl(void *data)
@@ -229,8 +85,8 @@ static int falcon_getscl(void *data)
229 struct efx_nic *efx = (struct efx_nic *)data; 85 struct efx_nic *efx = (struct efx_nic *)data;
230 efx_oword_t reg; 86 efx_oword_t reg;
231 87
232 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 88 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
233 return EFX_OWORD_FIELD(reg, GPIO0_IN); 89 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
234} 90}
235 91
236static struct i2c_algo_bit_data falcon_i2c_bit_operations = { 92static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -243,1115 +99,39 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
243 .timeout = DIV_ROUND_UP(HZ, 20), 99 .timeout = DIV_ROUND_UP(HZ, 20),
244}; 100};
245 101
246/************************************************************************** 102static void falcon_push_irq_moderation(struct efx_channel *channel)
247 *
248 * Falcon special buffer handling
249 * Special buffers are used for event queues and the TX and RX
250 * descriptor rings.
251 *
252 *************************************************************************/
253
254/*
255 * Initialise a Falcon special buffer
256 *
257 * This will define a buffer (previously allocated via
258 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
259 * it to be used for event queues, descriptor rings etc.
260 */
261static void
262falcon_init_special_buffer(struct efx_nic *efx,
263 struct efx_special_buffer *buffer)
264{
265 efx_qword_t buf_desc;
266 int index;
267 dma_addr_t dma_addr;
268 int i;
269
270 EFX_BUG_ON_PARANOID(!buffer->addr);
271
272 /* Write buffer descriptors to NIC */
273 for (i = 0; i < buffer->entries; i++) {
274 index = buffer->index + i;
275 dma_addr = buffer->dma_addr + (i * 4096);
276 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
277 index, (unsigned long long)dma_addr);
278 EFX_POPULATE_QWORD_4(buf_desc,
279 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
280 BUF_ADR_REGION, 0,
281 BUF_ADR_FBUF, (dma_addr >> 12),
282 BUF_OWNER_ID_FBUF, 0);
283 falcon_write_sram(efx, &buf_desc, index);
284 }
285}
286
287/* Unmaps a buffer from Falcon and clears the buffer table entries */
288static void
289falcon_fini_special_buffer(struct efx_nic *efx,
290 struct efx_special_buffer *buffer)
291{
292 efx_oword_t buf_tbl_upd;
293 unsigned int start = buffer->index;
294 unsigned int end = (buffer->index + buffer->entries - 1);
295
296 if (!buffer->entries)
297 return;
298
299 EFX_LOG(efx, "unmapping special buffers %d-%d\n",
300 buffer->index, buffer->index + buffer->entries - 1);
301
302 EFX_POPULATE_OWORD_4(buf_tbl_upd,
303 BUF_UPD_CMD, 0,
304 BUF_CLR_CMD, 1,
305 BUF_CLR_END_ID, end,
306 BUF_CLR_START_ID, start);
307 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
308}
309
310/*
311 * Allocate a new Falcon special buffer
312 *
313 * This allocates memory for a new buffer, clears it and allocates a
314 * new buffer ID range. It does not write into Falcon's buffer table.
315 *
316 * This call will allocate 4KB buffers, since Falcon can't use 8KB
317 * buffers for event queues and descriptor rings.
318 */
319static int falcon_alloc_special_buffer(struct efx_nic *efx,
320 struct efx_special_buffer *buffer,
321 unsigned int len)
322{
323 struct falcon_nic_data *nic_data = efx->nic_data;
324
325 len = ALIGN(len, FALCON_BUF_SIZE);
326
327 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
328 &buffer->dma_addr);
329 if (!buffer->addr)
330 return -ENOMEM;
331 buffer->len = len;
332 buffer->entries = len / FALCON_BUF_SIZE;
333 BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
334
335 /* All zeros is a potentially valid event so memset to 0xff */
336 memset(buffer->addr, 0xff, len);
337
338 /* Select new buffer ID */
339 buffer->index = nic_data->next_buffer_table;
340 nic_data->next_buffer_table += buffer->entries;
341
342 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
343 "(virt %p phys %llx)\n", buffer->index,
344 buffer->index + buffer->entries - 1,
345 (u64)buffer->dma_addr, len,
346 buffer->addr, (u64)virt_to_phys(buffer->addr));
347
348 return 0;
349}
350
351static void falcon_free_special_buffer(struct efx_nic *efx,
352 struct efx_special_buffer *buffer)
353{
354 if (!buffer->addr)
355 return;
356
357 EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
358 "(virt %p phys %llx)\n", buffer->index,
359 buffer->index + buffer->entries - 1,
360 (u64)buffer->dma_addr, buffer->len,
361 buffer->addr, (u64)virt_to_phys(buffer->addr));
362
363 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
364 buffer->dma_addr);
365 buffer->addr = NULL;
366 buffer->entries = 0;
367}
368
369/**************************************************************************
370 *
371 * Falcon generic buffer handling
372 * These buffers are used for interrupt status and MAC stats
373 *
374 **************************************************************************/
375
376static int falcon_alloc_buffer(struct efx_nic *efx,
377 struct efx_buffer *buffer, unsigned int len)
378{
379 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
380 &buffer->dma_addr);
381 if (!buffer->addr)
382 return -ENOMEM;
383 buffer->len = len;
384 memset(buffer->addr, 0, len);
385 return 0;
386}
387
388static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
389{
390 if (buffer->addr) {
391 pci_free_consistent(efx->pci_dev, buffer->len,
392 buffer->addr, buffer->dma_addr);
393 buffer->addr = NULL;
394 }
395}
396
397/**************************************************************************
398 *
399 * Falcon TX path
400 *
401 **************************************************************************/
402
403/* Returns a pointer to the specified transmit descriptor in the TX
404 * descriptor queue belonging to the specified channel.
405 */
406static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
407 unsigned int index)
408{
409 return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
410}
411
412/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
413static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
414{
415 unsigned write_ptr;
416 efx_dword_t reg;
417
418 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
419 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
420 falcon_writel_page(tx_queue->efx, &reg,
421 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
422}
423
424
425/* For each entry inserted into the software descriptor ring, create a
426 * descriptor in the hardware TX descriptor ring (in host memory), and
427 * write a doorbell.
428 */
429void falcon_push_buffers(struct efx_tx_queue *tx_queue)
430{
431
432 struct efx_tx_buffer *buffer;
433 efx_qword_t *txd;
434 unsigned write_ptr;
435
436 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
437
438 do {
439 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
440 buffer = &tx_queue->buffer[write_ptr];
441 txd = falcon_tx_desc(tx_queue, write_ptr);
442 ++tx_queue->write_count;
443
444 /* Create TX descriptor ring entry */
445 EFX_POPULATE_QWORD_5(*txd,
446 TX_KER_PORT, 0,
447 TX_KER_CONT, buffer->continuation,
448 TX_KER_BYTE_CNT, buffer->len,
449 TX_KER_BUF_REGION, 0,
450 TX_KER_BUF_ADR, buffer->dma_addr);
451 } while (tx_queue->write_count != tx_queue->insert_count);
452
453 wmb(); /* Ensure descriptors are written before they are fetched */
454 falcon_notify_tx_desc(tx_queue);
455}
456
457/* Allocate hardware resources for a TX queue */
458int falcon_probe_tx(struct efx_tx_queue *tx_queue)
459{
460 struct efx_nic *efx = tx_queue->efx;
461 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
462 FALCON_TXD_RING_SIZE *
463 sizeof(efx_qword_t));
464}
465
/* Initialise a TX queue: pin its descriptor ring into the NIC's buffer
 * table and program the queue's descriptor-pointer table entry.
 * Checksum-offload enablement differs by silicon revision (see below).
 */
void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = false;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      TX_DESCQ_EN, 1,
			      TX_ISCSI_DDIG_EN, 0,
			      TX_ISCSI_HDIG_EN, 0,
			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
			      TX_DESCQ_OWNER_ID, 0,
			      TX_DESCQ_LABEL, tx_queue->queue,
			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
			      TX_DESCQ_TYPE, 0,
			      TX_NON_IP_DROP_DIS_B0, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		/* B0: per-queue checksum control via "disable" bits in the
		 * descriptor queue pointer; only the offload queue gets
		 * hardware checksumming */
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
	}

	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		/* A1: checksum control is a global per-queue bitmap; the
		 * offload queue's bit is cleared, others set (presumably a
		 * set bit disables checksumming, mirroring the B0 DIS
		 * fields above — confirm against the register spec) */
		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
	}
}
512
513static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
514{
515 struct efx_nic *efx = tx_queue->efx;
516 efx_oword_t tx_flush_descq;
517
518 /* Post a flush command */
519 EFX_POPULATE_OWORD_2(tx_flush_descq,
520 TX_FLUSH_DESCQ_CMD, 1,
521 TX_FLUSH_DESCQ, tx_queue->queue);
522 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
523}
524
525void falcon_fini_tx(struct efx_tx_queue *tx_queue)
526{
527 struct efx_nic *efx = tx_queue->efx;
528 efx_oword_t tx_desc_ptr;
529
530 /* The queue should have been flushed */
531 WARN_ON(!tx_queue->flushed);
532
533 /* Remove TX descriptor ring from card */
534 EFX_ZERO_OWORD(tx_desc_ptr);
535 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue);
537
538 /* Unpin TX descriptor ring */
539 falcon_fini_special_buffer(efx, &tx_queue->txd);
540}
541
542/* Free buffers backing TX queue */
543void falcon_remove_tx(struct efx_tx_queue *tx_queue)
544{
545 falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
546}
547
548/**************************************************************************
549 *
550 * Falcon RX path
551 *
552 **************************************************************************/
553
554/* Returns a pointer to the specified descriptor in the RX descriptor queue */
555static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
556 unsigned int index)
557{
558 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
559}
560
561/* This creates an entry in the RX descriptor queue */
562static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
563 unsigned index)
564{
565 struct efx_rx_buffer *rx_buf;
566 efx_qword_t *rxd;
567
568 rxd = falcon_rx_desc(rx_queue, index);
569 rx_buf = efx_rx_buffer(rx_queue, index);
570 EFX_POPULATE_QWORD_3(*rxd,
571 RX_KER_BUF_SIZE,
572 rx_buf->len -
573 rx_queue->efx->type->rx_buffer_padding,
574 RX_KER_BUF_REGION, 0,
575 RX_KER_BUF_ADR, rx_buf->dma_addr);
576}
577
578/* This writes to the RX_DESC_WPTR register for the specified receive
579 * descriptor ring.
580 */
581void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
582{
583 efx_dword_t reg;
584 unsigned write_ptr;
585
586 while (rx_queue->notified_count != rx_queue->added_count) {
587 falcon_build_rx_desc(rx_queue,
588 rx_queue->notified_count &
589 FALCON_RXD_RING_MASK);
590 ++rx_queue->notified_count;
591 }
592
593 wmb();
594 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
595 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
596 falcon_writel_page(rx_queue->efx, &reg,
597 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
598}
599
600int falcon_probe_rx(struct efx_rx_queue *rx_queue)
601{
602 struct efx_nic *efx = rx_queue->efx;
603 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
604 FALCON_RXD_RING_SIZE *
605 sizeof(efx_qword_t));
606}
607
/* Initialise an RX queue: pin its descriptor ring into the NIC's buffer
 * table and program the queue's descriptor-pointer table entry.
 */
void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	/* iSCSI digest offload only enabled on B0 and later */
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = false;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
			      RX_DESCQ_OWNER_ID, 0,
			      RX_DESCQ_LABEL, rx_queue->queue,
			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
			      RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      RX_DESCQ_JUMBO, !is_b0,
			      RX_DESCQ_EN, 1);
	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			   rx_queue->queue);
}
640
641static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
642{
643 struct efx_nic *efx = rx_queue->efx;
644 efx_oword_t rx_flush_descq;
645
646 /* Post a flush command */
647 EFX_POPULATE_OWORD_2(rx_flush_descq,
648 RX_FLUSH_DESCQ_CMD, 1,
649 RX_FLUSH_DESCQ, rx_queue->queue);
650 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
651}
652
653void falcon_fini_rx(struct efx_rx_queue *rx_queue)
654{
655 efx_oword_t rx_desc_ptr;
656 struct efx_nic *efx = rx_queue->efx;
657
658 /* The queue should already have been flushed */
659 WARN_ON(!rx_queue->flushed);
660
661 /* Remove RX descriptor ring from card */
662 EFX_ZERO_OWORD(rx_desc_ptr);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue);
665
666 /* Unpin RX descriptor ring */
667 falcon_fini_special_buffer(efx, &rx_queue->rxd);
668}
669
670/* Free buffers backing RX queue */
671void falcon_remove_rx(struct efx_rx_queue *rx_queue)
672{
673 falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
674}
675
676/**************************************************************************
677 *
678 * Falcon event queue processing
679 * Event queues are processed by per-channel tasklets.
680 *
681 **************************************************************************/
682
683/* Update a channel's event queue's read pointer (RPTR) register
684 *
685 * This writes the EVQ_RPTR_REG register for the specified channel's
686 * event queue.
687 *
688 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
689 * whereas channel->eventq_read_ptr contains the index of the "next to
690 * read" event.
691 */
692void falcon_eventq_read_ack(struct efx_channel *channel)
693{
694 efx_dword_t reg;
695 struct efx_nic *efx = channel->efx;
696
697 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
698 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
699 channel->channel);
700}
701
702/* Use HW to insert a SW defined event */
703void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
704{
705 efx_oword_t drv_ev_reg;
706
707 EFX_POPULATE_OWORD_2(drv_ev_reg,
708 DRV_EV_QID, channel->channel,
709 DRV_EV_DATA,
710 EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
711 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
712}
713
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		/* Credit the interrupt-moderation score with the number
		 * of descriptors this event completes (ring-masked delta
		 * from the queue's read pointer) */
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			efx->type->txd_ring_mask;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		/* Hold the TX lock so the doorbell rewrite cannot race
		 * with the transmit path */
		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		/* Packet error with the workaround active: recover by
		 * scheduling a TX-descriptor-fetch reset */
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
755
/* Detect errors included in the rx_evt_pkt_ok bit.
 *
 * Decodes the individual error flags from @event, updates the
 * per-channel error counters, and tells the caller (via *discard)
 * whether the frame must be dropped.  *rx_ev_pkt_ok is not written
 * here despite being passed in — NOTE(review): parameter appears
 * unused in this revision.
 */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	/* Extract every error-related field from the event */
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
	/* DRIB_NIB is only meaningful on pre-B0 silicon */
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
833
834/* Handle receive events that are not in-order. */
835static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
836 unsigned index)
837{
838 struct efx_nic *efx = rx_queue->efx;
839 unsigned expected, dropped;
840
841 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
842 dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
843 FALCON_RXD_RING_MASK);
844 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
845 dropped, index, expected);
846
847 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
848 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
849}
850
/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	/* Scatter/jumbo continuation is not expected on these queues */
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);

	/* RX queues map 1:1 to channels here */
	rx_queue = &efx->rx_queue[channel->channel];

	/* Out-of-order descriptor index means events were lost */
	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	/* Feed the adaptive interrupt-moderation score */
	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
910
/* Global events are basically PHY events
 *
 * Dispatches PHY/MAC interrupt indications to work items and handles
 * the RX_RECOVERY condition by scheduling a reset.  Logs any global
 * event it does not recognise.
 */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	/* Any of the PHY interrupt flags -> ack and defer to phy_work */
	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
	    EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
	    EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}

	/* XGMAC management interrupt only exists on B0 and later */
	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
		queue_work(efx->workqueue, &efx->mac_work);
		handled = true;
	}

	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}
948
/* Handle a driver-generated event
 *
 * Decodes DRIVER_EV_SUB_CODE/DATA; most sub-events are only traced,
 * but descriptor-fetch errors and RX recovery schedule a reset.
 */
static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);

	switch (ev_sub_code) {
	case TX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case RX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case EVQ_INIT_DONE_EV_DECODE:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case SRM_UPD_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case WAKE_UP_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case TIMER_EV_DECODE:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case RX_RECOVERY_EV_DECODE:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case RX_DSC_ERROR_EV_DECODE:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case TX_DSC_ERROR_EV_DECODE:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
1010
/* Process events on a channel's event queue up to @rx_quota RX packets.
 *
 * Each consumed event slot is overwritten with all-ones so that
 * falcon_event_present() treats it as empty on the next pass.  The
 * channel's read pointer is only committed back at the end.
 * Returns the number of RX packet events handled.
 */
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		/* Copy the event before clearing it in the ring */
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, EV_CODE);

		switch (ev_code) {
		case RX_IP_EV_DECODE:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case TX_IP_EV_DECODE:
			falcon_handle_tx_event(channel, &event);
			break;
		case DRV_GEN_EV_DECODE:
			/* Test event: publish its magic for the self-test */
			channel->eventq_magic
				= EFX_QWORD_FIELD(event, EVQ_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case GLOBAL_EV_DECODE:
			falcon_handle_global_event(channel, &event);
			break;
		case DRIVER_EV_DECODE:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
1071
1072void falcon_set_int_moderation(struct efx_channel *channel)
1073{ 103{
1074 efx_dword_t timer_cmd; 104 efx_dword_t timer_cmd;
1075 struct efx_nic *efx = channel->efx; 105 struct efx_nic *efx = channel->efx;
1076 106
1077 /* Set timer register */ 107 /* Set timer register */
1078 if (channel->irq_moderation) { 108 if (channel->irq_moderation) {
1079 /* Round to resolution supported by hardware. The value we
1080 * program is based at 0. So actual interrupt moderation
1081 * achieved is ((x + 1) * res).
1082 */
1083 channel->irq_moderation -= (channel->irq_moderation %
1084 FALCON_IRQ_MOD_RESOLUTION);
1085 if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
1086 channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
1087 EFX_POPULATE_DWORD_2(timer_cmd, 109 EFX_POPULATE_DWORD_2(timer_cmd,
1088 TIMER_MODE, TIMER_MODE_INT_HLDOFF, 110 FRF_AB_TC_TIMER_MODE,
1089 TIMER_VAL, 111 FFE_BB_TIMER_MODE_INT_HLDOFF,
1090 channel->irq_moderation / 112 FRF_AB_TC_TIMER_VAL,
1091 FALCON_IRQ_MOD_RESOLUTION - 1); 113 channel->irq_moderation - 1);
1092 } else { 114 } else {
1093 EFX_POPULATE_DWORD_2(timer_cmd, 115 EFX_POPULATE_DWORD_2(timer_cmd,
1094 TIMER_MODE, TIMER_MODE_DIS, 116 FRF_AB_TC_TIMER_MODE,
1095 TIMER_VAL, 0); 117 FFE_BB_TIMER_MODE_DIS,
118 FRF_AB_TC_TIMER_VAL, 0);
1096 } 119 }
1097 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, 120 BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
1098 channel->channel); 121 efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
1099 122 channel->channel);
1100} 123}
1101 124
1102/* Allocate buffer table entries for event queue */ 125static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
1103int falcon_probe_eventq(struct efx_channel *channel)
1104{
1105 struct efx_nic *efx = channel->efx;
1106 unsigned int evq_size;
1107
1108 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
1109 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
1110}
1111 126
1112void falcon_init_eventq(struct efx_channel *channel) 127static void falcon_prepare_flush(struct efx_nic *efx)
1113{ 128{
1114 efx_oword_t evq_ptr; 129 falcon_deconfigure_mac_wrapper(efx);
1115 struct efx_nic *efx = channel->efx;
1116
1117 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
1118 channel->channel, channel->eventq.index,
1119 channel->eventq.index + channel->eventq.entries - 1);
1120
1121 /* Pin event queue buffer */
1122 falcon_init_special_buffer(efx, &channel->eventq);
1123 130
1124 /* Fill event queue with all ones (i.e. empty events) */ 131 /* Wait for the tx and rx fifo's to get to the next packet boundary
1125 memset(channel->eventq.addr, 0xff, channel->eventq.len); 132 * (~1ms without back-pressure), then to drain the remainder of the
1126 133 * fifo's at data path speeds (negligible), with a healthy margin. */
1127 /* Push event queue to card */ 134 msleep(10);
1128 EFX_POPULATE_OWORD_3(evq_ptr,
1129 EVQ_EN, 1,
1130 EVQ_SIZE, FALCON_EVQ_ORDER,
1131 EVQ_BUF_BASE_ID, channel->eventq.index);
1132 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1133 channel->channel);
1134
1135 falcon_set_int_moderation(channel);
1136}
1137
1138void falcon_fini_eventq(struct efx_channel *channel)
1139{
1140 efx_oword_t eventq_ptr;
1141 struct efx_nic *efx = channel->efx;
1142
1143 /* Remove event queue from card */
1144 EFX_ZERO_OWORD(eventq_ptr);
1145 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1146 channel->channel);
1147
1148 /* Unpin event queue */
1149 falcon_fini_special_buffer(efx, &channel->eventq);
1150}
1151
1152/* Free buffers backing event queue */
1153void falcon_remove_eventq(struct efx_channel *channel)
1154{
1155 falcon_free_special_buffer(channel->efx, &channel->eventq);
1156}
1157
1158
1159/* Generates a test event on the event queue. A subsequent call to
1160 * process_eventq() should pick up the event and place the value of
1161 * "magic" into channel->eventq_magic;
1162 */
1163void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1164{
1165 efx_qword_t test_event;
1166
1167 EFX_POPULATE_QWORD_2(test_event,
1168 EV_CODE, DRV_GEN_EV_DECODE,
1169 EVQ_MAGIC, magic);
1170 falcon_generate_event(channel, &test_event);
1171}
1172
1173void falcon_sim_phy_event(struct efx_nic *efx)
1174{
1175 efx_qword_t phy_event;
1176
1177 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
1178 if (EFX_IS10G(efx))
1179 EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
1180 else
1181 EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
1182
1183 falcon_generate_event(&efx->channel[0], &phy_event);
1184}
1185
1186/**************************************************************************
1187 *
1188 * Flush handling
1189 *
1190 **************************************************************************/
1191
1192
/* Scan channel 0's event queue (without consuming events) for TX/RX
 * flush-done driver events and mark the corresponding queues flushed.
 * A failed RX flush is retried immediately.  Stops at the first empty
 * slot or after one full lap of the ring.
 */
static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	/* One slot before the read pointer == one full lap */
	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
		if (ev_code == DRIVER_EV_DECODE &&
		    ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   DRIVER_EV_TX_DESCQ_ID);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = true;
			}
		} else if (ev_code == DRIVER_EV_DECODE &&
			   ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(*event,
						    DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;

				/* retry the rx flush */
				if (ev_failed)
					falcon_flush_rx_queue(rx_queue);
				else
					rx_queue->flushed = true;
			}
		}

		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
	} while (read_ptr != end_ptr);
}
1239
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them.
 *
 * Returns 0 on success (or when workaround 7803 fakes success),
 * -ETIMEDOUT if any queue failed to flush within the poll budget.
 * On exit every queue's ->flushed is true, meaning "flush attempted".
 */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i;
	bool outstanding;

	/* Issue flush requests */
	efx_for_each_tx_queue(tx_queue, efx) {
		tx_queue->flushed = false;
		falcon_flush_tx_queue(tx_queue);
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->flushed = false;
		falcon_flush_rx_queue(rx_queue);
	}

	/* Poll the evq looking for flush completions. Since we're not pushing
	 * any more rx or tx descriptors at this point, we're in no danger of
	 * overflowing the evq whilst we wait */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);

		/* Check if every queue has been successfully flushed */
		outstanding = false;
		efx_for_each_tx_queue(tx_queue, efx)
			outstanding |= !tx_queue->flushed;
		efx_for_each_rx_queue(rx_queue, efx)
			outstanding |= !rx_queue->flushed;
		if (!outstanding)
			return 0;
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway. "flushed" now
	 * indicates that we tried to flush. */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (!tx_queue->flushed)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = true;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (!rx_queue->flushed)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = true;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}
1298
1299/**************************************************************************
1300 *
1301 * Falcon hardware interrupts
1302 * The hardware interrupt handler does very little work; all the event
1303 * queue processing is carried out by per-channel tasklets.
1304 *
1305 **************************************************************************/
1306
1307/* Enable/disable/generate Falcon interrupts */
1308static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1309 int force)
1310{
1311 efx_oword_t int_en_reg_ker;
1312
1313 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1314 KER_INT_KER, force,
1315 DRV_INT_EN_KER, enabled);
1316 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
1317}
1318
/* Enable hardware interrupts: clear the interrupt status block,
 * program its DMA address, enable interrupt generation, then kick
 * every channel so their EVQ read pointers are brought up to date.
 */
void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
			     INT_ADR_KER, efx->irq_status.dma_addr);
	falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
1341
/* Mask all hardware interrupt generation */
void falcon_disable_interrupts(struct efx_nic *efx)
{
	falcon_interrupts(efx, 0, 0);
}
1347
/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}
1356 136
1357/* Acknowledge a legacy interrupt from Falcon 137/* Acknowledge a legacy interrupt from Falcon
@@ -1364,113 +144,17 @@ void falcon_generate_interrupt(struct efx_nic *efx)
1364 * 144 *
1365 * NB most hardware supports MSI interrupts 145 * NB most hardware supports MSI interrupts
1366 */ 146 */
1367static inline void falcon_irq_ack_a1(struct efx_nic *efx) 147inline void falcon_irq_ack_a1(struct efx_nic *efx)
1368{
1369 efx_dword_t reg;
1370
1371 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
1372 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
1373 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
1374}
1375
1376/* Process a fatal interrupt
1377 * Disable bus mastering ASAP and schedule a reset
1378 */
1379static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1380{ 148{
1381 struct falcon_nic_data *nic_data = efx->nic_data;
1382 efx_oword_t *int_ker = efx->irq_status.addr;
1383 efx_oword_t fatal_intr;
1384 int error, mem_perr;
1385
1386 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
1387 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
1388
1389 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1390 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1391 EFX_OWORD_VAL(fatal_intr),
1392 error ? "disabling bus mastering" : "no recognised error");
1393 if (error == 0)
1394 goto out;
1395
1396 /* If this is a memory parity error dump which blocks are offending */
1397 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
1398 if (mem_perr) {
1399 efx_oword_t reg;
1400 falcon_read(efx, &reg, MEM_STAT_REG_KER);
1401 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1402 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1403 }
1404
1405 /* Disable both devices */
1406 pci_clear_master(efx->pci_dev);
1407 if (FALCON_IS_DUAL_FUNC(efx))
1408 pci_clear_master(nic_data->pci_dev2);
1409 falcon_disable_interrupts(efx);
1410
1411 /* Count errors and reset or disable the NIC accordingly */
1412 if (nic_data->int_error_count == 0 ||
1413 time_after(jiffies, nic_data->int_error_expire)) {
1414 nic_data->int_error_count = 0;
1415 nic_data->int_error_expire =
1416 jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
1417 }
1418 if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
1419 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1420 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1421 } else {
1422 EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
1423 "NIC will be disabled\n");
1424 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1425 }
1426out:
1427 return IRQ_HANDLED;
1428}
1429
1430/* Handle a legacy interrupt from Falcon
1431 * Acknowledges the interrupt and schedule event queue processing.
1432 */
1433static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1434{
1435 struct efx_nic *efx = dev_id;
1436 efx_oword_t *int_ker = efx->irq_status.addr;
1437 irqreturn_t result = IRQ_NONE;
1438 struct efx_channel *channel;
1439 efx_dword_t reg; 149 efx_dword_t reg;
1440 u32 queues;
1441 int syserr;
1442 150
1443 /* Read the ISR which also ACKs the interrupts */ 151 EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
1444 falcon_readl(efx, &reg, INT_ISR0_B0); 152 efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
1445 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 153 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
1446
1447 /* Check to see if we have a serious error condition */
1448 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1449 if (unlikely(syserr))
1450 return falcon_fatal_interrupt(efx);
1451
1452 /* Schedule processing of any interrupting queues */
1453 efx_for_each_channel(channel, efx) {
1454 if ((queues & 1) ||
1455 falcon_event_present(
1456 falcon_event(channel, channel->eventq_read_ptr))) {
1457 efx_schedule_channel(channel);
1458 result = IRQ_HANDLED;
1459 }
1460 queues >>= 1;
1461 }
1462
1463 if (result == IRQ_HANDLED) {
1464 efx->last_irq_cpu = raw_smp_processor_id();
1465 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1466 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1467 }
1468
1469 return result;
1470} 154}
1471 155
1472 156
1473static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 157irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1474{ 158{
1475 struct efx_nic *efx = dev_id; 159 struct efx_nic *efx = dev_id;
1476 efx_oword_t *int_ker = efx->irq_status.addr; 160 efx_oword_t *int_ker = efx->irq_status.addr;
@@ -1491,15 +175,15 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1491 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 175 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1492 176
1493 /* Check to see if we have a serious error condition */ 177 /* Check to see if we have a serious error condition */
1494 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 178 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1495 if (unlikely(syserr)) 179 if (unlikely(syserr))
1496 return falcon_fatal_interrupt(efx); 180 return efx_nic_fatal_interrupt(efx);
1497 181
1498 /* Determine interrupting queues, clear interrupt status 182 /* Determine interrupting queues, clear interrupt status
1499 * register and acknowledge the device interrupt. 183 * register and acknowledge the device interrupt.
1500 */ 184 */
1501 BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS); 185 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
1502 queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS); 186 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
1503 EFX_ZERO_OWORD(*int_ker); 187 EFX_ZERO_OWORD(*int_ker);
1504 wmb(); /* Ensure the vector is cleared before interrupt ack */ 188 wmb(); /* Ensure the vector is cleared before interrupt ack */
1505 falcon_irq_ack_a1(efx); 189 falcon_irq_ack_a1(efx);
@@ -1515,126 +199,6 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1515 199
1516 return IRQ_HANDLED; 200 return IRQ_HANDLED;
1517} 201}
1518
1519/* Handle an MSI interrupt from Falcon
1520 *
1521 * Handle an MSI hardware interrupt. This routine schedules event
1522 * queue processing. No interrupt acknowledgement cycle is necessary.
1523 * Also, we never need to check that the interrupt is for us, since
1524 * MSI interrupts cannot be shared.
1525 */
1526static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1527{
1528 struct efx_channel *channel = dev_id;
1529 struct efx_nic *efx = channel->efx;
1530 efx_oword_t *int_ker = efx->irq_status.addr;
1531 int syserr;
1532
1533 efx->last_irq_cpu = raw_smp_processor_id();
1534 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1535 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1536
1537 /* Check to see if we have a serious error condition */
1538 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1539 if (unlikely(syserr))
1540 return falcon_fatal_interrupt(efx);
1541
1542 /* Schedule processing of the channel */
1543 efx_schedule_channel(channel);
1544
1545 return IRQ_HANDLED;
1546}
1547
1548
1549/* Setup RSS indirection table.
1550 * This maps from the hash value of the packet to RXQ
1551 */
1552static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1553{
1554 int i = 0;
1555 unsigned long offset;
1556 efx_dword_t dword;
1557
1558 if (falcon_rev(efx) < FALCON_REV_B0)
1559 return;
1560
1561 for (offset = RX_RSS_INDIR_TBL_B0;
1562 offset < RX_RSS_INDIR_TBL_B0 + 0x800;
1563 offset += 0x10) {
1564 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
1565 i % efx->n_rx_queues);
1566 falcon_writel(efx, &dword, offset);
1567 i++;
1568 }
1569}
1570
1571/* Hook interrupt handler(s)
1572 * Try MSI and then legacy interrupts.
1573 */
1574int falcon_init_interrupt(struct efx_nic *efx)
1575{
1576 struct efx_channel *channel;
1577 int rc;
1578
1579 if (!EFX_INT_MODE_USE_MSI(efx)) {
1580 irq_handler_t handler;
1581 if (falcon_rev(efx) >= FALCON_REV_B0)
1582 handler = falcon_legacy_interrupt_b0;
1583 else
1584 handler = falcon_legacy_interrupt_a1;
1585
1586 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1587 efx->name, efx);
1588 if (rc) {
1589 EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
1590 efx->pci_dev->irq);
1591 goto fail1;
1592 }
1593 return 0;
1594 }
1595
1596 /* Hook MSI or MSI-X interrupt */
1597 efx_for_each_channel(channel, efx) {
1598 rc = request_irq(channel->irq, falcon_msi_interrupt,
1599 IRQF_PROBE_SHARED, /* Not shared */
1600 channel->name, channel);
1601 if (rc) {
1602 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
1603 goto fail2;
1604 }
1605 }
1606
1607 return 0;
1608
1609 fail2:
1610 efx_for_each_channel(channel, efx)
1611 free_irq(channel->irq, channel);
1612 fail1:
1613 return rc;
1614}
1615
1616void falcon_fini_interrupt(struct efx_nic *efx)
1617{
1618 struct efx_channel *channel;
1619 efx_oword_t reg;
1620
1621 /* Disable MSI/MSI-X interrupts */
1622 efx_for_each_channel(channel, efx) {
1623 if (channel->irq)
1624 free_irq(channel->irq, channel);
1625 }
1626
1627 /* ACK legacy interrupt */
1628 if (falcon_rev(efx) >= FALCON_REV_B0)
1629 falcon_read(efx, &reg, INT_ISR0_B0);
1630 else
1631 falcon_irq_ack_a1(efx);
1632
1633 /* Disable legacy interrupt */
1634 if (efx->legacy_irq)
1635 free_irq(efx->legacy_irq, efx);
1636}
1637
1638/************************************************************************** 202/**************************************************************************
1639 * 203 *
1640 * EEPROM/flash 204 * EEPROM/flash
@@ -1647,8 +211,8 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1647static int falcon_spi_poll(struct efx_nic *efx) 211static int falcon_spi_poll(struct efx_nic *efx)
1648{ 212{
1649 efx_oword_t reg; 213 efx_oword_t reg;
1650 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 214 efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
1651 return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; 215 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
1652} 216}
1653 217
1654/* Wait for SPI command completion */ 218/* Wait for SPI command completion */
@@ -1678,11 +242,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
1678 } 242 }
1679} 243}
1680 244
1681int falcon_spi_cmd(const struct efx_spi_device *spi, 245int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
1682 unsigned int command, int address, 246 unsigned int command, int address,
1683 const void *in, void *out, size_t len) 247 const void *in, void *out, size_t len)
1684{ 248{
1685 struct efx_nic *efx = spi->efx;
1686 bool addressed = (address >= 0); 249 bool addressed = (address >= 0);
1687 bool reading = (out != NULL); 250 bool reading = (out != NULL);
1688 efx_oword_t reg; 251 efx_oword_t reg;
@@ -1700,27 +263,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1700 263
1701 /* Program address register, if we have an address */ 264 /* Program address register, if we have an address */
1702 if (addressed) { 265 if (addressed) {
1703 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); 266 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
1704 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER); 267 efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
1705 } 268 }
1706 269
1707 /* Program data register, if we have data */ 270 /* Program data register, if we have data */
1708 if (in != NULL) { 271 if (in != NULL) {
1709 memcpy(&reg, in, len); 272 memcpy(&reg, in, len);
1710 falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER); 273 efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
1711 } 274 }
1712 275
1713 /* Issue read/write command */ 276 /* Issue read/write command */
1714 EFX_POPULATE_OWORD_7(reg, 277 EFX_POPULATE_OWORD_7(reg,
1715 EE_SPI_HCMD_CMD_EN, 1, 278 FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
1716 EE_SPI_HCMD_SF_SEL, spi->device_id, 279 FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
1717 EE_SPI_HCMD_DABCNT, len, 280 FRF_AB_EE_SPI_HCMD_DABCNT, len,
1718 EE_SPI_HCMD_READ, reading, 281 FRF_AB_EE_SPI_HCMD_READ, reading,
1719 EE_SPI_HCMD_DUBCNT, 0, 282 FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
1720 EE_SPI_HCMD_ADBCNT, 283 FRF_AB_EE_SPI_HCMD_ADBCNT,
1721 (addressed ? spi->addr_len : 0), 284 (addressed ? spi->addr_len : 0),
1722 EE_SPI_HCMD_ENC, command); 285 FRF_AB_EE_SPI_HCMD_ENC, command);
1723 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER); 286 efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
1724 287
1725 /* Wait for read/write to complete */ 288 /* Wait for read/write to complete */
1726 rc = falcon_spi_wait(efx); 289 rc = falcon_spi_wait(efx);
@@ -1729,7 +292,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1729 292
1730 /* Read data */ 293 /* Read data */
1731 if (out != NULL) { 294 if (out != NULL) {
1732 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER); 295 efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
1733 memcpy(out, &reg, len); 296 memcpy(out, &reg, len);
1734 } 297 }
1735 298
@@ -1751,15 +314,15 @@ efx_spi_munge_command(const struct efx_spi_device *spi,
1751} 314}
1752 315
1753/* Wait up to 10 ms for buffered write completion */ 316/* Wait up to 10 ms for buffered write completion */
1754int falcon_spi_wait_write(const struct efx_spi_device *spi) 317int
318falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
1755{ 319{
1756 struct efx_nic *efx = spi->efx;
1757 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); 320 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
1758 u8 status; 321 u8 status;
1759 int rc; 322 int rc;
1760 323
1761 for (;;) { 324 for (;;) {
1762 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, 325 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
1763 &status, sizeof(status)); 326 &status, sizeof(status));
1764 if (rc) 327 if (rc)
1765 return rc; 328 return rc;
@@ -1775,8 +338,8 @@ int falcon_spi_wait_write(const struct efx_spi_device *spi)
1775 } 338 }
1776} 339}
1777 340
1778int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, 341int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
1779 size_t len, size_t *retlen, u8 *buffer) 342 loff_t start, size_t len, size_t *retlen, u8 *buffer)
1780{ 343{
1781 size_t block_len, pos = 0; 344 size_t block_len, pos = 0;
1782 unsigned int command; 345 unsigned int command;
@@ -1786,7 +349,7 @@ int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
1786 block_len = min(len - pos, FALCON_SPI_MAX_LEN); 349 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
1787 350
1788 command = efx_spi_munge_command(spi, SPI_READ, start + pos); 351 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1789 rc = falcon_spi_cmd(spi, command, start + pos, NULL, 352 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
1790 buffer + pos, block_len); 353 buffer + pos, block_len);
1791 if (rc) 354 if (rc)
1792 break; 355 break;
@@ -1805,8 +368,9 @@ int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
1805 return rc; 368 return rc;
1806} 369}
1807 370
1808int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, 371int
1809 size_t len, size_t *retlen, const u8 *buffer) 372falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
373 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
1810{ 374{
1811 u8 verify_buffer[FALCON_SPI_MAX_LEN]; 375 u8 verify_buffer[FALCON_SPI_MAX_LEN];
1812 size_t block_len, pos = 0; 376 size_t block_len, pos = 0;
@@ -1814,24 +378,24 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1814 int rc = 0; 378 int rc = 0;
1815 379
1816 while (pos < len) { 380 while (pos < len) {
1817 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); 381 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
1818 if (rc) 382 if (rc)
1819 break; 383 break;
1820 384
1821 block_len = min(len - pos, 385 block_len = min(len - pos,
1822 falcon_spi_write_limit(spi, start + pos)); 386 falcon_spi_write_limit(spi, start + pos));
1823 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); 387 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
1824 rc = falcon_spi_cmd(spi, command, start + pos, 388 rc = falcon_spi_cmd(efx, spi, command, start + pos,
1825 buffer + pos, NULL, block_len); 389 buffer + pos, NULL, block_len);
1826 if (rc) 390 if (rc)
1827 break; 391 break;
1828 392
1829 rc = falcon_spi_wait_write(spi); 393 rc = falcon_spi_wait_write(efx, spi);
1830 if (rc) 394 if (rc)
1831 break; 395 break;
1832 396
1833 command = efx_spi_munge_command(spi, SPI_READ, start + pos); 397 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1834 rc = falcon_spi_cmd(spi, command, start + pos, 398 rc = falcon_spi_cmd(efx, spi, command, start + pos,
1835 NULL, verify_buffer, block_len); 399 NULL, verify_buffer, block_len);
1836 if (memcmp(verify_buffer, buffer + pos, block_len)) { 400 if (memcmp(verify_buffer, buffer + pos, block_len)) {
1837 rc = -EIO; 401 rc = -EIO;
@@ -1860,60 +424,70 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1860 ************************************************************************** 424 **************************************************************************
1861 */ 425 */
1862 426
1863static int falcon_reset_macs(struct efx_nic *efx) 427static void falcon_push_multicast_hash(struct efx_nic *efx)
1864{ 428{
1865 efx_oword_t reg; 429 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
430
431 WARN_ON(!mutex_is_locked(&efx->mac_lock));
432
433 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
434 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
435}
436
437static void falcon_reset_macs(struct efx_nic *efx)
438{
439 struct falcon_nic_data *nic_data = efx->nic_data;
440 efx_oword_t reg, mac_ctrl;
1866 int count; 441 int count;
1867 442
1868 if (falcon_rev(efx) < FALCON_REV_B0) { 443 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
1869 /* It's not safe to use GLB_CTL_REG to reset the 444 /* It's not safe to use GLB_CTL_REG to reset the
1870 * macs, so instead use the internal MAC resets 445 * macs, so instead use the internal MAC resets
1871 */ 446 */
1872 if (!EFX_IS10G(efx)) { 447 if (!EFX_IS10G(efx)) {
1873 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1); 448 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
1874 falcon_write(efx, &reg, GM_CFG1_REG); 449 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1875 udelay(1000); 450 udelay(1000);
1876 451
1877 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0); 452 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
1878 falcon_write(efx, &reg, GM_CFG1_REG); 453 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1879 udelay(1000); 454 udelay(1000);
1880 return 0; 455 return;
1881 } else { 456 } else {
1882 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); 457 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
1883 falcon_write(efx, &reg, XM_GLB_CFG_REG); 458 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
1884 459
1885 for (count = 0; count < 10000; count++) { 460 for (count = 0; count < 10000; count++) {
1886 falcon_read(efx, &reg, XM_GLB_CFG_REG); 461 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
1887 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) 462 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
1888 return 0; 463 0)
464 return;
1889 udelay(10); 465 udelay(10);
1890 } 466 }
1891 467
1892 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 468 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
1893 return -ETIMEDOUT;
1894 } 469 }
1895 } 470 }
1896 471
1897 /* MAC stats will fail whilst the TX fifo is draining. Serialise 472 /* Mac stats will fail whist the TX fifo is draining */
1898 * the drain sequence with the statistics fetch */ 473 WARN_ON(nic_data->stats_disable_count == 0);
1899 efx_stats_disable(efx);
1900 474
1901 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 475 efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
1902 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); 476 EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
1903 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 477 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
1904 478
1905 falcon_read(efx, &reg, GLB_CTL_REG_KER); 479 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1906 EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1); 480 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
1907 EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1); 481 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
1908 EFX_SET_OWORD_FIELD(reg, RST_EM, 1); 482 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
1909 falcon_write(efx, &reg, GLB_CTL_REG_KER); 483 efx_writeo(efx, &reg, FR_AB_GLB_CTL);
1910 484
1911 count = 0; 485 count = 0;
1912 while (1) { 486 while (1) {
1913 falcon_read(efx, &reg, GLB_CTL_REG_KER); 487 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1914 if (!EFX_OWORD_FIELD(reg, RST_XGTX) && 488 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
1915 !EFX_OWORD_FIELD(reg, RST_XGRX) && 489 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
1916 !EFX_OWORD_FIELD(reg, RST_EM)) { 490 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
1917 EFX_LOG(efx, "Completed MAC reset after %d loops\n", 491 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1918 count); 492 count);
1919 break; 493 break;
@@ -1926,55 +500,50 @@ static int falcon_reset_macs(struct efx_nic *efx)
1926 udelay(10); 500 udelay(10);
1927 } 501 }
1928 502
1929 efx_stats_enable(efx); 503 /* Ensure the correct MAC is selected before statistics
1930 504 * are re-enabled by the caller */
1931 /* If we've reset the EM block and the link is up, then 505 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
1932 * we'll have to kick the XAUI link so the PHY can recover */
1933 if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
1934 falcon_reset_xaui(efx);
1935
1936 return 0;
1937} 506}
1938 507
1939void falcon_drain_tx_fifo(struct efx_nic *efx) 508void falcon_drain_tx_fifo(struct efx_nic *efx)
1940{ 509{
1941 efx_oword_t reg; 510 efx_oword_t reg;
1942 511
1943 if ((falcon_rev(efx) < FALCON_REV_B0) || 512 if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
1944 (efx->loopback_mode != LOOPBACK_NONE)) 513 (efx->loopback_mode != LOOPBACK_NONE))
1945 return; 514 return;
1946 515
1947 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 516 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1948 /* There is no point in draining more than once */ 517 /* There is no point in draining more than once */
1949 if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0)) 518 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1950 return; 519 return;
1951 520
1952 falcon_reset_macs(efx); 521 falcon_reset_macs(efx);
1953} 522}
1954 523
1955void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) 524static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1956{ 525{
1957 efx_oword_t reg; 526 efx_oword_t reg;
1958 527
1959 if (falcon_rev(efx) < FALCON_REV_B0) 528 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1960 return; 529 return;
1961 530
1962 /* Isolate the MAC -> RX */ 531 /* Isolate the MAC -> RX */
1963 falcon_read(efx, &reg, RX_CFG_REG_KER); 532 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1964 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0); 533 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1965 falcon_write(efx, &reg, RX_CFG_REG_KER); 534 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1966 535
1967 if (!efx->link_up) 536 /* Isolate TX -> MAC */
1968 falcon_drain_tx_fifo(efx); 537 falcon_drain_tx_fifo(efx);
1969} 538}
1970 539
1971void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) 540void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1972{ 541{
542 struct efx_link_state *link_state = &efx->link_state;
1973 efx_oword_t reg; 543 efx_oword_t reg;
1974 int link_speed; 544 int link_speed;
1975 bool tx_fc;
1976 545
1977 switch (efx->link_speed) { 546 switch (link_state->speed) {
1978 case 10000: link_speed = 3; break; 547 case 10000: link_speed = 3; break;
1979 case 1000: link_speed = 2; break; 548 case 1000: link_speed = 2; break;
1980 case 100: link_speed = 1; break; 549 case 100: link_speed = 1; break;
@@ -1985,75 +554,139 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1985 * indefinitely held and TX queue can be flushed at any point 554 * indefinitely held and TX queue can be flushed at any point
1986 * while the link is down. */ 555 * while the link is down. */
1987 EFX_POPULATE_OWORD_5(reg, 556 EFX_POPULATE_OWORD_5(reg,
1988 MAC_XOFF_VAL, 0xffff /* max pause time */, 557 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
1989 MAC_BCAD_ACPT, 1, 558 FRF_AB_MAC_BCAD_ACPT, 1,
1990 MAC_UC_PROM, efx->promiscuous, 559 FRF_AB_MAC_UC_PROM, efx->promiscuous,
1991 MAC_LINK_STATUS, 1, /* always set */ 560 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
1992 MAC_SPEED, link_speed); 561 FRF_AB_MAC_SPEED, link_speed);
1993 /* On B0, MAC backpressure can be disabled and packets get 562 /* On B0, MAC backpressure can be disabled and packets get
1994 * discarded. */ 563 * discarded. */
1995 if (falcon_rev(efx) >= FALCON_REV_B0) { 564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1996 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 565 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
1997 !efx->link_up); 566 !link_state->up);
1998 } 567 }
1999 568
2000 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 569 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
2001 570
2002 /* Restore the multicast hash registers. */ 571 /* Restore the multicast hash registers. */
2003 falcon_set_multicast_hash(efx); 572 falcon_push_multicast_hash(efx);
2004
2005 /* Transmission of pause frames when RX crosses the threshold is
2006 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
2007 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
2008 tx_fc = !!(efx->link_fc & EFX_FC_TX);
2009 falcon_read(efx, &reg, RX_CFG_REG_KER);
2010 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
2011 573
574 efx_reado(efx, &reg, FR_AZ_RX_CFG);
575 /* Enable XOFF signal from RX FIFO (we enabled it during NIC
576 * initialisation but it may read back as 0) */
577 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
2012 /* Unisolate the MAC -> RX */ 578 /* Unisolate the MAC -> RX */
2013 if (falcon_rev(efx) >= FALCON_REV_B0) 579 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
2014 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 580 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2015 falcon_write(efx, &reg, RX_CFG_REG_KER); 581 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2016} 582}
2017 583
2018int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) 584static void falcon_stats_request(struct efx_nic *efx)
2019{ 585{
586 struct falcon_nic_data *nic_data = efx->nic_data;
2020 efx_oword_t reg; 587 efx_oword_t reg;
2021 u32 *dma_done;
2022 int i;
2023 588
2024 if (disable_dma_stats) 589 WARN_ON(nic_data->stats_pending);
2025 return 0; 590 WARN_ON(nic_data->stats_disable_count);
2026 591
2027 /* Statistics fetch will fail if the MAC is in TX drain */ 592 if (nic_data->stats_dma_done == NULL)
2028 if (falcon_rev(efx) >= FALCON_REV_B0) { 593 return; /* no mac selected */
2029 efx_oword_t temp;
2030 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
2031 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
2032 return 0;
2033 }
2034 594
2035 dma_done = (efx->stats_buffer.addr + done_offset); 595 *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
2036 *dma_done = FALCON_STATS_NOT_DONE; 596 nic_data->stats_pending = true;
2037 wmb(); /* ensure done flag is clear */ 597 wmb(); /* ensure done flag is clear */
2038 598
2039 /* Initiate DMA transfer of stats */ 599 /* Initiate DMA transfer of stats */
2040 EFX_POPULATE_OWORD_2(reg, 600 EFX_POPULATE_OWORD_2(reg,
2041 MAC_STAT_DMA_CMD, 1, 601 FRF_AB_MAC_STAT_DMA_CMD, 1,
2042 MAC_STAT_DMA_ADR, 602 FRF_AB_MAC_STAT_DMA_ADR,
2043 efx->stats_buffer.dma_addr); 603 efx->stats_buffer.dma_addr);
2044 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER); 604 efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
2045 605
2046 /* Wait for transfer to complete */ 606 mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
2047 for (i = 0; i < 400; i++) { 607}
2048 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) { 608
2049 rmb(); /* Ensure the stats are valid. */ 609static void falcon_stats_complete(struct efx_nic *efx)
2050 return 0; 610{
2051 } 611 struct falcon_nic_data *nic_data = efx->nic_data;
2052 udelay(10); 612
613 if (!nic_data->stats_pending)
614 return;
615
616 nic_data->stats_pending = 0;
617 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
618 rmb(); /* read the done flag before the stats */
619 efx->mac_op->update_stats(efx);
620 } else {
621 EFX_ERR(efx, "timed out waiting for statistics\n");
2053 } 622 }
623}
2054 624
2055 EFX_ERR(efx, "timed out waiting for statistics\n"); 625static void falcon_stats_timer_func(unsigned long context)
2056 return -ETIMEDOUT; 626{
627 struct efx_nic *efx = (struct efx_nic *)context;
628 struct falcon_nic_data *nic_data = efx->nic_data;
629
630 spin_lock(&efx->stats_lock);
631
632 falcon_stats_complete(efx);
633 if (nic_data->stats_disable_count == 0)
634 falcon_stats_request(efx);
635
636 spin_unlock(&efx->stats_lock);
637}
638
639static void falcon_switch_mac(struct efx_nic *efx);
640
641static bool falcon_loopback_link_poll(struct efx_nic *efx)
642{
643 struct efx_link_state old_state = efx->link_state;
644
645 WARN_ON(!mutex_is_locked(&efx->mac_lock));
646 WARN_ON(!LOOPBACK_INTERNAL(efx));
647
648 efx->link_state.fd = true;
649 efx->link_state.fc = efx->wanted_fc;
650 efx->link_state.up = true;
651
652 if (efx->loopback_mode == LOOPBACK_GMAC)
653 efx->link_state.speed = 1000;
654 else
655 efx->link_state.speed = 10000;
656
657 return !efx_link_state_equal(&efx->link_state, &old_state);
658}
659
660static int falcon_reconfigure_port(struct efx_nic *efx)
661{
662 int rc;
663
664 WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
665
666 /* Poll the PHY link state *before* reconfiguring it. This means we
667 * will pick up the correct speed (in loopback) to select the correct
668 * MAC.
669 */
670 if (LOOPBACK_INTERNAL(efx))
671 falcon_loopback_link_poll(efx);
672 else
673 efx->phy_op->poll(efx);
674
675 falcon_stop_nic_stats(efx);
676 falcon_deconfigure_mac_wrapper(efx);
677
678 falcon_switch_mac(efx);
679
680 efx->phy_op->reconfigure(efx);
681 rc = efx->mac_op->reconfigure(efx);
682 BUG_ON(rc);
683
684 falcon_start_nic_stats(efx);
685
686 /* Synchronise efx->link_state with the kernel */
687 efx_link_status_changed(efx);
688
689 return 0;
2057} 690}
2058 691
2059/************************************************************************** 692/**************************************************************************
@@ -2066,18 +699,18 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2066/* Wait for GMII access to complete */ 699/* Wait for GMII access to complete */
2067static int falcon_gmii_wait(struct efx_nic *efx) 700static int falcon_gmii_wait(struct efx_nic *efx)
2068{ 701{
2069 efx_dword_t md_stat; 702 efx_oword_t md_stat;
2070 int count; 703 int count;
2071 704
2072 /* wait upto 50ms - taken max from datasheet */ 705 /* wait upto 50ms - taken max from datasheet */
2073 for (count = 0; count < 5000; count++) { 706 for (count = 0; count < 5000; count++) {
2074 falcon_readl(efx, &md_stat, MD_STAT_REG_KER); 707 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
2075 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { 708 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2076 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || 709 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2077 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { 710 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2078 EFX_ERR(efx, "error from GMII access " 711 EFX_ERR(efx, "error from GMII access "
2079 EFX_DWORD_FMT"\n", 712 EFX_OWORD_FMT"\n",
2080 EFX_DWORD_VAL(md_stat)); 713 EFX_OWORD_VAL(md_stat));
2081 return -EIO; 714 return -EIO;
2082 } 715 }
2083 return 0; 716 return 0;
@@ -2099,7 +732,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
2099 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", 732 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2100 prtad, devad, addr, value); 733 prtad, devad, addr, value);
2101 734
2102 spin_lock_bh(&efx->phy_lock); 735 mutex_lock(&efx->mdio_lock);
2103 736
2104 /* Check MDIO not currently being accessed */ 737 /* Check MDIO not currently being accessed */
2105 rc = falcon_gmii_wait(efx); 738 rc = falcon_gmii_wait(efx);
@@ -2107,34 +740,35 @@ static int falcon_mdio_write(struct net_device *net_dev,
2107 goto out; 740 goto out;
2108 741
2109 /* Write the address/ID register */ 742 /* Write the address/ID register */
2110 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 743 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2111 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 744 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2112 745
2113 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 746 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2114 falcon_write(efx, &reg, MD_ID_REG_KER); 747 FRF_AB_MD_DEV_ADR, devad);
748 efx_writeo(efx, &reg, FR_AB_MD_ID);
2115 749
2116 /* Write data */ 750 /* Write data */
2117 EFX_POPULATE_OWORD_1(reg, MD_TXD, value); 751 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2118 falcon_write(efx, &reg, MD_TXD_REG_KER); 752 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2119 753
2120 EFX_POPULATE_OWORD_2(reg, 754 EFX_POPULATE_OWORD_2(reg,
2121 MD_WRC, 1, 755 FRF_AB_MD_WRC, 1,
2122 MD_GC, 0); 756 FRF_AB_MD_GC, 0);
2123 falcon_write(efx, &reg, MD_CS_REG_KER); 757 efx_writeo(efx, &reg, FR_AB_MD_CS);
2124 758
2125 /* Wait for data to be written */ 759 /* Wait for data to be written */
2126 rc = falcon_gmii_wait(efx); 760 rc = falcon_gmii_wait(efx);
2127 if (rc) { 761 if (rc) {
2128 /* Abort the write operation */ 762 /* Abort the write operation */
2129 EFX_POPULATE_OWORD_2(reg, 763 EFX_POPULATE_OWORD_2(reg,
2130 MD_WRC, 0, 764 FRF_AB_MD_WRC, 0,
2131 MD_GC, 1); 765 FRF_AB_MD_GC, 1);
2132 falcon_write(efx, &reg, MD_CS_REG_KER); 766 efx_writeo(efx, &reg, FR_AB_MD_CS);
2133 udelay(10); 767 udelay(10);
2134 } 768 }
2135 769
2136 out: 770out:
2137 spin_unlock_bh(&efx->phy_lock); 771 mutex_unlock(&efx->mdio_lock);
2138 return rc; 772 return rc;
2139} 773}
2140 774
@@ -2146,152 +780,139 @@ static int falcon_mdio_read(struct net_device *net_dev,
2146 efx_oword_t reg; 780 efx_oword_t reg;
2147 int rc; 781 int rc;
2148 782
2149 spin_lock_bh(&efx->phy_lock); 783 mutex_lock(&efx->mdio_lock);
2150 784
2151 /* Check MDIO not currently being accessed */ 785 /* Check MDIO not currently being accessed */
2152 rc = falcon_gmii_wait(efx); 786 rc = falcon_gmii_wait(efx);
2153 if (rc) 787 if (rc)
2154 goto out; 788 goto out;
2155 789
2156 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 790 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2157 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 791 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2158 792
2159 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 793 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2160 falcon_write(efx, &reg, MD_ID_REG_KER); 794 FRF_AB_MD_DEV_ADR, devad);
795 efx_writeo(efx, &reg, FR_AB_MD_ID);
2161 796
2162 /* Request data to be read */ 797 /* Request data to be read */
2163 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); 798 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2164 falcon_write(efx, &reg, MD_CS_REG_KER); 799 efx_writeo(efx, &reg, FR_AB_MD_CS);
2165 800
2166 /* Wait for data to become available */ 801 /* Wait for data to become available */
2167 rc = falcon_gmii_wait(efx); 802 rc = falcon_gmii_wait(efx);
2168 if (rc == 0) { 803 if (rc == 0) {
2169 falcon_read(efx, &reg, MD_RXD_REG_KER); 804 efx_reado(efx, &reg, FR_AB_MD_RXD);
2170 rc = EFX_OWORD_FIELD(reg, MD_RXD); 805 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2171 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", 806 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2172 prtad, devad, addr, rc); 807 prtad, devad, addr, rc);
2173 } else { 808 } else {
2174 /* Abort the read operation */ 809 /* Abort the read operation */
2175 EFX_POPULATE_OWORD_2(reg, 810 EFX_POPULATE_OWORD_2(reg,
2176 MD_RIC, 0, 811 FRF_AB_MD_RIC, 0,
2177 MD_GC, 1); 812 FRF_AB_MD_GC, 1);
2178 falcon_write(efx, &reg, MD_CS_REG_KER); 813 efx_writeo(efx, &reg, FR_AB_MD_CS);
2179 814
2180 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", 815 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2181 prtad, devad, addr, rc); 816 prtad, devad, addr, rc);
2182 } 817 }
2183 818
2184 out: 819out:
2185 spin_unlock_bh(&efx->phy_lock); 820 mutex_unlock(&efx->mdio_lock);
2186 return rc; 821 return rc;
2187} 822}
2188 823
2189static int falcon_probe_phy(struct efx_nic *efx) 824static void falcon_clock_mac(struct efx_nic *efx)
2190{ 825{
2191 switch (efx->phy_type) { 826 unsigned strap_val;
2192 case PHY_TYPE_SFX7101: 827 efx_oword_t nic_stat;
2193 efx->phy_op = &falcon_sfx7101_phy_ops;
2194 break;
2195 case PHY_TYPE_SFT9001A:
2196 case PHY_TYPE_SFT9001B:
2197 efx->phy_op = &falcon_sft9001_phy_ops;
2198 break;
2199 case PHY_TYPE_QT2022C2:
2200 case PHY_TYPE_QT2025C:
2201 efx->phy_op = &falcon_xfp_phy_ops;
2202 break;
2203 default:
2204 EFX_ERR(efx, "Unknown PHY type %d\n",
2205 efx->phy_type);
2206 return -1;
2207 }
2208
2209 if (efx->phy_op->macs & EFX_XMAC)
2210 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2211 (1 << LOOPBACK_XGXS) |
2212 (1 << LOOPBACK_XAUI));
2213 if (efx->phy_op->macs & EFX_GMAC)
2214 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2215 efx->loopback_modes |= efx->phy_op->loopbacks;
2216 828
2217 return 0; 829 /* Configure the NIC generated MAC clock correctly */
830 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
831 strap_val = EFX_IS10G(efx) ? 5 : 3;
832 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
833 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
834 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
835 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
836 } else {
837 /* Falcon A1 does not support 1G/10G speed switching
838 * and must not be used with a PHY that does. */
839 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
840 strap_val);
841 }
2218} 842}
2219 843
2220int falcon_switch_mac(struct efx_nic *efx) 844static void falcon_switch_mac(struct efx_nic *efx)
2221{ 845{
2222 struct efx_mac_operations *old_mac_op = efx->mac_op; 846 struct efx_mac_operations *old_mac_op = efx->mac_op;
2223 efx_oword_t nic_stat; 847 struct falcon_nic_data *nic_data = efx->nic_data;
2224 unsigned strap_val; 848 unsigned int stats_done_offset;
2225 int rc = 0;
2226
2227 /* Don't try to fetch MAC stats while we're switching MACs */
2228 efx_stats_disable(efx);
2229
2230 /* Internal loopbacks override the phy speed setting */
2231 if (efx->loopback_mode == LOOPBACK_GMAC) {
2232 efx->link_speed = 1000;
2233 efx->link_fd = true;
2234 } else if (LOOPBACK_INTERNAL(efx)) {
2235 efx->link_speed = 10000;
2236 efx->link_fd = true;
2237 }
2238 849
2239 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 850 WARN_ON(!mutex_is_locked(&efx->mac_lock));
851 WARN_ON(nic_data->stats_disable_count == 0);
852
2240 efx->mac_op = (EFX_IS10G(efx) ? 853 efx->mac_op = (EFX_IS10G(efx) ?
2241 &falcon_xmac_operations : &falcon_gmac_operations); 854 &falcon_xmac_operations : &falcon_gmac_operations);
2242 855
2243 /* Always push the NIC_STAT_REG setting even if the mac hasn't 856 if (EFX_IS10G(efx))
2244 * changed, because this function is run post online reset */ 857 stats_done_offset = XgDmaDone_offset;
2245 falcon_read(efx, &nic_stat, NIC_STAT_REG); 858 else
2246 strap_val = EFX_IS10G(efx) ? 5 : 3; 859 stats_done_offset = GDmaDone_offset;
2247 if (falcon_rev(efx) >= FALCON_REV_B0) { 860 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
2248 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
2249 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
2250 falcon_write(efx, &nic_stat, NIC_STAT_REG);
2251 } else {
2252 /* Falcon A1 does not support 1G/10G speed switching
2253 * and must not be used with a PHY that does. */
2254 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
2255 }
2256 861
2257 if (old_mac_op == efx->mac_op) 862 if (old_mac_op == efx->mac_op)
2258 goto out; 863 return;
864
865 falcon_clock_mac(efx);
2259 866
2260 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); 867 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2261 /* Not all macs support a mac-level link state */ 868 /* Not all macs support a mac-level link state */
2262 efx->mac_up = true; 869 efx->xmac_poll_required = false;
2263 870 falcon_reset_macs(efx);
2264 rc = falcon_reset_macs(efx);
2265out:
2266 efx_stats_enable(efx);
2267 return rc;
2268} 871}
2269 872
2270/* This call is responsible for hooking in the MAC and PHY operations */ 873/* This call is responsible for hooking in the MAC and PHY operations */
2271int falcon_probe_port(struct efx_nic *efx) 874static int falcon_probe_port(struct efx_nic *efx)
2272{ 875{
2273 int rc; 876 int rc;
2274 877
2275 /* Hook in PHY operations table */ 878 switch (efx->phy_type) {
2276 rc = falcon_probe_phy(efx); 879 case PHY_TYPE_SFX7101:
2277 if (rc) 880 efx->phy_op = &falcon_sfx7101_phy_ops;
2278 return rc; 881 break;
882 case PHY_TYPE_SFT9001A:
883 case PHY_TYPE_SFT9001B:
884 efx->phy_op = &falcon_sft9001_phy_ops;
885 break;
886 case PHY_TYPE_QT2022C2:
887 case PHY_TYPE_QT2025C:
888 efx->phy_op = &falcon_qt202x_phy_ops;
889 break;
890 default:
891 EFX_ERR(efx, "Unknown PHY type %d\n",
892 efx->phy_type);
893 return -ENODEV;
894 }
2279 895
2280 /* Set up MDIO structure for PHY */ 896 /* Fill out MDIO structure and loopback modes */
2281 efx->mdio.mmds = efx->phy_op->mmds;
2282 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
2283 efx->mdio.mdio_read = falcon_mdio_read; 897 efx->mdio.mdio_read = falcon_mdio_read;
2284 efx->mdio.mdio_write = falcon_mdio_write; 898 efx->mdio.mdio_write = falcon_mdio_write;
899 rc = efx->phy_op->probe(efx);
900 if (rc != 0)
901 return rc;
902
903 /* Initial assumption */
904 efx->link_state.speed = 10000;
905 efx->link_state.fd = true;
2285 906
2286 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 907 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2287 if (falcon_rev(efx) >= FALCON_REV_B0) 908 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
2288 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; 909 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2289 else 910 else
2290 efx->wanted_fc = EFX_FC_RX; 911 efx->wanted_fc = EFX_FC_RX;
2291 912
2292 /* Allocate buffer for stats */ 913 /* Allocate buffer for stats */
2293 rc = falcon_alloc_buffer(efx, &efx->stats_buffer, 914 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
2294 FALCON_MAC_STATS_SIZE); 915 FALCON_MAC_STATS_SIZE);
2295 if (rc) 916 if (rc)
2296 return rc; 917 return rc;
2297 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", 918 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
@@ -2302,40 +923,19 @@ int falcon_probe_port(struct efx_nic *efx)
2302 return 0; 923 return 0;
2303} 924}
2304 925
2305void falcon_remove_port(struct efx_nic *efx) 926static void falcon_remove_port(struct efx_nic *efx)
2306{ 927{
2307 falcon_free_buffer(efx, &efx->stats_buffer); 928 efx_nic_free_buffer(efx, &efx->stats_buffer);
2308} 929}
2309 930
2310/************************************************************************** 931/**************************************************************************
2311 * 932 *
2312 * Multicast filtering
2313 *
2314 **************************************************************************
2315 */
2316
2317void falcon_set_multicast_hash(struct efx_nic *efx)
2318{
2319 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2320
2321 /* Broadcast packets go through the multicast hash filter.
2322 * ether_crc_le() of the broadcast address is 0xbe2612ff
2323 * so we always add bit 0xff to the mask.
2324 */
2325 set_bit_le(0xff, mc_hash->byte);
2326
2327 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
2328 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
2329}
2330
2331
2332/**************************************************************************
2333 *
2334 * Falcon test code 933 * Falcon test code
2335 * 934 *
2336 **************************************************************************/ 935 **************************************************************************/
2337 936
2338int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) 937static int
938falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2339{ 939{
2340 struct falcon_nvconfig *nvconfig; 940 struct falcon_nvconfig *nvconfig;
2341 struct efx_spi_device *spi; 941 struct efx_spi_device *spi;
@@ -2351,10 +951,10 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2351 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 951 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2352 if (!region) 952 if (!region)
2353 return -ENOMEM; 953 return -ENOMEM;
2354 nvconfig = region + NVCONFIG_OFFSET; 954 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2355 955
2356 mutex_lock(&efx->spi_lock); 956 mutex_lock(&efx->spi_lock);
2357 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); 957 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
2358 mutex_unlock(&efx->spi_lock); 958 mutex_unlock(&efx->spi_lock);
2359 if (rc) { 959 if (rc) {
2360 EFX_ERR(efx, "Failed to read %s\n", 960 EFX_ERR(efx, "Failed to read %s\n",
@@ -2367,7 +967,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2367 struct_ver = le16_to_cpu(nvconfig->board_struct_ver); 967 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2368 968
2369 rc = -EINVAL; 969 rc = -EINVAL;
2370 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { 970 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2371 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); 971 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2372 goto out; 972 goto out;
2373 } 973 }
@@ -2398,107 +998,54 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2398 return rc; 998 return rc;
2399} 999}
2400 1000
2401/* Registers tested in the falcon register test */ 1001static int falcon_test_nvram(struct efx_nic *efx)
2402static struct { 1002{
2403 unsigned address; 1003 return falcon_read_nvram(efx, NULL);
2404 efx_oword_t mask; 1004}
2405} efx_test_registers[] = { 1005
2406 { ADR_REGION_REG_KER, 1006static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1007 { FR_AZ_ADR_REGION,
2407 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 1008 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2408 { RX_CFG_REG_KER, 1009 { FR_AZ_RX_CFG,
2409 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, 1010 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2410 { TX_CFG_REG_KER, 1011 { FR_AZ_TX_CFG,
2411 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, 1012 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2412 { TX_CFG2_REG_KER, 1013 { FR_AZ_TX_RESERVED,
2413 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, 1014 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2414 { MAC0_CTRL_REG_KER, 1015 { FR_AB_MAC_CTRL,
2415 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, 1016 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2416 { SRM_TX_DC_CFG_REG_KER, 1017 { FR_AZ_SRM_TX_DC_CFG,
2417 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, 1018 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2418 { RX_DC_CFG_REG_KER, 1019 { FR_AZ_RX_DC_CFG,
2419 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, 1020 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2420 { RX_DC_PF_WM_REG_KER, 1021 { FR_AZ_RX_DC_PF_WM,
2421 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, 1022 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2422 { DP_CTRL_REG, 1023 { FR_BZ_DP_CTRL,
2423 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, 1024 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2424 { GM_CFG2_REG, 1025 { FR_AB_GM_CFG2,
2425 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, 1026 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2426 { GMF_CFG0_REG, 1027 { FR_AB_GMF_CFG0,
2427 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, 1028 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2428 { XM_GLB_CFG_REG, 1029 { FR_AB_XM_GLB_CFG,
2429 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, 1030 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2430 { XM_TX_CFG_REG, 1031 { FR_AB_XM_TX_CFG,
2431 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, 1032 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2432 { XM_RX_CFG_REG, 1033 { FR_AB_XM_RX_CFG,
2433 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, 1034 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2434 { XM_RX_PARAM_REG, 1035 { FR_AB_XM_RX_PARAM,
2435 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, 1036 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2436 { XM_FC_REG, 1037 { FR_AB_XM_FC,
2437 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, 1038 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2438 { XM_ADR_LO_REG, 1039 { FR_AB_XM_ADR_LO,
2439 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, 1040 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2440 { XX_SD_CTL_REG, 1041 { FR_AB_XX_SD_CTL,
2441 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, 1042 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2442}; 1043};
2443 1044
2444static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 1045static int falcon_b0_test_registers(struct efx_nic *efx)
2445 const efx_oword_t *mask)
2446{ 1046{
2447 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || 1047 return efx_nic_test_registers(efx, falcon_b0_register_tests,
2448 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); 1048 ARRAY_SIZE(falcon_b0_register_tests));
2449}
2450
2451int falcon_test_registers(struct efx_nic *efx)
2452{
2453 unsigned address = 0, i, j;
2454 efx_oword_t mask, imask, original, reg, buf;
2455
2456 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2457 WARN_ON(!LOOPBACK_INTERNAL(efx));
2458
2459 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2460 address = efx_test_registers[i].address;
2461 mask = imask = efx_test_registers[i].mask;
2462 EFX_INVERT_OWORD(imask);
2463
2464 falcon_read(efx, &original, address);
2465
2466 /* bit sweep on and off */
2467 for (j = 0; j < 128; j++) {
2468 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2469 continue;
2470
2471 /* Test this testable bit can be set in isolation */
2472 EFX_AND_OWORD(reg, original, mask);
2473 EFX_SET_OWORD32(reg, j, j, 1);
2474
2475 falcon_write(efx, &reg, address);
2476 falcon_read(efx, &buf, address);
2477
2478 if (efx_masked_compare_oword(&reg, &buf, &mask))
2479 goto fail;
2480
2481 /* Test this testable bit can be cleared in isolation */
2482 EFX_OR_OWORD(reg, original, mask);
2483 EFX_SET_OWORD32(reg, j, j, 0);
2484
2485 falcon_write(efx, &reg, address);
2486 falcon_read(efx, &buf, address);
2487
2488 if (efx_masked_compare_oword(&reg, &buf, &mask))
2489 goto fail;
2490 }
2491
2492 falcon_write(efx, &original, address);
2493 }
2494
2495 return 0;
2496
2497fail:
2498 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2499 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2500 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2501 return -EIO;
2502} 1049}
2503 1050
2504/************************************************************************** 1051/**************************************************************************
@@ -2510,13 +1057,13 @@ fail:
2510 1057
2511/* Resets NIC to known state. This routine must be called in process 1058/* Resets NIC to known state. This routine must be called in process
2512 * context and is allowed to sleep. */ 1059 * context and is allowed to sleep. */
2513int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) 1060static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2514{ 1061{
2515 struct falcon_nic_data *nic_data = efx->nic_data; 1062 struct falcon_nic_data *nic_data = efx->nic_data;
2516 efx_oword_t glb_ctl_reg_ker; 1063 efx_oword_t glb_ctl_reg_ker;
2517 int rc; 1064 int rc;
2518 1065
2519 EFX_LOG(efx, "performing hardware reset (%d)\n", method); 1066 EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
2520 1067
2521 /* Initiate device reset */ 1068 /* Initiate device reset */
2522 if (method == RESET_TYPE_WORLD) { 1069 if (method == RESET_TYPE_WORLD) {
@@ -2526,7 +1073,7 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2526 "function prior to hardware reset\n"); 1073 "function prior to hardware reset\n");
2527 goto fail1; 1074 goto fail1;
2528 } 1075 }
2529 if (FALCON_IS_DUAL_FUNC(efx)) { 1076 if (efx_nic_is_dual_func(efx)) {
2530 rc = pci_save_state(nic_data->pci_dev2); 1077 rc = pci_save_state(nic_data->pci_dev2);
2531 if (rc) { 1078 if (rc) {
2532 EFX_ERR(efx, "failed to backup PCI state of " 1079 EFX_ERR(efx, "failed to backup PCI state of "
@@ -2537,29 +1084,31 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2537 } 1084 }
2538 1085
2539 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, 1086 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2540 EXT_PHY_RST_DUR, 0x7, 1087 FRF_AB_EXT_PHY_RST_DUR,
2541 SWRST, 1); 1088 FFE_AB_EXT_PHY_RST_DUR_10240US,
1089 FRF_AB_SWRST, 1);
2542 } else { 1090 } else {
2543 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2544 EXCLUDE_FROM_RESET : 0);
2545
2546 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, 1091 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2547 EXT_PHY_RST_CTL, reset_phy, 1092 /* exclude PHY from "invisible" reset */
2548 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, 1093 FRF_AB_EXT_PHY_RST_CTL,
2549 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, 1094 method == RESET_TYPE_INVISIBLE,
2550 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, 1095 /* exclude EEPROM/flash and PCIe */
2551 EE_RST_CTL, EXCLUDE_FROM_RESET, 1096 FRF_AB_PCIE_CORE_RST_CTL, 1,
2552 EXT_PHY_RST_DUR, 0x7 /* 10ms */, 1097 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2553 SWRST, 1); 1098 FRF_AB_PCIE_SD_RST_CTL, 1,
2554 } 1099 FRF_AB_EE_RST_CTL, 1,
2555 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 1100 FRF_AB_EXT_PHY_RST_DUR,
1101 FFE_AB_EXT_PHY_RST_DUR_10240US,
1102 FRF_AB_SWRST, 1);
1103 }
1104 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2556 1105
2557 EFX_LOG(efx, "waiting for hardware reset\n"); 1106 EFX_LOG(efx, "waiting for hardware reset\n");
2558 schedule_timeout_uninterruptible(HZ / 20); 1107 schedule_timeout_uninterruptible(HZ / 20);
2559 1108
2560 /* Restore PCI configuration if needed */ 1109 /* Restore PCI configuration if needed */
2561 if (method == RESET_TYPE_WORLD) { 1110 if (method == RESET_TYPE_WORLD) {
2562 if (FALCON_IS_DUAL_FUNC(efx)) { 1111 if (efx_nic_is_dual_func(efx)) {
2563 rc = pci_restore_state(nic_data->pci_dev2); 1112 rc = pci_restore_state(nic_data->pci_dev2);
2564 if (rc) { 1113 if (rc) {
2565 EFX_ERR(efx, "failed to restore PCI config for " 1114 EFX_ERR(efx, "failed to restore PCI config for "
@@ -2577,8 +1126,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2577 } 1126 }
2578 1127
2579 /* Assert that reset complete */ 1128 /* Assert that reset complete */
2580 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 1129 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2581 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { 1130 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2582 rc = -ETIMEDOUT; 1131 rc = -ETIMEDOUT;
2583 EFX_ERR(efx, "timed out waiting for hardware reset\n"); 1132 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2584 goto fail5; 1133 goto fail5;
@@ -2597,6 +1146,44 @@ fail5:
2597 return rc; 1146 return rc;
2598} 1147}
2599 1148
1149static void falcon_monitor(struct efx_nic *efx)
1150{
1151 bool link_changed;
1152 int rc;
1153
1154 BUG_ON(!mutex_is_locked(&efx->mac_lock));
1155
1156 rc = falcon_board(efx)->type->monitor(efx);
1157 if (rc) {
1158 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
1159 (rc == -ERANGE) ? "reported fault" : "failed");
1160 efx->phy_mode |= PHY_MODE_LOW_POWER;
1161 rc = __efx_reconfigure_port(efx);
1162 WARN_ON(rc);
1163 }
1164
1165 if (LOOPBACK_INTERNAL(efx))
1166 link_changed = falcon_loopback_link_poll(efx);
1167 else
1168 link_changed = efx->phy_op->poll(efx);
1169
1170 if (link_changed) {
1171 falcon_stop_nic_stats(efx);
1172 falcon_deconfigure_mac_wrapper(efx);
1173
1174 falcon_switch_mac(efx);
1175 rc = efx->mac_op->reconfigure(efx);
1176 BUG_ON(rc);
1177
1178 falcon_start_nic_stats(efx);
1179
1180 efx_link_status_changed(efx);
1181 }
1182
1183 if (EFX_IS10G(efx))
1184 falcon_poll_xmac(efx);
1185}
1186
2600/* Zeroes out the SRAM contents. This routine must be called in 1187/* Zeroes out the SRAM contents. This routine must be called in
2601 * process context and is allowed to sleep. 1188 * process context and is allowed to sleep.
2602 */ 1189 */
@@ -2606,16 +1193,16 @@ static int falcon_reset_sram(struct efx_nic *efx)
2606 int count; 1193 int count;
2607 1194
2608 /* Set the SRAM wake/sleep GPIO appropriately. */ 1195 /* Set the SRAM wake/sleep GPIO appropriately. */
2609 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 1196 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2610 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); 1197 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2611 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); 1198 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2612 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 1199 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2613 1200
2614 /* Initiate SRAM reset */ 1201 /* Initiate SRAM reset */
2615 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, 1202 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2616 SRAM_OOB_BT_INIT_EN, 1, 1203 FRF_AZ_SRM_INIT_EN, 1,
2617 SRM_NUM_BANKS_AND_BANK_SIZE, 0); 1204 FRF_AZ_SRM_NB_SZ, 0);
2618 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 1205 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2619 1206
2620 /* Wait for SRAM reset to complete */ 1207 /* Wait for SRAM reset to complete */
2621 count = 0; 1208 count = 0;
@@ -2626,8 +1213,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
2626 schedule_timeout_uninterruptible(HZ / 50); 1213 schedule_timeout_uninterruptible(HZ / 50);
2627 1214
2628 /* Check for reset complete */ 1215 /* Check for reset complete */
2629 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 1216 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2630 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { 1217 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2631 EFX_LOG(efx, "SRAM reset complete\n"); 1218 EFX_LOG(efx, "SRAM reset complete\n");
2632 1219
2633 return 0; 1220 return 0;
@@ -2663,8 +1250,6 @@ static int falcon_spi_device_init(struct efx_nic *efx,
2663 spi_device->block_size = 1250 spi_device->block_size =
2664 1 << SPI_DEV_TYPE_FIELD(device_type, 1251 1 << SPI_DEV_TYPE_FIELD(device_type,
2665 SPI_DEV_TYPE_BLOCK_SIZE); 1252 SPI_DEV_TYPE_BLOCK_SIZE);
2666
2667 spi_device->efx = efx;
2668 } else { 1253 } else {
2669 spi_device = NULL; 1254 spi_device = NULL;
2670 } 1255 }
@@ -2674,7 +1259,6 @@ static int falcon_spi_device_init(struct efx_nic *efx,
2674 return 0; 1259 return 0;
2675} 1260}
2676 1261
2677
2678static void falcon_remove_spi_devices(struct efx_nic *efx) 1262static void falcon_remove_spi_devices(struct efx_nic *efx)
2679{ 1263{
2680 kfree(efx->spi_eeprom); 1264 kfree(efx->spi_eeprom);
@@ -2712,16 +1296,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2712 board_rev = le16_to_cpu(v2->board_revision); 1296 board_rev = le16_to_cpu(v2->board_revision);
2713 1297
2714 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { 1298 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2715 __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; 1299 rc = falcon_spi_device_init(
2716 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; 1300 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2717 rc = falcon_spi_device_init(efx, &efx->spi_flash, 1301 le32_to_cpu(v3->spi_device_type
2718 EE_SPI_FLASH, 1302 [FFE_AB_SPI_DEVICE_FLASH]));
2719 le32_to_cpu(fl));
2720 if (rc) 1303 if (rc)
2721 goto fail2; 1304 goto fail2;
2722 rc = falcon_spi_device_init(efx, &efx->spi_eeprom, 1305 rc = falcon_spi_device_init(
2723 EE_SPI_EEPROM, 1306 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2724 le32_to_cpu(ee)); 1307 le32_to_cpu(v3->spi_device_type
1308 [FFE_AB_SPI_DEVICE_EEPROM]));
2725 if (rc) 1309 if (rc)
2726 goto fail2; 1310 goto fail2;
2727 } 1311 }
@@ -2732,7 +1316,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2732 1316
2733 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 1317 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2734 1318
2735 efx_set_board_info(efx, board_rev); 1319 falcon_probe_board(efx, board_rev);
2736 1320
2737 kfree(nvconfig); 1321 kfree(nvconfig);
2738 return 0; 1322 return 0;
@@ -2744,89 +1328,49 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2744 return rc; 1328 return rc;
2745} 1329}
2746 1330
2747/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2748 * count, port speed). Set workaround and feature flags accordingly.
2749 */
2750static int falcon_probe_nic_variant(struct efx_nic *efx)
2751{
2752 efx_oword_t altera_build;
2753 efx_oword_t nic_stat;
2754
2755 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
2756 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
2757 EFX_ERR(efx, "Falcon FPGA not supported\n");
2758 return -ENODEV;
2759 }
2760
2761 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2762
2763 switch (falcon_rev(efx)) {
2764 case FALCON_REV_A0:
2765 case 0xff:
2766 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2767 return -ENODEV;
2768
2769 case FALCON_REV_A1:
2770 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
2771 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2772 return -ENODEV;
2773 }
2774 break;
2775
2776 case FALCON_REV_B0:
2777 break;
2778
2779 default:
2780 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2781 return -ENODEV;
2782 }
2783
2784 /* Initial assumed speed */
2785 efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
2786
2787 return 0;
2788}
2789
2790/* Probe all SPI devices on the NIC */ 1331/* Probe all SPI devices on the NIC */
2791static void falcon_probe_spi_devices(struct efx_nic *efx) 1332static void falcon_probe_spi_devices(struct efx_nic *efx)
2792{ 1333{
2793 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 1334 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2794 int boot_dev; 1335 int boot_dev;
2795 1336
2796 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); 1337 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2797 falcon_read(efx, &nic_stat, NIC_STAT_REG); 1338 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2798 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 1339 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2799 1340
2800 if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) { 1341 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2801 boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ? 1342 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2802 EE_SPI_FLASH : EE_SPI_EEPROM); 1343 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2803 EFX_LOG(efx, "Booted from %s\n", 1344 EFX_LOG(efx, "Booted from %s\n",
2804 boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM"); 1345 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2805 } else { 1346 } else {
2806 /* Disable VPD and set clock dividers to safe 1347 /* Disable VPD and set clock dividers to safe
2807 * values for initial programming. */ 1348 * values for initial programming. */
2808 boot_dev = -1; 1349 boot_dev = -1;
2809 EFX_LOG(efx, "Booted from internal ASIC settings;" 1350 EFX_LOG(efx, "Booted from internal ASIC settings;"
2810 " setting SPI config\n"); 1351 " setting SPI config\n");
2811 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, 1352 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2812 /* 125 MHz / 7 ~= 20 MHz */ 1353 /* 125 MHz / 7 ~= 20 MHz */
2813 EE_SF_CLOCK_DIV, 7, 1354 FRF_AB_EE_SF_CLOCK_DIV, 7,
2814 /* 125 MHz / 63 ~= 2 MHz */ 1355 /* 125 MHz / 63 ~= 2 MHz */
2815 EE_EE_CLOCK_DIV, 63); 1356 FRF_AB_EE_EE_CLOCK_DIV, 63);
2816 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 1357 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2817 } 1358 }
2818 1359
2819 if (boot_dev == EE_SPI_FLASH) 1360 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2820 falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH, 1361 falcon_spi_device_init(efx, &efx->spi_flash,
1362 FFE_AB_SPI_DEVICE_FLASH,
2821 default_flash_type); 1363 default_flash_type);
2822 if (boot_dev == EE_SPI_EEPROM) 1364 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2823 falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM, 1365 falcon_spi_device_init(efx, &efx->spi_eeprom,
1366 FFE_AB_SPI_DEVICE_EEPROM,
2824 large_eeprom_type); 1367 large_eeprom_type);
2825} 1368}
2826 1369
2827int falcon_probe_nic(struct efx_nic *efx) 1370static int falcon_probe_nic(struct efx_nic *efx)
2828{ 1371{
2829 struct falcon_nic_data *nic_data; 1372 struct falcon_nic_data *nic_data;
1373 struct falcon_board *board;
2830 int rc; 1374 int rc;
2831 1375
2832 /* Allocate storage for hardware specific data */ 1376 /* Allocate storage for hardware specific data */
@@ -2835,15 +1379,33 @@ int falcon_probe_nic(struct efx_nic *efx)
2835 return -ENOMEM; 1379 return -ENOMEM;
2836 efx->nic_data = nic_data; 1380 efx->nic_data = nic_data;
2837 1381
2838 /* Determine number of ports etc. */ 1382 rc = -ENODEV;
2839 rc = falcon_probe_nic_variant(efx); 1383
2840 if (rc) 1384 if (efx_nic_fpga_ver(efx) != 0) {
1385 EFX_ERR(efx, "Falcon FPGA not supported\n");
2841 goto fail1; 1386 goto fail1;
1387 }
2842 1388
2843 /* Probe secondary function if expected */ 1389 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2844 if (FALCON_IS_DUAL_FUNC(efx)) { 1390 efx_oword_t nic_stat;
2845 struct pci_dev *dev = pci_dev_get(efx->pci_dev); 1391 struct pci_dev *dev;
1392 u8 pci_rev = efx->pci_dev->revision;
1393
1394 if ((pci_rev == 0xff) || (pci_rev == 0)) {
1395 EFX_ERR(efx, "Falcon rev A0 not supported\n");
1396 goto fail1;
1397 }
1398 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1399 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
1400 EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
1401 goto fail1;
1402 }
1403 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
1404 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
1405 goto fail1;
1406 }
2846 1407
1408 dev = pci_dev_get(efx->pci_dev);
2847 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, 1409 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2848 dev))) { 1410 dev))) {
2849 if (dev->bus == efx->pci_dev->bus && 1411 if (dev->bus == efx->pci_dev->bus &&
@@ -2867,7 +1429,7 @@ int falcon_probe_nic(struct efx_nic *efx)
2867 } 1429 }
2868 1430
2869 /* Allocate memory for INT_KER */ 1431 /* Allocate memory for INT_KER */
2870 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); 1432 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2871 if (rc) 1433 if (rc)
2872 goto fail4; 1434 goto fail4;
2873 BUG_ON(efx->irq_status.dma_addr & 0x0f); 1435 BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -2884,21 +1446,36 @@ int falcon_probe_nic(struct efx_nic *efx)
2884 goto fail5; 1446 goto fail5;
2885 1447
2886 /* Initialise I2C adapter */ 1448 /* Initialise I2C adapter */
2887 efx->i2c_adap.owner = THIS_MODULE; 1449 board = falcon_board(efx);
2888 nic_data->i2c_data = falcon_i2c_bit_operations; 1450 board->i2c_adap.owner = THIS_MODULE;
2889 nic_data->i2c_data.data = efx; 1451 board->i2c_data = falcon_i2c_bit_operations;
2890 efx->i2c_adap.algo_data = &nic_data->i2c_data; 1452 board->i2c_data.data = efx;
2891 efx->i2c_adap.dev.parent = &efx->pci_dev->dev; 1453 board->i2c_adap.algo_data = &board->i2c_data;
2892 strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name)); 1454 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2893 rc = i2c_bit_add_bus(&efx->i2c_adap); 1455 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
1456 sizeof(board->i2c_adap.name));
1457 rc = i2c_bit_add_bus(&board->i2c_adap);
2894 if (rc) 1458 if (rc)
2895 goto fail5; 1459 goto fail5;
2896 1460
1461 rc = falcon_board(efx)->type->init(efx);
1462 if (rc) {
1463 EFX_ERR(efx, "failed to initialise board\n");
1464 goto fail6;
1465 }
1466
1467 nic_data->stats_disable_count = 1;
1468 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
1469 (unsigned long)efx);
1470
2897 return 0; 1471 return 0;
2898 1472
1473 fail6:
1474 BUG_ON(i2c_del_adapter(&board->i2c_adap));
1475 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2899 fail5: 1476 fail5:
2900 falcon_remove_spi_devices(efx); 1477 falcon_remove_spi_devices(efx);
2901 falcon_free_buffer(efx, &efx->irq_status); 1478 efx_nic_free_buffer(efx, &efx->irq_status);
2902 fail4: 1479 fail4:
2903 fail3: 1480 fail3:
2904 if (nic_data->pci_dev2) { 1481 if (nic_data->pci_dev2) {
@@ -2911,166 +1488,147 @@ int falcon_probe_nic(struct efx_nic *efx)
2911 return rc; 1488 return rc;
2912} 1489}
2913 1490
1491static void falcon_init_rx_cfg(struct efx_nic *efx)
1492{
1493 /* Prior to Siena the RX DMA engine will split each frame at
1494 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
1495 * be so large that that never happens. */
1496 const unsigned huge_buf_size = (3 * 4096) >> 5;
1497 /* RX control FIFO thresholds (32 entries) */
1498 const unsigned ctrl_xon_thr = 20;
1499 const unsigned ctrl_xoff_thr = 25;
1500 /* RX data FIFO thresholds (256-byte units; size varies) */
1501 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1502 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1503 efx_oword_t reg;
1504
1505 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1506 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1507 /* Data FIFO size is 5.5K */
1508 if (data_xon_thr < 0)
1509 data_xon_thr = 512 >> 8;
1510 if (data_xoff_thr < 0)
1511 data_xoff_thr = 2048 >> 8;
1512 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1513 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1514 huge_buf_size);
1515 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
1516 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
1517 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1518 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1519 } else {
1520 /* Data FIFO size is 80K; register fields moved */
1521 if (data_xon_thr < 0)
1522 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1523 if (data_xoff_thr < 0)
1524 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1525 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1526 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1527 huge_buf_size);
1528 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
1529 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
1530 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1531 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1532 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
1533 }
1534 /* Always enable XOFF signal from RX FIFO. We enable
1535 * or disable transmission of pause frames at the MAC. */
1536 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
1537 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1538}
1539
2914/* This call performs hardware-specific global initialisation, such as 1540/* This call performs hardware-specific global initialisation, such as
2915 * defining the descriptor cache sizes and number of RSS channels. 1541 * defining the descriptor cache sizes and number of RSS channels.
2916 * It does not set up any buffers, descriptor rings or event queues. 1542 * It does not set up any buffers, descriptor rings or event queues.
2917 */ 1543 */
2918int falcon_init_nic(struct efx_nic *efx) 1544static int falcon_init_nic(struct efx_nic *efx)
2919{ 1545{
2920 efx_oword_t temp; 1546 efx_oword_t temp;
2921 unsigned thresh;
2922 int rc; 1547 int rc;
2923 1548
2924 /* Use on-chip SRAM */ 1549 /* Use on-chip SRAM */
2925 falcon_read(efx, &temp, NIC_STAT_REG); 1550 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2926 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); 1551 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2927 falcon_write(efx, &temp, NIC_STAT_REG); 1552 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2928 1553
2929 /* Set the source of the GMAC clock */ 1554 /* Set the source of the GMAC clock */
2930 if (falcon_rev(efx) == FALCON_REV_B0) { 1555 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
2931 falcon_read(efx, &temp, GPIO_CTL_REG_KER); 1556 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
2932 EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true); 1557 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
2933 falcon_write(efx, &temp, GPIO_CTL_REG_KER); 1558 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
2934 } 1559 }
2935 1560
2936 /* Set buffer table mode */ 1561 /* Select the correct MAC */
2937 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL); 1562 falcon_clock_mac(efx);
2938 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2939 1563
2940 rc = falcon_reset_sram(efx); 1564 rc = falcon_reset_sram(efx);
2941 if (rc) 1565 if (rc)
2942 return rc; 1566 return rc;
2943 1567
2944 /* Set positions of descriptor caches in SRAM. */
2945 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2946 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
2947 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2948 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
2949
2950 /* Set TX descriptor cache size. */
2951 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2952 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2953 falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
2954
2955 /* Set RX descriptor cache size. Set low watermark to size-8, as
2956 * this allows most efficient prefetching.
2957 */
2958 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2959 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2960 falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
2961 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2962 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
2963
2964 /* Clear the parity enables on the TX data fifos as 1568 /* Clear the parity enables on the TX data fifos as
2965 * they produce false parity errors because of timing issues 1569 * they produce false parity errors because of timing issues
2966 */ 1570 */
2967 if (EFX_WORKAROUND_5129(efx)) { 1571 if (EFX_WORKAROUND_5129(efx)) {
2968 falcon_read(efx, &temp, SPARE_REG_KER); 1572 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2969 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); 1573 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2970 falcon_write(efx, &temp, SPARE_REG_KER); 1574 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2971 } 1575 }
2972 1576
2973 /* Enable all the genuinely fatal interrupts. (They are still
2974 * masked by the overall interrupt mask, controlled by
2975 * falcon_interrupts()).
2976 *
2977 * Note: All other fatal interrupts are enabled
2978 */
2979 EFX_POPULATE_OWORD_3(temp,
2980 ILL_ADR_INT_KER_EN, 1,
2981 RBUF_OWN_INT_KER_EN, 1,
2982 TBUF_OWN_INT_KER_EN, 1);
2983 EFX_INVERT_OWORD(temp);
2984 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2985
2986 if (EFX_WORKAROUND_7244(efx)) { 1577 if (EFX_WORKAROUND_7244(efx)) {
2987 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 1578 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
2988 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); 1579 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2989 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); 1580 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2990 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); 1581 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2991 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); 1582 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
2992 falcon_write(efx, &temp, RX_FILTER_CTL_REG); 1583 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
2993 } 1584 }
2994 1585
2995 falcon_setup_rss_indir_table(efx); 1586 /* XXX This is documented only for Falcon A0/A1 */
2996
2997 /* Setup RX. Wait for descriptor is broken and must 1587 /* Setup RX. Wait for descriptor is broken and must
2998 * be disabled. RXDP recovery shouldn't be needed, but is. 1588 * be disabled. RXDP recovery shouldn't be needed, but is.
2999 */ 1589 */
3000 falcon_read(efx, &temp, RX_SELF_RST_REG_KER); 1590 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3001 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); 1591 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3002 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); 1592 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3003 if (EFX_WORKAROUND_5583(efx)) 1593 if (EFX_WORKAROUND_5583(efx))
3004 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); 1594 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3005 falcon_write(efx, &temp, RX_SELF_RST_REG_KER); 1595 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3006
3007 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3008 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3009 */
3010 falcon_read(efx, &temp, TX_CFG2_REG_KER);
3011 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
3012 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
3013 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
3014 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
3015 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
3016 /* Enable SW_EV to inherit in char driver - assume harmless here */
3017 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
3018 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3019 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
3020 /* Squash TX of packets of 16 bytes or less */
3021 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
3022 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
3023 falcon_write(efx, &temp, TX_CFG2_REG_KER);
3024 1596
3025 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 1597 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3026 * descriptors (which is bad). 1598 * descriptors (which is bad).
3027 */ 1599 */
3028 falcon_read(efx, &temp, TX_CFG_REG_KER); 1600 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3029 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); 1601 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3030 falcon_write(efx, &temp, TX_CFG_REG_KER); 1602 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3031 1603
3032 /* RX config */ 1604 falcon_init_rx_cfg(efx);
3033 falcon_read(efx, &temp, RX_CFG_REG_KER);
3034 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
3035 if (EFX_WORKAROUND_7575(efx))
3036 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
3037 (3 * 4096) / 32);
3038 if (falcon_rev(efx) >= FALCON_REV_B0)
3039 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
3040
3041 /* RX FIFO flow control thresholds */
3042 thresh = ((rx_xon_thresh_bytes >= 0) ?
3043 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
3044 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
3045 thresh = ((rx_xoff_thresh_bytes >= 0) ?
3046 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
3047 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
3048 /* RX control FIFO thresholds [32 entries] */
3049 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
3050 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
3051 falcon_write(efx, &temp, RX_CFG_REG_KER);
3052 1605
3053 /* Set destination of both TX and RX Flush events */ 1606 /* Set destination of both TX and RX Flush events */
3054 if (falcon_rev(efx) >= FALCON_REV_B0) { 1607 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
3055 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 1608 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3056 falcon_write(efx, &temp, DP_CTRL_REG); 1609 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3057 } 1610 }
3058 1611
1612 efx_nic_init_common(efx);
1613
3059 return 0; 1614 return 0;
3060} 1615}
3061 1616
3062void falcon_remove_nic(struct efx_nic *efx) 1617static void falcon_remove_nic(struct efx_nic *efx)
3063{ 1618{
3064 struct falcon_nic_data *nic_data = efx->nic_data; 1619 struct falcon_nic_data *nic_data = efx->nic_data;
1620 struct falcon_board *board = falcon_board(efx);
3065 int rc; 1621 int rc;
3066 1622
1623 board->type->fini(efx);
1624
3067 /* Remove I2C adapter and clear it in preparation for a retry */ 1625 /* Remove I2C adapter and clear it in preparation for a retry */
3068 rc = i2c_del_adapter(&efx->i2c_adap); 1626 rc = i2c_del_adapter(&board->i2c_adap);
3069 BUG_ON(rc); 1627 BUG_ON(rc);
3070 memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap)); 1628 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
3071 1629
3072 falcon_remove_spi_devices(efx); 1630 falcon_remove_spi_devices(efx);
3073 falcon_free_buffer(efx, &efx->irq_status); 1631 efx_nic_free_buffer(efx, &efx->irq_status);
3074 1632
3075 falcon_reset_hw(efx, RESET_TYPE_ALL); 1633 falcon_reset_hw(efx, RESET_TYPE_ALL);
3076 1634
@@ -3085,12 +1643,86 @@ void falcon_remove_nic(struct efx_nic *efx)
3085 efx->nic_data = NULL; 1643 efx->nic_data = NULL;
3086} 1644}
3087 1645
3088void falcon_update_nic_stats(struct efx_nic *efx) 1646static void falcon_update_nic_stats(struct efx_nic *efx)
3089{ 1647{
1648 struct falcon_nic_data *nic_data = efx->nic_data;
3090 efx_oword_t cnt; 1649 efx_oword_t cnt;
3091 1650
3092 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); 1651 if (nic_data->stats_disable_count)
3093 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); 1652 return;
1653
1654 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
1655 efx->n_rx_nodesc_drop_cnt +=
1656 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
1657
1658 if (nic_data->stats_pending &&
1659 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
1660 nic_data->stats_pending = false;
1661 rmb(); /* read the done flag before the stats */
1662 efx->mac_op->update_stats(efx);
1663 }
1664}
1665
1666void falcon_start_nic_stats(struct efx_nic *efx)
1667{
1668 struct falcon_nic_data *nic_data = efx->nic_data;
1669
1670 spin_lock_bh(&efx->stats_lock);
1671 if (--nic_data->stats_disable_count == 0)
1672 falcon_stats_request(efx);
1673 spin_unlock_bh(&efx->stats_lock);
1674}
1675
1676void falcon_stop_nic_stats(struct efx_nic *efx)
1677{
1678 struct falcon_nic_data *nic_data = efx->nic_data;
1679 int i;
1680
1681 might_sleep();
1682
1683 spin_lock_bh(&efx->stats_lock);
1684 ++nic_data->stats_disable_count;
1685 spin_unlock_bh(&efx->stats_lock);
1686
1687 del_timer_sync(&nic_data->stats_timer);
1688
1689 /* Wait enough time for the most recent transfer to
1690 * complete. */
1691 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
1692 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
1693 break;
1694 msleep(1);
1695 }
1696
1697 spin_lock_bh(&efx->stats_lock);
1698 falcon_stats_complete(efx);
1699 spin_unlock_bh(&efx->stats_lock);
1700}
1701
1702static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1703{
1704 falcon_board(efx)->type->set_id_led(efx, mode);
1705}
1706
1707/**************************************************************************
1708 *
1709 * Wake on LAN
1710 *
1711 **************************************************************************
1712 */
1713
1714static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1715{
1716 wol->supported = 0;
1717 wol->wolopts = 0;
1718 memset(&wol->sopass, 0, sizeof(wol->sopass));
1719}
1720
1721static int falcon_set_wol(struct efx_nic *efx, u32 type)
1722{
1723 if (type != 0)
1724 return -EINVAL;
1725 return 0;
3094} 1726}
3095 1727
3096/************************************************************************** 1728/**************************************************************************
@@ -3100,50 +1732,91 @@ void falcon_update_nic_stats(struct efx_nic *efx)
3100 ************************************************************************** 1732 **************************************************************************
3101 */ 1733 */
3102 1734
3103struct efx_nic_type falcon_a_nic_type = { 1735struct efx_nic_type falcon_a1_nic_type = {
3104 .mem_bar = 2, 1736 .probe = falcon_probe_nic,
1737 .remove = falcon_remove_nic,
1738 .init = falcon_init_nic,
1739 .fini = efx_port_dummy_op_void,
1740 .monitor = falcon_monitor,
1741 .reset = falcon_reset_hw,
1742 .probe_port = falcon_probe_port,
1743 .remove_port = falcon_remove_port,
1744 .prepare_flush = falcon_prepare_flush,
1745 .update_stats = falcon_update_nic_stats,
1746 .start_stats = falcon_start_nic_stats,
1747 .stop_stats = falcon_stop_nic_stats,
1748 .set_id_led = falcon_set_id_led,
1749 .push_irq_moderation = falcon_push_irq_moderation,
1750 .push_multicast_hash = falcon_push_multicast_hash,
1751 .reconfigure_port = falcon_reconfigure_port,
1752 .get_wol = falcon_get_wol,
1753 .set_wol = falcon_set_wol,
1754 .resume_wol = efx_port_dummy_op_void,
1755 .test_nvram = falcon_test_nvram,
1756 .default_mac_ops = &falcon_xmac_operations,
1757
1758 .revision = EFX_REV_FALCON_A1,
3105 .mem_map_size = 0x20000, 1759 .mem_map_size = 0x20000,
3106 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, 1760 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3107 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, 1761 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3108 .buf_tbl_base = BUF_TBL_KER_A1, 1762 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3109 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, 1763 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3110 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, 1764 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3111 .txd_ring_mask = FALCON_TXD_RING_MASK, 1765 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3112 .rxd_ring_mask = FALCON_RXD_RING_MASK,
3113 .evq_size = FALCON_EVQ_SIZE,
3114 .max_dma_mask = FALCON_DMA_MASK,
3115 .tx_dma_mask = FALCON_TX_DMA_MASK,
3116 .bug5391_mask = 0xf,
3117 .rx_xoff_thresh = 2048,
3118 .rx_xon_thresh = 512,
3119 .rx_buffer_padding = 0x24, 1766 .rx_buffer_padding = 0x24,
3120 .max_interrupt_mode = EFX_INT_MODE_MSI, 1767 .max_interrupt_mode = EFX_INT_MODE_MSI,
3121 .phys_addr_channels = 4, 1768 .phys_addr_channels = 4,
1769 .tx_dc_base = 0x130000,
1770 .rx_dc_base = 0x100000,
1771 .offload_features = NETIF_F_IP_CSUM,
1772 .reset_world_flags = ETH_RESET_IRQ,
3122}; 1773};
3123 1774
3124struct efx_nic_type falcon_b_nic_type = { 1775struct efx_nic_type falcon_b0_nic_type = {
3125 .mem_bar = 2, 1776 .probe = falcon_probe_nic,
1777 .remove = falcon_remove_nic,
1778 .init = falcon_init_nic,
1779 .fini = efx_port_dummy_op_void,
1780 .monitor = falcon_monitor,
1781 .reset = falcon_reset_hw,
1782 .probe_port = falcon_probe_port,
1783 .remove_port = falcon_remove_port,
1784 .prepare_flush = falcon_prepare_flush,
1785 .update_stats = falcon_update_nic_stats,
1786 .start_stats = falcon_start_nic_stats,
1787 .stop_stats = falcon_stop_nic_stats,
1788 .set_id_led = falcon_set_id_led,
1789 .push_irq_moderation = falcon_push_irq_moderation,
1790 .push_multicast_hash = falcon_push_multicast_hash,
1791 .reconfigure_port = falcon_reconfigure_port,
1792 .get_wol = falcon_get_wol,
1793 .set_wol = falcon_set_wol,
1794 .resume_wol = efx_port_dummy_op_void,
1795 .test_registers = falcon_b0_test_registers,
1796 .test_nvram = falcon_test_nvram,
1797 .default_mac_ops = &falcon_xmac_operations,
1798
1799 .revision = EFX_REV_FALCON_B0,
3126 /* Map everything up to and including the RSS indirection 1800 /* Map everything up to and including the RSS indirection
3127 * table. Don't map MSI-X table, MSI-X PBA since Linux 1801 * table. Don't map MSI-X table, MSI-X PBA since Linux
3128 * requires that they not be mapped. */ 1802 * requires that they not be mapped. */
3129 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, 1803 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3130 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, 1804 FR_BZ_RX_INDIRECTION_TBL_STEP *
3131 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, 1805 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3132 .buf_tbl_base = BUF_TBL_KER_B0, 1806 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3133 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, 1807 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3134 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, 1808 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3135 .txd_ring_mask = FALCON_TXD_RING_MASK, 1809 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3136 .rxd_ring_mask = FALCON_RXD_RING_MASK, 1810 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3137 .evq_size = FALCON_EVQ_SIZE, 1811 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3138 .max_dma_mask = FALCON_DMA_MASK,
3139 .tx_dma_mask = FALCON_TX_DMA_MASK,
3140 .bug5391_mask = 0,
3141 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
3142 .rx_xon_thresh = 27648, /* ~3*max MTU */
3143 .rx_buffer_padding = 0, 1812 .rx_buffer_padding = 0,
3144 .max_interrupt_mode = EFX_INT_MODE_MSIX, 1813 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3145 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 1814 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
3146 * interrupt handler only supports 32 1815 * interrupt handler only supports 32
3147 * channels */ 1816 * channels */
1817 .tx_dc_base = 0x130000,
1818 .rx_dc_base = 0x100000,
1819 .offload_features = NETIF_F_IP_CSUM,
1820 .reset_world_flags = ETH_RESET_IRQ,
3148}; 1821};
3149 1822
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
deleted file mode 100644
index 77f2e0db7ca1..000000000000
--- a/drivers/net/sfc/falcon.h
+++ /dev/null
@@ -1,145 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_H
12#define EFX_FALCON_H
13
14#include "net_driver.h"
15#include "efx.h"
16
17/*
18 * Falcon hardware control
19 */
20
21enum falcon_revision {
22 FALCON_REV_A0 = 0,
23 FALCON_REV_A1 = 1,
24 FALCON_REV_B0 = 2,
25};
26
27static inline int falcon_rev(struct efx_nic *efx)
28{
29 return efx->pci_dev->revision;
30}
31
32extern struct efx_nic_type falcon_a_nic_type;
33extern struct efx_nic_type falcon_b_nic_type;
34
35/**************************************************************************
36 *
37 * Externs
38 *
39 **************************************************************************
40 */
41
42/* TX data path */
43extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
44extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
45extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
46extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
47extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
48
49/* RX data path */
50extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
51extern void falcon_init_rx(struct efx_rx_queue *rx_queue);
52extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
53extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
54extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
55
56/* Event data path */
57extern int falcon_probe_eventq(struct efx_channel *channel);
58extern void falcon_init_eventq(struct efx_channel *channel);
59extern void falcon_fini_eventq(struct efx_channel *channel);
60extern void falcon_remove_eventq(struct efx_channel *channel);
61extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
62extern void falcon_eventq_read_ack(struct efx_channel *channel);
63
64/* Ports */
65extern int falcon_probe_port(struct efx_nic *efx);
66extern void falcon_remove_port(struct efx_nic *efx);
67
68/* MAC/PHY */
69extern int falcon_switch_mac(struct efx_nic *efx);
70extern bool falcon_xaui_link_ok(struct efx_nic *efx);
71extern int falcon_dma_stats(struct efx_nic *efx,
72 unsigned int done_offset);
73extern void falcon_drain_tx_fifo(struct efx_nic *efx);
74extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
75extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
76
77/* Interrupts and test events */
78extern int falcon_init_interrupt(struct efx_nic *efx);
79extern void falcon_enable_interrupts(struct efx_nic *efx);
80extern void falcon_generate_test_event(struct efx_channel *channel,
81 unsigned int magic);
82extern void falcon_sim_phy_event(struct efx_nic *efx);
83extern void falcon_generate_interrupt(struct efx_nic *efx);
84extern void falcon_set_int_moderation(struct efx_channel *channel);
85extern void falcon_disable_interrupts(struct efx_nic *efx);
86extern void falcon_fini_interrupt(struct efx_nic *efx);
87
88#define FALCON_IRQ_MOD_RESOLUTION 5
89
90/* Global Resources */
91extern int falcon_probe_nic(struct efx_nic *efx);
92extern int falcon_probe_resources(struct efx_nic *efx);
93extern int falcon_init_nic(struct efx_nic *efx);
94extern int falcon_flush_queues(struct efx_nic *efx);
95extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
96extern void falcon_remove_resources(struct efx_nic *efx);
97extern void falcon_remove_nic(struct efx_nic *efx);
98extern void falcon_update_nic_stats(struct efx_nic *efx);
99extern void falcon_set_multicast_hash(struct efx_nic *efx);
100extern int falcon_reset_xaui(struct efx_nic *efx);
101
102/* Tests */
103struct falcon_nvconfig;
104extern int falcon_read_nvram(struct efx_nic *efx,
105 struct falcon_nvconfig *nvconfig);
106extern int falcon_test_registers(struct efx_nic *efx);
107
108/**************************************************************************
109 *
110 * Falcon MAC stats
111 *
112 **************************************************************************
113 */
114
115#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
116#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
117
118/* Retrieve statistic from statistics block */
119#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
120 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
121 (efx)->mac_stats.efx_stat += le16_to_cpu( \
122 *((__force __le16 *) \
123 (efx->stats_buffer.addr + \
124 FALCON_STAT_OFFSET(falcon_stat)))); \
125 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
126 (efx)->mac_stats.efx_stat += le32_to_cpu( \
127 *((__force __le32 *) \
128 (efx->stats_buffer.addr + \
129 FALCON_STAT_OFFSET(falcon_stat)))); \
130 else \
131 (efx)->mac_stats.efx_stat += le64_to_cpu( \
132 *((__force __le64 *) \
133 (efx->stats_buffer.addr + \
134 FALCON_STAT_OFFSET(falcon_stat)))); \
135 } while (0)
136
137#define FALCON_MAC_STATS_SIZE 0x100
138
139#define MAC_DATA_LBN 0
140#define MAC_DATA_WIDTH 32
141
142extern void falcon_generate_event(struct efx_channel *channel,
143 efx_qword_t *event);
144
145#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
new file mode 100644
index 000000000000..bf0b96af5334
--- /dev/null
+++ b/drivers/net/sfc/falcon_boards.c
@@ -0,0 +1,752 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/rtnetlink.h>
11
12#include "net_driver.h"
13#include "phy.h"
14#include "efx.h"
15#include "nic.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h"
19
20/* Macros for unpacking the board revision */
21/* The revision info is in host byte order. */
22#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
23#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
24#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
25
26/* Board types */
27#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51
30#define FALCON_BOARD_SFN4112F 0x52
31
32/*****************************************************************************
33 * Support for LM87 sensor chip used on several boards
34 */
35#define LM87_REG_ALARMS1 0x41
36#define LM87_REG_ALARMS2 0x42
37#define LM87_IN_LIMITS(nr, _min, _max) \
38 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
39#define LM87_AIN_LIMITS(nr, _min, _max) \
40 0x3B + (nr), _max, 0x1A + (nr), _min
41#define LM87_TEMP_INT_LIMITS(_min, _max) \
42 0x39, _max, 0x3A, _min
43#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
44 0x37, _max, 0x38, _min
45
46#define LM87_ALARM_TEMP_INT 0x10
47#define LM87_ALARM_TEMP_EXT1 0x20
48
49#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
50
51static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
52 const u8 *reg_values)
53{
54 struct falcon_board *board = falcon_board(efx);
55 struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
56 int rc;
57
58 if (!client)
59 return -EIO;
60
61 while (*reg_values) {
62 u8 reg = *reg_values++;
63 u8 value = *reg_values++;
64 rc = i2c_smbus_write_byte_data(client, reg, value);
65 if (rc)
66 goto err;
67 }
68
69 board->hwmon_client = client;
70 return 0;
71
72err:
73 i2c_unregister_device(client);
74 return rc;
75}
76
77static void efx_fini_lm87(struct efx_nic *efx)
78{
79 i2c_unregister_device(falcon_board(efx)->hwmon_client);
80}
81
82static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
83{
84 struct i2c_client *client = falcon_board(efx)->hwmon_client;
85 s32 alarms1, alarms2;
86
87 /* If link is up then do not monitor temperature */
88 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
89 return 0;
90
91 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
92 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
93 if (alarms1 < 0)
94 return alarms1;
95 if (alarms2 < 0)
96 return alarms2;
97 alarms1 &= mask;
98 alarms2 &= mask >> 8;
99 if (alarms1 || alarms2) {
100 EFX_ERR(efx,
101 "LM87 detected a hardware failure (status %02x:%02x)"
102 "%s%s\n",
103 alarms1, alarms2,
104 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
105 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
106 return -ERANGE;
107 }
108
109 return 0;
110}
111
112#else /* !CONFIG_SENSORS_LM87 */
113
114static inline int
115efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
116 const u8 *reg_values)
117{
118 return 0;
119}
120static inline void efx_fini_lm87(struct efx_nic *efx)
121{
122}
123static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
124{
125 return 0;
126}
127
128#endif /* CONFIG_SENSORS_LM87 */
129
130/*****************************************************************************
131 * Support for the SFE4001 and SFN4111T NICs.
132 *
133 * The SFE4001 does not power-up fully at reset due to its high power
134 * consumption. We control its power via a PCA9539 I/O expander.
135 * Both boards have a MAX6647 temperature monitor which we expose to
136 * the lm90 driver.
137 *
138 * This also provides minimal support for reflashing the PHY, which is
139 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
140 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
141 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
142 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
143 * exclusive with the network device being open.
144 */
145
146/**************************************************************************
147 * Support for I2C IO Expander device on SFE4001
148 */
149#define PCA9539 0x74
150
151#define P0_IN 0x00
152#define P0_OUT 0x02
153#define P0_INVERT 0x04
154#define P0_CONFIG 0x06
155
156#define P0_EN_1V0X_LBN 0
157#define P0_EN_1V0X_WIDTH 1
158#define P0_EN_1V2_LBN 1
159#define P0_EN_1V2_WIDTH 1
160#define P0_EN_2V5_LBN 2
161#define P0_EN_2V5_WIDTH 1
162#define P0_EN_3V3X_LBN 3
163#define P0_EN_3V3X_WIDTH 1
164#define P0_EN_5V_LBN 4
165#define P0_EN_5V_WIDTH 1
166#define P0_SHORTEN_JTAG_LBN 5
167#define P0_SHORTEN_JTAG_WIDTH 1
168#define P0_X_TRST_LBN 6
169#define P0_X_TRST_WIDTH 1
170#define P0_DSP_RESET_LBN 7
171#define P0_DSP_RESET_WIDTH 1
172
173#define P1_IN 0x01
174#define P1_OUT 0x03
175#define P1_INVERT 0x05
176#define P1_CONFIG 0x07
177
178#define P1_AFE_PWD_LBN 0
179#define P1_AFE_PWD_WIDTH 1
180#define P1_DSP_PWD25_LBN 1
181#define P1_DSP_PWD25_WIDTH 1
182#define P1_RESERVED_LBN 2
183#define P1_RESERVED_WIDTH 2
184#define P1_SPARE_LBN 4
185#define P1_SPARE_WIDTH 4
186
187/* Temperature Sensor */
188#define MAX664X_REG_RSL 0x02
189#define MAX664X_REG_WLHO 0x0B
190
191static void sfe4001_poweroff(struct efx_nic *efx)
192{
193 struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
194 struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
195
196 /* Turn off all power rails and disable outputs */
197 i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
198 i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
199 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
200
201 /* Clear any over-temperature alert */
202 i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
203}
204
205static int sfe4001_poweron(struct efx_nic *efx)
206{
207 struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
208 struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
209 unsigned int i, j;
210 int rc;
211 u8 out;
212
213 /* Clear any previous over-temperature alert */
214 rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
215 if (rc < 0)
216 return rc;
217
218 /* Enable port 0 and port 1 outputs on IO expander */
219 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
220 if (rc)
221 return rc;
222 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
223 0xff & ~(1 << P1_SPARE_LBN));
224 if (rc)
225 goto fail_on;
226
227 /* If PHY power is on, turn it all off and wait 1 second to
228 * ensure a full reset.
229 */
230 rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
231 if (rc < 0)
232 goto fail_on;
233 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
234 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
235 (0 << P0_EN_1V0X_LBN));
236 if (rc != out) {
237 EFX_INFO(efx, "power-cycling PHY\n");
238 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
239 if (rc)
240 goto fail_on;
241 schedule_timeout_uninterruptible(HZ);
242 }
243
244 for (i = 0; i < 20; ++i) {
245 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
246 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
247 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
248 (1 << P0_X_TRST_LBN));
249 if (efx->phy_mode & PHY_MODE_SPECIAL)
250 out |= 1 << P0_EN_3V3X_LBN;
251
252 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
253 if (rc)
254 goto fail_on;
255 msleep(10);
256
257 /* Turn on 1V power rail */
258 out &= ~(1 << P0_EN_1V0X_LBN);
259 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
260 if (rc)
261 goto fail_on;
262
263 EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
264
265 /* In flash config mode, DSP does not turn on AFE, so
266 * just wait 1 second.
267 */
268 if (efx->phy_mode & PHY_MODE_SPECIAL) {
269 schedule_timeout_uninterruptible(HZ);
270 return 0;
271 }
272
273 for (j = 0; j < 10; ++j) {
274 msleep(100);
275
276 /* Check DSP has asserted AFE power line */
277 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
278 if (rc < 0)
279 goto fail_on;
280 if (rc & (1 << P1_AFE_PWD_LBN))
281 return 0;
282 }
283 }
284
285 EFX_INFO(efx, "timed out waiting for DSP boot\n");
286 rc = -ETIMEDOUT;
287fail_on:
288 sfe4001_poweroff(efx);
289 return rc;
290}
291
292static int sfn4111t_reset(struct efx_nic *efx)
293{
294 struct falcon_board *board = falcon_board(efx);
295 efx_oword_t reg;
296
297 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
298 i2c_lock_adapter(&board->i2c_adap);
299
300 /* Pull RST_N (GPIO 2) low then let it up again, setting the
301 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
302 * output enables; the output levels should always be 0 (low)
303 * and we rely on external pull-ups. */
304 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
305 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
306 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
307 msleep(1000);
308 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
309 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
310 !!(efx->phy_mode & PHY_MODE_SPECIAL));
311 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
312 msleep(1);
313
314 i2c_unlock_adapter(&board->i2c_adap);
315
316 ssleep(1);
317 return 0;
318}
319
320static ssize_t show_phy_flash_cfg(struct device *dev,
321 struct device_attribute *attr, char *buf)
322{
323 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
324 return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
325}
326
327static ssize_t set_phy_flash_cfg(struct device *dev,
328 struct device_attribute *attr,
329 const char *buf, size_t count)
330{
331 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
332 enum efx_phy_mode old_mode, new_mode;
333 int err;
334
335 rtnl_lock();
336 old_mode = efx->phy_mode;
337 if (count == 0 || *buf == '0')
338 new_mode = old_mode & ~PHY_MODE_SPECIAL;
339 else
340 new_mode = PHY_MODE_SPECIAL;
341 if (old_mode == new_mode) {
342 err = 0;
343 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
344 err = -EBUSY;
345 } else {
346 /* Reset the PHY, reconfigure the MAC and enable/disable
347 * MAC stats accordingly. */
348 efx->phy_mode = new_mode;
349 if (new_mode & PHY_MODE_SPECIAL)
350 falcon_stop_nic_stats(efx);
351 if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001)
352 err = sfe4001_poweron(efx);
353 else
354 err = sfn4111t_reset(efx);
355 if (!err)
356 err = efx_reconfigure_port(efx);
357 if (!(new_mode & PHY_MODE_SPECIAL))
358 falcon_start_nic_stats(efx);
359 }
360 rtnl_unlock();
361
362 return err ? err : count;
363}
364
365static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
366
367static void sfe4001_fini(struct efx_nic *efx)
368{
369 struct falcon_board *board = falcon_board(efx);
370
371 EFX_INFO(efx, "%s\n", __func__);
372
373 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
374 sfe4001_poweroff(efx);
375 i2c_unregister_device(board->ioexp_client);
376 i2c_unregister_device(board->hwmon_client);
377}
378
379static int sfe4001_check_hw(struct efx_nic *efx)
380{
381 s32 status;
382
383 /* If XAUI link is up then do not monitor */
384 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
385 return 0;
386
387 /* Check the powered status of the PHY. Lack of power implies that
388 * the MAX6647 has shut down power to it, probably due to a temp.
389 * alarm. Reading the power status rather than the MAX6647 status
390 * directly because the later is read-to-clear and would thus
391 * start to power up the PHY again when polled, causing us to blip
392 * the power undesirably.
393 * We know we can read from the IO expander because we did
394 * it during power-on. Assume failure now is bad news. */
395 status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
396 if (status >= 0 &&
397 (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
398 return 0;
399
400 /* Use board power control, not PHY power control */
401 sfe4001_poweroff(efx);
402 efx->phy_mode = PHY_MODE_OFF;
403
404 return (status < 0) ? -EIO : -ERANGE;
405}
406
407static struct i2c_board_info sfe4001_hwmon_info = {
408 I2C_BOARD_INFO("max6647", 0x4e),
409};
410
411/* This board uses an I2C expander to provider power to the PHY, which needs to
412 * be turned on before the PHY can be used.
413 * Context: Process context, rtnl lock held
414 */
415static int sfe4001_init(struct efx_nic *efx)
416{
417 struct falcon_board *board = falcon_board(efx);
418 int rc;
419
420#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
421 board->hwmon_client =
422 i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
423#else
424 board->hwmon_client =
425 i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
426#endif
427 if (!board->hwmon_client)
428 return -EIO;
429
430 /* Raise board/PHY high limit from 85 to 90 degrees Celsius */
431 rc = i2c_smbus_write_byte_data(board->hwmon_client,
432 MAX664X_REG_WLHO, 90);
433 if (rc)
434 goto fail_hwmon;
435
436 board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
437 if (!board->ioexp_client) {
438 rc = -EIO;
439 goto fail_hwmon;
440 }
441
442 if (efx->phy_mode & PHY_MODE_SPECIAL) {
443 /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
444 * will fail. */
445 falcon_stop_nic_stats(efx);
446 }
447 rc = sfe4001_poweron(efx);
448 if (rc)
449 goto fail_ioexp;
450
451 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
452 if (rc)
453 goto fail_on;
454
455 EFX_INFO(efx, "PHY is powered on\n");
456 return 0;
457
458fail_on:
459 sfe4001_poweroff(efx);
460fail_ioexp:
461 i2c_unregister_device(board->ioexp_client);
462fail_hwmon:
463 i2c_unregister_device(board->hwmon_client);
464 return rc;
465}
466
467static int sfn4111t_check_hw(struct efx_nic *efx)
468{
469 s32 status;
470
471 /* If XAUI link is up then do not monitor */
472 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
473 return 0;
474
475 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
476 status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
477 MAX664X_REG_RSL);
478 if (status < 0)
479 return -EIO;
480 if (status & 0x57)
481 return -ERANGE;
482 return 0;
483}
484
485static void sfn4111t_fini(struct efx_nic *efx)
486{
487 EFX_INFO(efx, "%s\n", __func__);
488
489 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
490 i2c_unregister_device(falcon_board(efx)->hwmon_client);
491}
492
493static struct i2c_board_info sfn4111t_a0_hwmon_info = {
494 I2C_BOARD_INFO("max6647", 0x4e),
495};
496
497static struct i2c_board_info sfn4111t_r5_hwmon_info = {
498 I2C_BOARD_INFO("max6646", 0x4d),
499};
500
501static void sfn4111t_init_phy(struct efx_nic *efx)
502{
503 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
504 if (sft9001_wait_boot(efx) != -EINVAL)
505 return;
506
507 efx->phy_mode = PHY_MODE_SPECIAL;
508 falcon_stop_nic_stats(efx);
509 }
510
511 sfn4111t_reset(efx);
512 sft9001_wait_boot(efx);
513}
514
515static int sfn4111t_init(struct efx_nic *efx)
516{
517 struct falcon_board *board = falcon_board(efx);
518 int rc;
519
520 board->hwmon_client =
521 i2c_new_device(&board->i2c_adap,
522 (board->minor < 5) ?
523 &sfn4111t_a0_hwmon_info :
524 &sfn4111t_r5_hwmon_info);
525 if (!board->hwmon_client)
526 return -EIO;
527
528 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
529 if (rc)
530 goto fail_hwmon;
531
532 if (efx->phy_mode & PHY_MODE_SPECIAL)
533 /* PHY may not generate a 156.25 MHz clock and MAC
534 * stats fetch will fail. */
535 falcon_stop_nic_stats(efx);
536
537 return 0;
538
539fail_hwmon:
540 i2c_unregister_device(board->hwmon_client);
541 return rc;
542}
543
544/*****************************************************************************
545 * Support for the SFE4002
546 *
547 */
548static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
549
550static const u8 sfe4002_lm87_regs[] = {
551 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
552 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
553 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
554 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
555 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
556 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
557 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
558 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
559 LM87_TEMP_INT_LIMITS(10, 60), /* board */
560 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
561 0
562};
563
564static struct i2c_board_info sfe4002_hwmon_info = {
565 I2C_BOARD_INFO("lm87", 0x2e),
566 .platform_data = &sfe4002_lm87_channel,
567};
568
569/****************************************************************************/
570/* LED allocations. Note that on rev A0 boards the schematic and the reality
571 * differ: red and green are swapped. Below is the fixed (A1) layout (there
572 * are only 3 A0 boards in existence, so no real reason to make this
573 * conditional).
574 */
575#define SFE4002_FAULT_LED (2) /* Red */
576#define SFE4002_RX_LED (0) /* Green */
577#define SFE4002_TX_LED (1) /* Amber */
578
579static void sfe4002_init_phy(struct efx_nic *efx)
580{
581 /* Set the TX and RX LEDs to reflect status and activity, and the
582 * fault LED off */
583 falcon_qt202x_set_led(efx, SFE4002_TX_LED,
584 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
585 falcon_qt202x_set_led(efx, SFE4002_RX_LED,
586 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
587 falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
588}
589
590static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
591{
592 falcon_qt202x_set_led(
593 efx, SFE4002_FAULT_LED,
594 (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
595}
596
597static int sfe4002_check_hw(struct efx_nic *efx)
598{
599 struct falcon_board *board = falcon_board(efx);
600
601 /* A0 board rev. 4002s report a temperature fault the whole time
602 * (bad sensor) so we mask it out. */
603 unsigned alarm_mask =
604 (board->major == 0 && board->minor == 0) ?
605 ~LM87_ALARM_TEMP_EXT1 : ~0;
606
607 return efx_check_lm87(efx, alarm_mask);
608}
609
610static int sfe4002_init(struct efx_nic *efx)
611{
612 return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
613}
614
615/*****************************************************************************
616 * Support for the SFN4112F
617 *
618 */
619static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
620
621static const u8 sfn4112f_lm87_regs[] = {
622 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
623 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
624 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
625 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
626 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
627 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
628 LM87_TEMP_INT_LIMITS(10, 60), /* board */
629 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
630 0
631};
632
633static struct i2c_board_info sfn4112f_hwmon_info = {
634 I2C_BOARD_INFO("lm87", 0x2e),
635 .platform_data = &sfn4112f_lm87_channel,
636};
637
638#define SFN4112F_ACT_LED 0
639#define SFN4112F_LINK_LED 1
640
641static void sfn4112f_init_phy(struct efx_nic *efx)
642{
643 falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
644 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
645 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
646 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
647}
648
649static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
650{
651 int reg;
652
653 switch (mode) {
654 case EFX_LED_OFF:
655 reg = QUAKE_LED_OFF;
656 break;
657 case EFX_LED_ON:
658 reg = QUAKE_LED_ON;
659 break;
660 default:
661 reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
662 break;
663 }
664
665 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
666}
667
668static int sfn4112f_check_hw(struct efx_nic *efx)
669{
670 /* Mask out unused sensors */
671 return efx_check_lm87(efx, ~0x48);
672}
673
674static int sfn4112f_init(struct efx_nic *efx)
675{
676 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
677}
678
679static const struct falcon_board_type board_types[] = {
680 {
681 .id = FALCON_BOARD_SFE4001,
682 .ref_model = "SFE4001",
683 .gen_type = "10GBASE-T adapter",
684 .init = sfe4001_init,
685 .init_phy = efx_port_dummy_op_void,
686 .fini = sfe4001_fini,
687 .set_id_led = tenxpress_set_id_led,
688 .monitor = sfe4001_check_hw,
689 },
690 {
691 .id = FALCON_BOARD_SFE4002,
692 .ref_model = "SFE4002",
693 .gen_type = "XFP adapter",
694 .init = sfe4002_init,
695 .init_phy = sfe4002_init_phy,
696 .fini = efx_fini_lm87,
697 .set_id_led = sfe4002_set_id_led,
698 .monitor = sfe4002_check_hw,
699 },
700 {
701 .id = FALCON_BOARD_SFN4111T,
702 .ref_model = "SFN4111T",
703 .gen_type = "100/1000/10GBASE-T adapter",
704 .init = sfn4111t_init,
705 .init_phy = sfn4111t_init_phy,
706 .fini = sfn4111t_fini,
707 .set_id_led = tenxpress_set_id_led,
708 .monitor = sfn4111t_check_hw,
709 },
710 {
711 .id = FALCON_BOARD_SFN4112F,
712 .ref_model = "SFN4112F",
713 .gen_type = "SFP+ adapter",
714 .init = sfn4112f_init,
715 .init_phy = sfn4112f_init_phy,
716 .fini = efx_fini_lm87,
717 .set_id_led = sfn4112f_set_id_led,
718 .monitor = sfn4112f_check_hw,
719 },
720};
721
722static const struct falcon_board_type falcon_dummy_board = {
723 .init = efx_port_dummy_op_int,
724 .init_phy = efx_port_dummy_op_void,
725 .fini = efx_port_dummy_op_void,
726 .set_id_led = efx_port_dummy_op_set_id_led,
727 .monitor = efx_port_dummy_op_int,
728};
729
730void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
731{
732 struct falcon_board *board = falcon_board(efx);
733 u8 type_id = FALCON_BOARD_TYPE(revision_info);
734 int i;
735
736 board->major = FALCON_BOARD_MAJOR(revision_info);
737 board->minor = FALCON_BOARD_MINOR(revision_info);
738
739 for (i = 0; i < ARRAY_SIZE(board_types); i++)
740 if (board_types[i].id == type_id)
741 board->type = &board_types[i];
742
743 if (board->type) {
744 EFX_INFO(efx, "board is %s rev %c%d\n",
745 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
746 ? board->type->ref_model : board->type->gen_type,
747 'A' + board->major, board->minor);
748 } else {
749 EFX_ERR(efx, "unknown board type %d\n", type_id);
750 board->type = &falcon_dummy_board;
751 }
752}
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
index 8865eae20ac5..7dadfcbd6ce7 100644
--- a/drivers/net/sfc/falcon_gmac.c
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -11,11 +11,10 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include "net_driver.h" 12#include "net_driver.h"
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "nic.h"
15#include "mac.h" 15#include "mac.h"
16#include "falcon_hwdefs.h" 16#include "regs.h"
17#include "falcon_io.h" 17#include "io.h"
18#include "gmii.h"
19 18
20/************************************************************************** 19/**************************************************************************
21 * 20 *
@@ -23,106 +22,109 @@
23 * 22 *
24 *************************************************************************/ 23 *************************************************************************/
25 24
26static void falcon_reconfigure_gmac(struct efx_nic *efx) 25static int falcon_reconfigure_gmac(struct efx_nic *efx)
27{ 26{
27 struct efx_link_state *link_state = &efx->link_state;
28 bool loopback, tx_fc, rx_fc, bytemode; 28 bool loopback, tx_fc, rx_fc, bytemode;
29 int if_mode; 29 int if_mode;
30 unsigned int max_frame_len; 30 unsigned int max_frame_len;
31 efx_oword_t reg; 31 efx_oword_t reg;
32 32
33 /* Configuration register 1 */ 33 /* Configuration register 1 */
34 tx_fc = (efx->link_fc & EFX_FC_TX) || !efx->link_fd; 34 tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
35 rx_fc = !!(efx->link_fc & EFX_FC_RX); 35 rx_fc = !!(link_state->fc & EFX_FC_RX);
36 loopback = (efx->loopback_mode == LOOPBACK_GMAC); 36 loopback = (efx->loopback_mode == LOOPBACK_GMAC);
37 bytemode = (efx->link_speed == 1000); 37 bytemode = (link_state->speed == 1000);
38 38
39 EFX_POPULATE_OWORD_5(reg, 39 EFX_POPULATE_OWORD_5(reg,
40 GM_LOOP, loopback, 40 FRF_AB_GM_LOOP, loopback,
41 GM_TX_EN, 1, 41 FRF_AB_GM_TX_EN, 1,
42 GM_TX_FC_EN, tx_fc, 42 FRF_AB_GM_TX_FC_EN, tx_fc,
43 GM_RX_EN, 1, 43 FRF_AB_GM_RX_EN, 1,
44 GM_RX_FC_EN, rx_fc); 44 FRF_AB_GM_RX_FC_EN, rx_fc);
45 falcon_write(efx, &reg, GM_CFG1_REG); 45 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10); 46 udelay(10);
47 47
48 /* Configuration register 2 */ 48 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1; 49 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg, 50 EFX_POPULATE_OWORD_5(reg,
51 GM_IF_MODE, if_mode, 51 FRF_AB_GM_IF_MODE, if_mode,
52 GM_PAD_CRC_EN, 1, 52 FRF_AB_GM_PAD_CRC_EN, 1,
53 GM_LEN_CHK, 1, 53 FRF_AB_GM_LEN_CHK, 1,
54 GM_FD, efx->link_fd, 54 FRF_AB_GM_FD, link_state->fd,
55 GM_PAMBL_LEN, 0x7/*datasheet recommended */); 55 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56 56
57 falcon_write(efx, &reg, GM_CFG2_REG); 57 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10); 58 udelay(10);
59 59
60 /* Max frame len register */ 60 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len); 62 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 falcon_write(efx, &reg, GM_MAX_FLEN_REG); 63 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10); 64 udelay(10);
65 65
66 /* FIFO configuration register 0 */ 66 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg, 67 EFX_POPULATE_OWORD_5(reg,
68 GMF_FTFENREQ, 1, 68 FRF_AB_GMF_FTFENREQ, 1,
69 GMF_STFENREQ, 1, 69 FRF_AB_GMF_STFENREQ, 1,
70 GMF_FRFENREQ, 1, 70 FRF_AB_GMF_FRFENREQ, 1,
71 GMF_SRFENREQ, 1, 71 FRF_AB_GMF_SRFENREQ, 1,
72 GMF_WTMENREQ, 1); 72 FRF_AB_GMF_WTMENREQ, 1);
73 falcon_write(efx, &reg, GMF_CFG0_REG); 73 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10); 74 udelay(10);
75 75
76 /* FIFO configuration register 1 */ 76 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg, 77 EFX_POPULATE_OWORD_2(reg,
78 GMF_CFGFRTH, 0x12, 78 FRF_AB_GMF_CFGFRTH, 0x12,
79 GMF_CFGXOFFRTX, 0xffff); 79 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 falcon_write(efx, &reg, GMF_CFG1_REG); 80 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10); 81 udelay(10);
82 82
83 /* FIFO configuration register 2 */ 83 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg, 84 EFX_POPULATE_OWORD_2(reg,
85 GMF_CFGHWM, 0x3f, 85 FRF_AB_GMF_CFGHWM, 0x3f,
86 GMF_CFGLWM, 0xa); 86 FRF_AB_GMF_CFGLWM, 0xa);
87 falcon_write(efx, &reg, GMF_CFG2_REG); 87 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10); 88 udelay(10);
89 89
90 /* FIFO configuration register 3 */ 90 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg, 91 EFX_POPULATE_OWORD_2(reg,
92 GMF_CFGHWMFT, 0x1c, 92 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 GMF_CFGFTTH, 0x08); 93 FRF_AB_GMF_CFGFTTH, 0x08);
94 falcon_write(efx, &reg, GMF_CFG3_REG); 94 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10); 95 udelay(10);
96 96
97 /* FIFO configuration register 4 */ 97 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1); 98 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 falcon_write(efx, &reg, GMF_CFG4_REG); 99 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10); 100 udelay(10);
101 101
102 /* FIFO configuration register 5 */ 102 /* FIFO configuration register 5 */
103 falcon_read(efx, &reg, GMF_CFG5_REG); 103 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode); 104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd); 105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
106 EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd); 106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
107 EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0); 107 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 falcon_write(efx, &reg, GMF_CFG5_REG); 108 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10); 109 udelay(10);
110 110
111 /* MAC address */ 111 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg, 112 EFX_POPULATE_OWORD_4(reg,
113 GM_HWADDR_5, efx->net_dev->dev_addr[5], 113 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 GM_HWADDR_4, efx->net_dev->dev_addr[4], 114 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 GM_HWADDR_3, efx->net_dev->dev_addr[3], 115 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 GM_HWADDR_2, efx->net_dev->dev_addr[2]); 116 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 falcon_write(efx, &reg, GM_ADR1_REG); 117 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10); 118 udelay(10);
119 EFX_POPULATE_OWORD_2(reg, 119 EFX_POPULATE_OWORD_2(reg,
120 GM_HWADDR_1, efx->net_dev->dev_addr[1], 120 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 GM_HWADDR_0, efx->net_dev->dev_addr[0]); 121 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 falcon_write(efx, &reg, GM_ADR2_REG); 122 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10); 123 udelay(10);
124 124
125 falcon_reconfigure_mac_wrapper(efx); 125 falcon_reconfigure_mac_wrapper(efx);
126
127 return 0;
126} 128}
127 129
128static void falcon_update_stats_gmac(struct efx_nic *efx) 130static void falcon_update_stats_gmac(struct efx_nic *efx)
@@ -130,11 +132,6 @@ static void falcon_update_stats_gmac(struct efx_nic *efx)
130 struct efx_mac_stats *mac_stats = &efx->mac_stats; 132 struct efx_mac_stats *mac_stats = &efx->mac_stats;
131 unsigned long old_rx_pause, old_tx_pause; 133 unsigned long old_rx_pause, old_tx_pause;
132 unsigned long new_rx_pause, new_tx_pause; 134 unsigned long new_rx_pause, new_tx_pause;
133 int rc;
134
135 rc = falcon_dma_stats(efx, GDmaDone_offset);
136 if (rc)
137 return;
138 135
139 /* Pause frames are erroneously counted as errors (SFC bug 3269) */ 136 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
140 old_rx_pause = mac_stats->rx_pause; 137 old_rx_pause = mac_stats->rx_pause;
@@ -221,9 +218,13 @@ static void falcon_update_stats_gmac(struct efx_nic *efx)
221 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64; 218 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
222} 219}
223 220
221static bool falcon_gmac_check_fault(struct efx_nic *efx)
222{
223 return false;
224}
225
224struct efx_mac_operations falcon_gmac_operations = { 226struct efx_mac_operations falcon_gmac_operations = {
225 .reconfigure = falcon_reconfigure_gmac, 227 .reconfigure = falcon_reconfigure_gmac,
226 .update_stats = falcon_update_stats_gmac, 228 .update_stats = falcon_update_stats_gmac,
227 .irq = efx_port_dummy_op_void, 229 .check_fault = falcon_gmac_check_fault,
228 .poll = efx_port_dummy_op_void,
229}; 230};
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
deleted file mode 100644
index 2d2261117ace..000000000000
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ /dev/null
@@ -1,1333 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only )*/
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* SPI/VPD config register */
96#define EE_VPD_CFG_REG_KER 0x0140
97#define EE_VPD_EN_LBN 0
98#define EE_VPD_EN_WIDTH 1
99#define EE_VPD_EN_AD9_MODE_LBN 1
100#define EE_VPD_EN_AD9_MODE_WIDTH 1
101#define EE_EE_CLOCK_DIV_LBN 112
102#define EE_EE_CLOCK_DIV_WIDTH 7
103#define EE_SF_CLOCK_DIV_LBN 120
104#define EE_SF_CLOCK_DIV_WIDTH 7
105
106/* PCIE CORE ACCESS REG */
107#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
108#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
109#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
110#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
111
112/* NIC status register */
113#define NIC_STAT_REG 0x0200
114#define EE_STRAP_EN_LBN 31
115#define EE_STRAP_EN_WIDTH 1
116#define EE_STRAP_OVR_LBN 24
117#define EE_STRAP_OVR_WIDTH 4
118#define ONCHIP_SRAM_LBN 16
119#define ONCHIP_SRAM_WIDTH 1
120#define SF_PRST_LBN 9
121#define SF_PRST_WIDTH 1
122#define EE_PRST_LBN 8
123#define EE_PRST_WIDTH 1
124#define STRAP_PINS_LBN 0
125#define STRAP_PINS_WIDTH 3
126/* These bit definitions are extrapolated from the list of numerical
127 * values for STRAP_PINS.
128 */
129#define STRAP_10G_LBN 2
130#define STRAP_10G_WIDTH 1
131#define STRAP_PCIE_LBN 0
132#define STRAP_PCIE_WIDTH 1
133
134#define BOOTED_USING_NVDEVICE_LBN 3
135#define BOOTED_USING_NVDEVICE_WIDTH 1
136
137/* GPIO control register */
138#define GPIO_CTL_REG_KER 0x0210
139#define GPIO_USE_NIC_CLK_LBN (30)
140#define GPIO_USE_NIC_CLK_WIDTH (1)
141#define GPIO_OUTPUTS_LBN (16)
142#define GPIO_OUTPUTS_WIDTH (4)
143#define GPIO_INPUTS_LBN (8)
144#define GPIO_DIRECTION_LBN (24)
145#define GPIO_DIRECTION_WIDTH (4)
146#define GPIO_DIRECTION_OUT (1)
147#define GPIO_SRAM_SLEEP (1 << 1)
148
149#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
150#define GPIO3_OEN_WIDTH 1
151#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
152#define GPIO2_OEN_WIDTH 1
153#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
154#define GPIO1_OEN_WIDTH 1
155#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
156#define GPIO0_OEN_WIDTH 1
157
158#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
159#define GPIO3_OUT_WIDTH 1
160#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
161#define GPIO2_OUT_WIDTH 1
162#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
163#define GPIO1_OUT_WIDTH 1
164#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
165#define GPIO0_OUT_WIDTH 1
166
167#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
168#define GPIO3_IN_WIDTH 1
169#define GPIO2_IN_WIDTH 1
170#define GPIO1_IN_WIDTH 1
171#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
172#define GPIO0_IN_WIDTH 1
173
174/* Global control register */
175#define GLB_CTL_REG_KER 0x0220
176#define EXT_PHY_RST_CTL_LBN 63
177#define EXT_PHY_RST_CTL_WIDTH 1
178#define PCIE_SD_RST_CTL_LBN 61
179#define PCIE_SD_RST_CTL_WIDTH 1
180
181#define PCIE_NSTCK_RST_CTL_LBN 58
182#define PCIE_NSTCK_RST_CTL_WIDTH 1
183#define PCIE_CORE_RST_CTL_LBN 57
184#define PCIE_CORE_RST_CTL_WIDTH 1
185#define EE_RST_CTL_LBN 49
186#define EE_RST_CTL_WIDTH 1
187#define RST_XGRX_LBN 24
188#define RST_XGRX_WIDTH 1
189#define RST_XGTX_LBN 23
190#define RST_XGTX_WIDTH 1
191#define RST_EM_LBN 22
192#define RST_EM_WIDTH 1
193#define EXT_PHY_RST_DUR_LBN 1
194#define EXT_PHY_RST_DUR_WIDTH 3
195#define SWRST_LBN 0
196#define SWRST_WIDTH 1
197#define INCLUDE_IN_RESET 0
198#define EXCLUDE_FROM_RESET 1
199
200/* Fatal interrupt register */
201#define FATAL_INTR_REG_KER 0x0230
202#define RBUF_OWN_INT_KER_EN_LBN 39
203#define RBUF_OWN_INT_KER_EN_WIDTH 1
204#define TBUF_OWN_INT_KER_EN_LBN 38
205#define TBUF_OWN_INT_KER_EN_WIDTH 1
206#define ILL_ADR_INT_KER_EN_LBN 33
207#define ILL_ADR_INT_KER_EN_WIDTH 1
208#define MEM_PERR_INT_KER_LBN 8
209#define MEM_PERR_INT_KER_WIDTH 1
210#define INT_KER_ERROR_LBN 0
211#define INT_KER_ERROR_WIDTH 12
212
213#define DP_CTRL_REG 0x250
214#define FLS_EVQ_ID_LBN 0
215#define FLS_EVQ_ID_WIDTH 11
216
217#define MEM_STAT_REG_KER 0x260
218
219/* Debug probe register */
220#define DEBUG_BLK_SEL_MISC 7
221#define DEBUG_BLK_SEL_SERDES 6
222#define DEBUG_BLK_SEL_EM 5
223#define DEBUG_BLK_SEL_SR 4
224#define DEBUG_BLK_SEL_EV 3
225#define DEBUG_BLK_SEL_RX 2
226#define DEBUG_BLK_SEL_TX 1
227#define DEBUG_BLK_SEL_BIU 0
228
229/* FPGA build version */
230#define ALTERA_BUILD_REG_KER 0x0300
231#define VER_ALL_LBN 0
232#define VER_ALL_WIDTH 32
233
234/* Spare EEPROM bits register (flash 0x390) */
235#define SPARE_REG_KER 0x310
236#define MEM_PERR_EN_TX_DATA_LBN 72
237#define MEM_PERR_EN_TX_DATA_WIDTH 2
238
239/* Timer table for kernel access */
240#define TIMER_CMD_REG_KER 0x420
241#define TIMER_MODE_LBN 12
242#define TIMER_MODE_WIDTH 2
243#define TIMER_MODE_DIS 0
244#define TIMER_MODE_INT_HLDOFF 2
245#define TIMER_VAL_LBN 0
246#define TIMER_VAL_WIDTH 12
247
248/* Driver generated event register */
249#define DRV_EV_REG_KER 0x440
250#define DRV_EV_QID_LBN 64
251#define DRV_EV_QID_WIDTH 12
252#define DRV_EV_DATA_LBN 0
253#define DRV_EV_DATA_WIDTH 64
254
255/* Buffer table configuration register */
256#define BUF_TBL_CFG_REG_KER 0x600
257#define BUF_TBL_MODE_LBN 3
258#define BUF_TBL_MODE_WIDTH 1
259#define BUF_TBL_MODE_HALF 0
260#define BUF_TBL_MODE_FULL 1
261
262/* SRAM receive descriptor cache configuration register */
263#define SRM_RX_DC_CFG_REG_KER 0x610
264#define SRM_RX_DC_BASE_ADR_LBN 0
265#define SRM_RX_DC_BASE_ADR_WIDTH 21
266
267/* SRAM transmit descriptor cache configuration register */
268#define SRM_TX_DC_CFG_REG_KER 0x620
269#define SRM_TX_DC_BASE_ADR_LBN 0
270#define SRM_TX_DC_BASE_ADR_WIDTH 21
271
272/* SRAM configuration register */
273#define SRM_CFG_REG_KER 0x630
274#define SRAM_OOB_BT_INIT_EN_LBN 3
275#define SRAM_OOB_BT_INIT_EN_WIDTH 1
276#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
277#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
278#define SRM_NB_BSZ_1BANKS_2M 0
279#define SRM_NB_BSZ_1BANKS_4M 1
280#define SRM_NB_BSZ_1BANKS_8M 2
281#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
282#define SRM_NB_BSZ_2BANKS_4M 4
283#define SRM_NB_BSZ_2BANKS_8M 5
284#define SRM_NB_BSZ_2BANKS_16M 6
285#define SRM_NB_BSZ_RESERVED 7
286
287/* Special buffer table update register */
288#define BUF_TBL_UPD_REG_KER 0x0650
289#define BUF_UPD_CMD_LBN 63
290#define BUF_UPD_CMD_WIDTH 1
291#define BUF_CLR_CMD_LBN 62
292#define BUF_CLR_CMD_WIDTH 1
293#define BUF_CLR_END_ID_LBN 32
294#define BUF_CLR_END_ID_WIDTH 20
295#define BUF_CLR_START_ID_LBN 0
296#define BUF_CLR_START_ID_WIDTH 20
297
298/* Receive configuration register */
299#define RX_CFG_REG_KER 0x800
300
301/* B0 */
302#define RX_INGR_EN_B0_LBN 47
303#define RX_INGR_EN_B0_WIDTH 1
304#define RX_DESC_PUSH_EN_B0_LBN 43
305#define RX_DESC_PUSH_EN_B0_WIDTH 1
306#define RX_XON_TX_TH_B0_LBN 33
307#define RX_XON_TX_TH_B0_WIDTH 5
308#define RX_XOFF_TX_TH_B0_LBN 28
309#define RX_XOFF_TX_TH_B0_WIDTH 5
310#define RX_USR_BUF_SIZE_B0_LBN 19
311#define RX_USR_BUF_SIZE_B0_WIDTH 9
312#define RX_XON_MAC_TH_B0_LBN 10
313#define RX_XON_MAC_TH_B0_WIDTH 9
314#define RX_XOFF_MAC_TH_B0_LBN 1
315#define RX_XOFF_MAC_TH_B0_WIDTH 9
316#define RX_XOFF_MAC_EN_B0_LBN 0
317#define RX_XOFF_MAC_EN_B0_WIDTH 1
318
319/* A1 */
320#define RX_DESC_PUSH_EN_A1_LBN 35
321#define RX_DESC_PUSH_EN_A1_WIDTH 1
322#define RX_XON_TX_TH_A1_LBN 25
323#define RX_XON_TX_TH_A1_WIDTH 5
324#define RX_XOFF_TX_TH_A1_LBN 20
325#define RX_XOFF_TX_TH_A1_WIDTH 5
326#define RX_USR_BUF_SIZE_A1_LBN 11
327#define RX_USR_BUF_SIZE_A1_WIDTH 9
328#define RX_XON_MAC_TH_A1_LBN 6
329#define RX_XON_MAC_TH_A1_WIDTH 5
330#define RX_XOFF_MAC_TH_A1_LBN 1
331#define RX_XOFF_MAC_TH_A1_WIDTH 5
332#define RX_XOFF_MAC_EN_A1_LBN 0
333#define RX_XOFF_MAC_EN_A1_WIDTH 1
334
335/* Receive filter control register */
336#define RX_FILTER_CTL_REG 0x810
337#define UDP_FULL_SRCH_LIMIT_LBN 32
338#define UDP_FULL_SRCH_LIMIT_WIDTH 8
339#define NUM_KER_LBN 24
340#define NUM_KER_WIDTH 2
341#define UDP_WILD_SRCH_LIMIT_LBN 16
342#define UDP_WILD_SRCH_LIMIT_WIDTH 8
343#define TCP_WILD_SRCH_LIMIT_LBN 8
344#define TCP_WILD_SRCH_LIMIT_WIDTH 8
345#define TCP_FULL_SRCH_LIMIT_LBN 0
346#define TCP_FULL_SRCH_LIMIT_WIDTH 8
347
348/* RX queue flush register */
349#define RX_FLUSH_DESCQ_REG_KER 0x0820
350#define RX_FLUSH_DESCQ_CMD_LBN 24
351#define RX_FLUSH_DESCQ_CMD_WIDTH 1
352#define RX_FLUSH_DESCQ_LBN 0
353#define RX_FLUSH_DESCQ_WIDTH 12
354
355/* Receive descriptor update register */
356#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
357#define RX_DESC_WPTR_DWORD_LBN 0
358#define RX_DESC_WPTR_DWORD_WIDTH 12
359
360/* Receive descriptor cache configuration register */
361#define RX_DC_CFG_REG_KER 0x840
362#define RX_DC_SIZE_LBN 0
363#define RX_DC_SIZE_WIDTH 2
364
365#define RX_DC_PF_WM_REG_KER 0x850
366#define RX_DC_PF_LWM_LBN 0
367#define RX_DC_PF_LWM_WIDTH 6
368
369/* RX no descriptor drop counter */
370#define RX_NODESC_DROP_REG_KER 0x880
371#define RX_NODESC_DROP_CNT_LBN 0
372#define RX_NODESC_DROP_CNT_WIDTH 16
373
374/* RX black magic register */
375#define RX_SELF_RST_REG_KER 0x890
376#define RX_ISCSI_DIS_LBN 17
377#define RX_ISCSI_DIS_WIDTH 1
378#define RX_NODESC_WAIT_DIS_LBN 9
379#define RX_NODESC_WAIT_DIS_WIDTH 1
380#define RX_RECOVERY_EN_LBN 8
381#define RX_RECOVERY_EN_WIDTH 1
382
383/* TX queue flush register */
384#define TX_FLUSH_DESCQ_REG_KER 0x0a00
385#define TX_FLUSH_DESCQ_CMD_LBN 12
386#define TX_FLUSH_DESCQ_CMD_WIDTH 1
387#define TX_FLUSH_DESCQ_LBN 0
388#define TX_FLUSH_DESCQ_WIDTH 12
389
390/* Transmit descriptor update register */
391#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
392#define TX_DESC_WPTR_DWORD_LBN 0
393#define TX_DESC_WPTR_DWORD_WIDTH 12
394
395/* Transmit descriptor cache configuration register */
396#define TX_DC_CFG_REG_KER 0xa20
397#define TX_DC_SIZE_LBN 0
398#define TX_DC_SIZE_WIDTH 2
399
400/* Transmit checksum configuration register (A0/A1 only) */
401#define TX_CHKSM_CFG_REG_KER_A1 0xa30
402
403/* Transmit configuration register */
404#define TX_CFG_REG_KER 0xa50
405#define TX_NO_EOP_DISC_EN_LBN 5
406#define TX_NO_EOP_DISC_EN_WIDTH 1
407
408/* Transmit configuration register 2 */
409#define TX_CFG2_REG_KER 0xa80
410#define TX_CSR_PUSH_EN_LBN 89
411#define TX_CSR_PUSH_EN_WIDTH 1
412#define TX_RX_SPACER_LBN 64
413#define TX_RX_SPACER_WIDTH 8
414#define TX_SW_EV_EN_LBN 59
415#define TX_SW_EV_EN_WIDTH 1
416#define TX_RX_SPACER_EN_LBN 57
417#define TX_RX_SPACER_EN_WIDTH 1
418#define TX_PREF_THRESHOLD_LBN 19
419#define TX_PREF_THRESHOLD_WIDTH 2
420#define TX_ONE_PKT_PER_Q_LBN 18
421#define TX_ONE_PKT_PER_Q_WIDTH 1
422#define TX_DIS_NON_IP_EV_LBN 17
423#define TX_DIS_NON_IP_EV_WIDTH 1
424#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
425#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
426
427/* PHY management transmit data register */
428#define MD_TXD_REG_KER 0xc00
429#define MD_TXD_LBN 0
430#define MD_TXD_WIDTH 16
431
432/* PHY management receive data register */
433#define MD_RXD_REG_KER 0xc10
434#define MD_RXD_LBN 0
435#define MD_RXD_WIDTH 16
436
437/* PHY management configuration & status register */
438#define MD_CS_REG_KER 0xc20
439#define MD_GC_LBN 4
440#define MD_GC_WIDTH 1
441#define MD_RIC_LBN 2
442#define MD_RIC_WIDTH 1
443#define MD_RDC_LBN 1
444#define MD_RDC_WIDTH 1
445#define MD_WRC_LBN 0
446#define MD_WRC_WIDTH 1
447
448/* PHY management PHY address register */
449#define MD_PHY_ADR_REG_KER 0xc30
450#define MD_PHY_ADR_LBN 0
451#define MD_PHY_ADR_WIDTH 16
452
453/* PHY management ID register */
454#define MD_ID_REG_KER 0xc40
455#define MD_PRT_ADR_LBN 11
456#define MD_PRT_ADR_WIDTH 5
457#define MD_DEV_ADR_LBN 6
458#define MD_DEV_ADR_WIDTH 5
459
460/* PHY management status & mask register (DWORD read only) */
461#define MD_STAT_REG_KER 0xc50
462#define MD_BSERR_LBN 2
463#define MD_BSERR_WIDTH 1
464#define MD_LNFL_LBN 1
465#define MD_LNFL_WIDTH 1
466#define MD_BSY_LBN 0
467#define MD_BSY_WIDTH 1
468
469/* Port 0 and 1 MAC stats registers */
470#define MAC0_STAT_DMA_REG_KER 0xc60
471#define MAC_STAT_DMA_CMD_LBN 48
472#define MAC_STAT_DMA_CMD_WIDTH 1
473#define MAC_STAT_DMA_ADR_LBN 0
474#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
475
476/* Port 0 and 1 MAC control registers */
477#define MAC0_CTRL_REG_KER 0xc80
478#define MAC_XOFF_VAL_LBN 16
479#define MAC_XOFF_VAL_WIDTH 16
480#define TXFIFO_DRAIN_EN_B0_LBN 7
481#define TXFIFO_DRAIN_EN_B0_WIDTH 1
482#define MAC_BCAD_ACPT_LBN 4
483#define MAC_BCAD_ACPT_WIDTH 1
484#define MAC_UC_PROM_LBN 3
485#define MAC_UC_PROM_WIDTH 1
486#define MAC_LINK_STATUS_LBN 2
487#define MAC_LINK_STATUS_WIDTH 1
488#define MAC_SPEED_LBN 0
489#define MAC_SPEED_WIDTH 2
490
491/* 10G XAUI XGXS default values */
492#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
493#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
494#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
495
496/* Multicast address hash table */
497#define MAC_MCAST_HASH_REG0_KER 0xca0
498#define MAC_MCAST_HASH_REG1_KER 0xcb0
499
500/* GMAC configuration register 1 */
501#define GM_CFG1_REG 0xe00
502#define GM_SW_RST_LBN 31
503#define GM_SW_RST_WIDTH 1
504#define GM_LOOP_LBN 8
505#define GM_LOOP_WIDTH 1
506#define GM_RX_FC_EN_LBN 5
507#define GM_RX_FC_EN_WIDTH 1
508#define GM_TX_FC_EN_LBN 4
509#define GM_TX_FC_EN_WIDTH 1
510#define GM_RX_EN_LBN 2
511#define GM_RX_EN_WIDTH 1
512#define GM_TX_EN_LBN 0
513#define GM_TX_EN_WIDTH 1
514
515/* GMAC configuration register 2 */
516#define GM_CFG2_REG 0xe10
517#define GM_PAMBL_LEN_LBN 12
518#define GM_PAMBL_LEN_WIDTH 4
519#define GM_IF_MODE_LBN 8
520#define GM_IF_MODE_WIDTH 2
521#define GM_LEN_CHK_LBN 4
522#define GM_LEN_CHK_WIDTH 1
523#define GM_PAD_CRC_EN_LBN 2
524#define GM_PAD_CRC_EN_WIDTH 1
525#define GM_FD_LBN 0
526#define GM_FD_WIDTH 1
527
528/* GMAC maximum frame length register */
529#define GM_MAX_FLEN_REG 0xe40
530#define GM_MAX_FLEN_LBN 0
531#define GM_MAX_FLEN_WIDTH 16
532
533/* GMAC station address register 1 */
534#define GM_ADR1_REG 0xf00
535#define GM_HWADDR_5_LBN 24
536#define GM_HWADDR_5_WIDTH 8
537#define GM_HWADDR_4_LBN 16
538#define GM_HWADDR_4_WIDTH 8
539#define GM_HWADDR_3_LBN 8
540#define GM_HWADDR_3_WIDTH 8
541#define GM_HWADDR_2_LBN 0
542#define GM_HWADDR_2_WIDTH 8
543
544/* GMAC station address register 2 */
545#define GM_ADR2_REG 0xf10
546#define GM_HWADDR_1_LBN 24
547#define GM_HWADDR_1_WIDTH 8
548#define GM_HWADDR_0_LBN 16
549#define GM_HWADDR_0_WIDTH 8
550
551/* GMAC FIFO configuration register 0 */
552#define GMF_CFG0_REG 0xf20
553#define GMF_FTFENREQ_LBN 12
554#define GMF_FTFENREQ_WIDTH 1
555#define GMF_STFENREQ_LBN 11
556#define GMF_STFENREQ_WIDTH 1
557#define GMF_FRFENREQ_LBN 10
558#define GMF_FRFENREQ_WIDTH 1
559#define GMF_SRFENREQ_LBN 9
560#define GMF_SRFENREQ_WIDTH 1
561#define GMF_WTMENREQ_LBN 8
562#define GMF_WTMENREQ_WIDTH 1
563
564/* GMAC FIFO configuration register 1 */
565#define GMF_CFG1_REG 0xf30
566#define GMF_CFGFRTH_LBN 16
567#define GMF_CFGFRTH_WIDTH 5
568#define GMF_CFGXOFFRTX_LBN 0
569#define GMF_CFGXOFFRTX_WIDTH 16
570
571/* GMAC FIFO configuration register 2 */
572#define GMF_CFG2_REG 0xf40
573#define GMF_CFGHWM_LBN 16
574#define GMF_CFGHWM_WIDTH 6
575#define GMF_CFGLWM_LBN 0
576#define GMF_CFGLWM_WIDTH 6
577
578/* GMAC FIFO configuration register 3 */
579#define GMF_CFG3_REG 0xf50
580#define GMF_CFGHWMFT_LBN 16
581#define GMF_CFGHWMFT_WIDTH 6
582#define GMF_CFGFTTH_LBN 0
583#define GMF_CFGFTTH_WIDTH 6
584
585/* GMAC FIFO configuration register 4 */
586#define GMF_CFG4_REG 0xf60
587#define GMF_HSTFLTRFRM_PAUSE_LBN 12
588#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
589
590/* GMAC FIFO configuration register 5 */
591#define GMF_CFG5_REG 0xf70
592#define GMF_CFGHDPLX_LBN 22
593#define GMF_CFGHDPLX_WIDTH 1
594#define GMF_CFGBYTMODE_LBN 19
595#define GMF_CFGBYTMODE_WIDTH 1
596#define GMF_HSTDRPLT64_LBN 18
597#define GMF_HSTDRPLT64_WIDTH 1
598#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
599#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
600
601/* XGMAC address register low */
602#define XM_ADR_LO_REG 0x1200
603#define XM_ADR_3_LBN 24
604#define XM_ADR_3_WIDTH 8
605#define XM_ADR_2_LBN 16
606#define XM_ADR_2_WIDTH 8
607#define XM_ADR_1_LBN 8
608#define XM_ADR_1_WIDTH 8
609#define XM_ADR_0_LBN 0
610#define XM_ADR_0_WIDTH 8
611
612/* XGMAC address register high */
613#define XM_ADR_HI_REG 0x1210
614#define XM_ADR_5_LBN 8
615#define XM_ADR_5_WIDTH 8
616#define XM_ADR_4_LBN 0
617#define XM_ADR_4_WIDTH 8
618
619/* XGMAC global configuration */
620#define XM_GLB_CFG_REG 0x1220
621#define XM_RX_STAT_EN_LBN 11
622#define XM_RX_STAT_EN_WIDTH 1
623#define XM_TX_STAT_EN_LBN 10
624#define XM_TX_STAT_EN_WIDTH 1
625#define XM_RX_JUMBO_MODE_LBN 6
626#define XM_RX_JUMBO_MODE_WIDTH 1
627#define XM_INTCLR_MODE_LBN 3
628#define XM_INTCLR_MODE_WIDTH 1
629#define XM_CORE_RST_LBN 0
630#define XM_CORE_RST_WIDTH 1
631
632/* XGMAC transmit configuration */
633#define XM_TX_CFG_REG 0x1230
634#define XM_IPG_LBN 16
635#define XM_IPG_WIDTH 4
636#define XM_FCNTL_LBN 10
637#define XM_FCNTL_WIDTH 1
638#define XM_TXCRC_LBN 8
639#define XM_TXCRC_WIDTH 1
640#define XM_AUTO_PAD_LBN 5
641#define XM_AUTO_PAD_WIDTH 1
642#define XM_TX_PRMBL_LBN 2
643#define XM_TX_PRMBL_WIDTH 1
644#define XM_TXEN_LBN 1
645#define XM_TXEN_WIDTH 1
646
647/* XGMAC receive configuration */
648#define XM_RX_CFG_REG 0x1240
649#define XM_PASS_CRC_ERR_LBN 25
650#define XM_PASS_CRC_ERR_WIDTH 1
651#define XM_ACPT_ALL_MCAST_LBN 11
652#define XM_ACPT_ALL_MCAST_WIDTH 1
653#define XM_ACPT_ALL_UCAST_LBN 9
654#define XM_ACPT_ALL_UCAST_WIDTH 1
655#define XM_AUTO_DEPAD_LBN 8
656#define XM_AUTO_DEPAD_WIDTH 1
657#define XM_RXEN_LBN 1
658#define XM_RXEN_WIDTH 1
659
660/* XGMAC management interrupt mask register */
661#define XM_MGT_INT_MSK_REG_B0 0x1250
662#define XM_MSK_PRMBLE_ERR_LBN 2
663#define XM_MSK_PRMBLE_ERR_WIDTH 1
664#define XM_MSK_RMTFLT_LBN 1
665#define XM_MSK_RMTFLT_WIDTH 1
666#define XM_MSK_LCLFLT_LBN 0
667#define XM_MSK_LCLFLT_WIDTH 1
668
669/* XGMAC flow control register */
670#define XM_FC_REG 0x1270
671#define XM_PAUSE_TIME_LBN 16
672#define XM_PAUSE_TIME_WIDTH 16
673#define XM_DIS_FCNTL_LBN 0
674#define XM_DIS_FCNTL_WIDTH 1
675
676/* XGMAC pause time count register */
677#define XM_PAUSE_TIME_REG 0x1290
678
679/* XGMAC transmit parameter register */
680#define XM_TX_PARAM_REG 0x012d0
681#define XM_TX_JUMBO_MODE_LBN 31
682#define XM_TX_JUMBO_MODE_WIDTH 1
683#define XM_MAX_TX_FRM_SIZE_LBN 16
684#define XM_MAX_TX_FRM_SIZE_WIDTH 14
685
686/* XGMAC receive parameter register */
687#define XM_RX_PARAM_REG 0x12e0
688#define XM_MAX_RX_FRM_SIZE_LBN 0
689#define XM_MAX_RX_FRM_SIZE_WIDTH 14
690
691/* XGMAC management interrupt status register */
692#define XM_MGT_INT_REG_B0 0x12f0
693#define XM_PRMBLE_ERR 2
694#define XM_PRMBLE_WIDTH 1
695#define XM_RMTFLT_LBN 1
696#define XM_RMTFLT_WIDTH 1
697#define XM_LCLFLT_LBN 0
698#define XM_LCLFLT_WIDTH 1
699
700/* XGXS/XAUI powerdown/reset register */
701#define XX_PWR_RST_REG 0x1300
702
703#define XX_SD_RST_ACT_LBN 16
704#define XX_SD_RST_ACT_WIDTH 1
705#define XX_PWRDND_EN_LBN 15
706#define XX_PWRDND_EN_WIDTH 1
707#define XX_PWRDNC_EN_LBN 14
708#define XX_PWRDNC_EN_WIDTH 1
709#define XX_PWRDNB_EN_LBN 13
710#define XX_PWRDNB_EN_WIDTH 1
711#define XX_PWRDNA_EN_LBN 12
712#define XX_PWRDNA_EN_WIDTH 1
713#define XX_RSTPLLCD_EN_LBN 9
714#define XX_RSTPLLCD_EN_WIDTH 1
715#define XX_RSTPLLAB_EN_LBN 8
716#define XX_RSTPLLAB_EN_WIDTH 1
717#define XX_RESETD_EN_LBN 7
718#define XX_RESETD_EN_WIDTH 1
719#define XX_RESETC_EN_LBN 6
720#define XX_RESETC_EN_WIDTH 1
721#define XX_RESETB_EN_LBN 5
722#define XX_RESETB_EN_WIDTH 1
723#define XX_RESETA_EN_LBN 4
724#define XX_RESETA_EN_WIDTH 1
725#define XX_RSTXGXSRX_EN_LBN 2
726#define XX_RSTXGXSRX_EN_WIDTH 1
727#define XX_RSTXGXSTX_EN_LBN 1
728#define XX_RSTXGXSTX_EN_WIDTH 1
729#define XX_RST_XX_EN_LBN 0
730#define XX_RST_XX_EN_WIDTH 1
731
732/* XGXS/XAUI powerdown/reset control register */
733#define XX_SD_CTL_REG 0x1310
734#define XX_HIDRVD_LBN 15
735#define XX_HIDRVD_WIDTH 1
736#define XX_LODRVD_LBN 14
737#define XX_LODRVD_WIDTH 1
738#define XX_HIDRVC_LBN 13
739#define XX_HIDRVC_WIDTH 1
740#define XX_LODRVC_LBN 12
741#define XX_LODRVC_WIDTH 1
742#define XX_HIDRVB_LBN 11
743#define XX_HIDRVB_WIDTH 1
744#define XX_LODRVB_LBN 10
745#define XX_LODRVB_WIDTH 1
746#define XX_HIDRVA_LBN 9
747#define XX_HIDRVA_WIDTH 1
748#define XX_LODRVA_LBN 8
749#define XX_LODRVA_WIDTH 1
750#define XX_LPBKD_LBN 3
751#define XX_LPBKD_WIDTH 1
752#define XX_LPBKC_LBN 2
753#define XX_LPBKC_WIDTH 1
754#define XX_LPBKB_LBN 1
755#define XX_LPBKB_WIDTH 1
756#define XX_LPBKA_LBN 0
757#define XX_LPBKA_WIDTH 1
758
759#define XX_TXDRV_CTL_REG 0x1320
760#define XX_DEQD_LBN 28
761#define XX_DEQD_WIDTH 4
762#define XX_DEQC_LBN 24
763#define XX_DEQC_WIDTH 4
764#define XX_DEQB_LBN 20
765#define XX_DEQB_WIDTH 4
766#define XX_DEQA_LBN 16
767#define XX_DEQA_WIDTH 4
768#define XX_DTXD_LBN 12
769#define XX_DTXD_WIDTH 4
770#define XX_DTXC_LBN 8
771#define XX_DTXC_WIDTH 4
772#define XX_DTXB_LBN 4
773#define XX_DTXB_WIDTH 4
774#define XX_DTXA_LBN 0
775#define XX_DTXA_WIDTH 4
776
777/* XAUI XGXS core status register */
778#define XX_CORE_STAT_REG 0x1360
779#define XX_FORCE_SIG_LBN 24
780#define XX_FORCE_SIG_WIDTH 8
781#define XX_FORCE_SIG_DECODE_FORCED 0xff
782#define XX_XGXS_LB_EN_LBN 23
783#define XX_XGXS_LB_EN_WIDTH 1
784#define XX_XGMII_LB_EN_LBN 22
785#define XX_XGMII_LB_EN_WIDTH 1
786#define XX_ALIGN_DONE_LBN 20
787#define XX_ALIGN_DONE_WIDTH 1
788#define XX_SYNC_STAT_LBN 16
789#define XX_SYNC_STAT_WIDTH 4
790#define XX_SYNC_STAT_DECODE_SYNCED 0xf
791#define XX_COMMA_DET_LBN 12
792#define XX_COMMA_DET_WIDTH 4
793#define XX_COMMA_DET_DECODE_DETECTED 0xf
794#define XX_COMMA_DET_RESET 0xf
795#define XX_CHARERR_LBN 4
796#define XX_CHARERR_WIDTH 4
797#define XX_CHARERR_RESET 0xf
798#define XX_DISPERR_LBN 0
799#define XX_DISPERR_WIDTH 4
800#define XX_DISPERR_RESET 0xf
801
802/* Receive filter table */
803#define RX_FILTER_TBL0 0xF00000
804
805/* Receive descriptor pointer table */
806#define RX_DESC_PTR_TBL_KER_A1 0x11800
807#define RX_DESC_PTR_TBL_KER_B0 0xF40000
808#define RX_DESC_PTR_TBL_KER_P0 0x900
809#define RX_ISCSI_DDIG_EN_LBN 88
810#define RX_ISCSI_DDIG_EN_WIDTH 1
811#define RX_ISCSI_HDIG_EN_LBN 87
812#define RX_ISCSI_HDIG_EN_WIDTH 1
813#define RX_DESCQ_BUF_BASE_ID_LBN 36
814#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
815#define RX_DESCQ_EVQ_ID_LBN 24
816#define RX_DESCQ_EVQ_ID_WIDTH 12
817#define RX_DESCQ_OWNER_ID_LBN 10
818#define RX_DESCQ_OWNER_ID_WIDTH 14
819#define RX_DESCQ_LABEL_LBN 5
820#define RX_DESCQ_LABEL_WIDTH 5
821#define RX_DESCQ_SIZE_LBN 3
822#define RX_DESCQ_SIZE_WIDTH 2
823#define RX_DESCQ_SIZE_4K 3
824#define RX_DESCQ_SIZE_2K 2
825#define RX_DESCQ_SIZE_1K 1
826#define RX_DESCQ_SIZE_512 0
827#define RX_DESCQ_TYPE_LBN 2
828#define RX_DESCQ_TYPE_WIDTH 1
829#define RX_DESCQ_JUMBO_LBN 1
830#define RX_DESCQ_JUMBO_WIDTH 1
831#define RX_DESCQ_EN_LBN 0
832#define RX_DESCQ_EN_WIDTH 1
833
834/* Transmit descriptor pointer table */
835#define TX_DESC_PTR_TBL_KER_A1 0x11900
836#define TX_DESC_PTR_TBL_KER_B0 0xF50000
837#define TX_DESC_PTR_TBL_KER_P0 0xa40
838#define TX_NON_IP_DROP_DIS_B0_LBN 91
839#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
840#define TX_IP_CHKSM_DIS_B0_LBN 90
841#define TX_IP_CHKSM_DIS_B0_WIDTH 1
842#define TX_TCP_CHKSM_DIS_B0_LBN 89
843#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
844#define TX_DESCQ_EN_LBN 88
845#define TX_DESCQ_EN_WIDTH 1
846#define TX_ISCSI_DDIG_EN_LBN 87
847#define TX_ISCSI_DDIG_EN_WIDTH 1
848#define TX_ISCSI_HDIG_EN_LBN 86
849#define TX_ISCSI_HDIG_EN_WIDTH 1
850#define TX_DESCQ_BUF_BASE_ID_LBN 36
851#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
852#define TX_DESCQ_EVQ_ID_LBN 24
853#define TX_DESCQ_EVQ_ID_WIDTH 12
854#define TX_DESCQ_OWNER_ID_LBN 10
855#define TX_DESCQ_OWNER_ID_WIDTH 14
856#define TX_DESCQ_LABEL_LBN 5
857#define TX_DESCQ_LABEL_WIDTH 5
858#define TX_DESCQ_SIZE_LBN 3
859#define TX_DESCQ_SIZE_WIDTH 2
860#define TX_DESCQ_SIZE_4K 3
861#define TX_DESCQ_SIZE_2K 2
862#define TX_DESCQ_SIZE_1K 1
863#define TX_DESCQ_SIZE_512 0
864#define TX_DESCQ_TYPE_LBN 1
865#define TX_DESCQ_TYPE_WIDTH 2
866
867/* Event queue pointer */
868#define EVQ_PTR_TBL_KER_A1 0x11a00
869#define EVQ_PTR_TBL_KER_B0 0xf60000
870#define EVQ_PTR_TBL_KER_P0 0x500
871#define EVQ_EN_LBN 23
872#define EVQ_EN_WIDTH 1
873#define EVQ_SIZE_LBN 20
874#define EVQ_SIZE_WIDTH 3
875#define EVQ_SIZE_32K 6
876#define EVQ_SIZE_16K 5
877#define EVQ_SIZE_8K 4
878#define EVQ_SIZE_4K 3
879#define EVQ_SIZE_2K 2
880#define EVQ_SIZE_1K 1
881#define EVQ_SIZE_512 0
882#define EVQ_BUF_BASE_ID_LBN 0
883#define EVQ_BUF_BASE_ID_WIDTH 20
884
885/* Event queue read pointer */
886#define EVQ_RPTR_REG_KER_A1 0x11b00
887#define EVQ_RPTR_REG_KER_B0 0xfa0000
888#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
889#define EVQ_RPTR_DWORD_LBN 0
890#define EVQ_RPTR_DWORD_WIDTH 14
891
892/* RSS indirection table */
893#define RX_RSS_INDIR_TBL_B0 0xFB0000
894#define RX_RSS_INDIR_ENT_B0_LBN 0
895#define RX_RSS_INDIR_ENT_B0_WIDTH 6
896
897/* Special buffer descriptors (full-mode) */
898#define BUF_FULL_TBL_KER_A1 0x8000
899#define BUF_FULL_TBL_KER_B0 0x800000
900#define IP_DAT_BUF_SIZE_LBN 50
901#define IP_DAT_BUF_SIZE_WIDTH 1
902#define IP_DAT_BUF_SIZE_8K 1
903#define IP_DAT_BUF_SIZE_4K 0
904#define BUF_ADR_REGION_LBN 48
905#define BUF_ADR_REGION_WIDTH 2
906#define BUF_ADR_FBUF_LBN 14
907#define BUF_ADR_FBUF_WIDTH 34
908#define BUF_OWNER_ID_FBUF_LBN 0
909#define BUF_OWNER_ID_FBUF_WIDTH 14
910
911/* Transmit descriptor */
912#define TX_KER_PORT_LBN 63
913#define TX_KER_PORT_WIDTH 1
914#define TX_KER_CONT_LBN 62
915#define TX_KER_CONT_WIDTH 1
916#define TX_KER_BYTE_CNT_LBN 48
917#define TX_KER_BYTE_CNT_WIDTH 14
918#define TX_KER_BUF_REGION_LBN 46
919#define TX_KER_BUF_REGION_WIDTH 2
920#define TX_KER_BUF_REGION0_DECODE 0
921#define TX_KER_BUF_REGION1_DECODE 1
922#define TX_KER_BUF_REGION2_DECODE 2
923#define TX_KER_BUF_REGION3_DECODE 3
924#define TX_KER_BUF_ADR_LBN 0
925#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
926
927/* Receive descriptor */
928#define RX_KER_BUF_SIZE_LBN 48
929#define RX_KER_BUF_SIZE_WIDTH 14
930#define RX_KER_BUF_REGION_LBN 46
931#define RX_KER_BUF_REGION_WIDTH 2
932#define RX_KER_BUF_REGION0_DECODE 0
933#define RX_KER_BUF_REGION1_DECODE 1
934#define RX_KER_BUF_REGION2_DECODE 2
935#define RX_KER_BUF_REGION3_DECODE 3
936#define RX_KER_BUF_ADR_LBN 0
937#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
938
939/**************************************************************************
940 *
941 * Falcon events
942 *
943 **************************************************************************
944 */
945
946/* Event queue entries */
947#define EV_CODE_LBN 60
948#define EV_CODE_WIDTH 4
949#define RX_IP_EV_DECODE 0
950#define TX_IP_EV_DECODE 2
951#define DRIVER_EV_DECODE 5
952#define GLOBAL_EV_DECODE 6
953#define DRV_GEN_EV_DECODE 7
954#define WHOLE_EVENT_LBN 0
955#define WHOLE_EVENT_WIDTH 64
956
957/* Receive events */
958#define RX_EV_PKT_OK_LBN 56
959#define RX_EV_PKT_OK_WIDTH 1
960#define RX_EV_PAUSE_FRM_ERR_LBN 55
961#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
962#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
963#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
964#define RX_EV_IF_FRAG_ERR_LBN 53
965#define RX_EV_IF_FRAG_ERR_WIDTH 1
966#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
967#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
968#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
969#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
970#define RX_EV_ETH_CRC_ERR_LBN 50
971#define RX_EV_ETH_CRC_ERR_WIDTH 1
972#define RX_EV_FRM_TRUNC_LBN 49
973#define RX_EV_FRM_TRUNC_WIDTH 1
974#define RX_EV_DRIB_NIB_LBN 48
975#define RX_EV_DRIB_NIB_WIDTH 1
976#define RX_EV_TOBE_DISC_LBN 47
977#define RX_EV_TOBE_DISC_WIDTH 1
978#define RX_EV_PKT_TYPE_LBN 44
979#define RX_EV_PKT_TYPE_WIDTH 3
980#define RX_EV_PKT_TYPE_ETH_DECODE 0
981#define RX_EV_PKT_TYPE_LLC_DECODE 1
982#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
983#define RX_EV_PKT_TYPE_VLAN_DECODE 3
984#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
985#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
986#define RX_EV_HDR_TYPE_LBN 42
987#define RX_EV_HDR_TYPE_WIDTH 2
988#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
989#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
990#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
991#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
992#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
993 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
994#define RX_EV_MCAST_HASH_MATCH_LBN 40
995#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
996#define RX_EV_MCAST_PKT_LBN 39
997#define RX_EV_MCAST_PKT_WIDTH 1
998#define RX_EV_Q_LABEL_LBN 32
999#define RX_EV_Q_LABEL_WIDTH 5
1000#define RX_EV_JUMBO_CONT_LBN 31
1001#define RX_EV_JUMBO_CONT_WIDTH 1
1002#define RX_EV_BYTE_CNT_LBN 16
1003#define RX_EV_BYTE_CNT_WIDTH 14
1004#define RX_EV_SOP_LBN 15
1005#define RX_EV_SOP_WIDTH 1
1006#define RX_EV_DESC_PTR_LBN 0
1007#define RX_EV_DESC_PTR_WIDTH 12
1008
1009/* Transmit events */
1010#define TX_EV_PKT_ERR_LBN 38
1011#define TX_EV_PKT_ERR_WIDTH 1
1012#define TX_EV_Q_LABEL_LBN 32
1013#define TX_EV_Q_LABEL_WIDTH 5
1014#define TX_EV_WQ_FF_FULL_LBN 15
1015#define TX_EV_WQ_FF_FULL_WIDTH 1
1016#define TX_EV_COMP_LBN 12
1017#define TX_EV_COMP_WIDTH 1
1018#define TX_EV_DESC_PTR_LBN 0
1019#define TX_EV_DESC_PTR_WIDTH 12
1020
1021/* Driver events */
1022#define DRIVER_EV_SUB_CODE_LBN 56
1023#define DRIVER_EV_SUB_CODE_WIDTH 4
1024#define DRIVER_EV_SUB_DATA_LBN 0
1025#define DRIVER_EV_SUB_DATA_WIDTH 14
1026#define TX_DESCQ_FLS_DONE_EV_DECODE 0
1027#define RX_DESCQ_FLS_DONE_EV_DECODE 1
1028#define EVQ_INIT_DONE_EV_DECODE 2
1029#define EVQ_NOT_EN_EV_DECODE 3
1030#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
1031#define SRM_UPD_DONE_EV_DECODE 5
1032#define WAKE_UP_EV_DECODE 6
1033#define TX_PKT_NON_TCP_UDP_DECODE 9
1034#define TIMER_EV_DECODE 10
1035#define RX_RECOVERY_EV_DECODE 11
1036#define RX_DSC_ERROR_EV_DECODE 14
1037#define TX_DSC_ERROR_EV_DECODE 15
1038#define DRIVER_EV_TX_DESCQ_ID_LBN 0
1039#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
1040#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
1041#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
1042#define DRIVER_EV_RX_DESCQ_ID_LBN 0
1043#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
1044#define SRM_CLR_EV_DECODE 0
1045#define SRM_UPD_EV_DECODE 1
1046#define SRM_ILLCLR_EV_DECODE 2
1047
1048/* Global events */
1049#define RX_RECOVERY_B0_LBN 12
1050#define RX_RECOVERY_B0_WIDTH 1
1051#define XG_MNT_INTR_B0_LBN 11
1052#define XG_MNT_INTR_B0_WIDTH 1
1053#define RX_RECOVERY_A1_LBN 11
1054#define RX_RECOVERY_A1_WIDTH 1
1055#define XFP_PHY_INTR_LBN 10
1056#define XFP_PHY_INTR_WIDTH 1
1057#define XG_PHY_INTR_LBN 9
1058#define XG_PHY_INTR_WIDTH 1
1059#define G_PHY1_INTR_LBN 8
1060#define G_PHY1_INTR_WIDTH 1
1061#define G_PHY0_INTR_LBN 7
1062#define G_PHY0_INTR_WIDTH 1
1063
1064/* Driver-generated test events */
1065#define EVQ_MAGIC_LBN 0
1066#define EVQ_MAGIC_WIDTH 32
1067
1068/**************************************************************************
1069 *
1070 * Falcon MAC stats
1071 *
1072 **************************************************************************
1073 *
1074 */
1075
1076#define GRxGoodOct_offset 0x0
1077#define GRxGoodOct_WIDTH 48
1078#define GRxBadOct_offset 0x8
1079#define GRxBadOct_WIDTH 48
1080#define GRxMissPkt_offset 0x10
1081#define GRxMissPkt_WIDTH 32
1082#define GRxFalseCRS_offset 0x14
1083#define GRxFalseCRS_WIDTH 32
1084#define GRxPausePkt_offset 0x18
1085#define GRxPausePkt_WIDTH 32
1086#define GRxBadPkt_offset 0x1C
1087#define GRxBadPkt_WIDTH 32
1088#define GRxUcastPkt_offset 0x20
1089#define GRxUcastPkt_WIDTH 32
1090#define GRxMcastPkt_offset 0x24
1091#define GRxMcastPkt_WIDTH 32
1092#define GRxBcastPkt_offset 0x28
1093#define GRxBcastPkt_WIDTH 32
1094#define GRxGoodLt64Pkt_offset 0x2C
1095#define GRxGoodLt64Pkt_WIDTH 32
1096#define GRxBadLt64Pkt_offset 0x30
1097#define GRxBadLt64Pkt_WIDTH 32
1098#define GRx64Pkt_offset 0x34
1099#define GRx64Pkt_WIDTH 32
1100#define GRx65to127Pkt_offset 0x38
1101#define GRx65to127Pkt_WIDTH 32
1102#define GRx128to255Pkt_offset 0x3C
1103#define GRx128to255Pkt_WIDTH 32
1104#define GRx256to511Pkt_offset 0x40
1105#define GRx256to511Pkt_WIDTH 32
1106#define GRx512to1023Pkt_offset 0x44
1107#define GRx512to1023Pkt_WIDTH 32
1108#define GRx1024to15xxPkt_offset 0x48
1109#define GRx1024to15xxPkt_WIDTH 32
1110#define GRx15xxtoJumboPkt_offset 0x4C
1111#define GRx15xxtoJumboPkt_WIDTH 32
1112#define GRxGtJumboPkt_offset 0x50
1113#define GRxGtJumboPkt_WIDTH 32
1114#define GRxFcsErr64to15xxPkt_offset 0x54
1115#define GRxFcsErr64to15xxPkt_WIDTH 32
1116#define GRxFcsErr15xxtoJumboPkt_offset 0x58
1117#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
1118#define GRxFcsErrGtJumboPkt_offset 0x5C
1119#define GRxFcsErrGtJumboPkt_WIDTH 32
1120#define GTxGoodBadOct_offset 0x80
1121#define GTxGoodBadOct_WIDTH 48
1122#define GTxGoodOct_offset 0x88
1123#define GTxGoodOct_WIDTH 48
1124#define GTxSglColPkt_offset 0x90
1125#define GTxSglColPkt_WIDTH 32
1126#define GTxMultColPkt_offset 0x94
1127#define GTxMultColPkt_WIDTH 32
1128#define GTxExColPkt_offset 0x98
1129#define GTxExColPkt_WIDTH 32
1130#define GTxDefPkt_offset 0x9C
1131#define GTxDefPkt_WIDTH 32
1132#define GTxLateCol_offset 0xA0
1133#define GTxLateCol_WIDTH 32
1134#define GTxExDefPkt_offset 0xA4
1135#define GTxExDefPkt_WIDTH 32
1136#define GTxPausePkt_offset 0xA8
1137#define GTxPausePkt_WIDTH 32
1138#define GTxBadPkt_offset 0xAC
1139#define GTxBadPkt_WIDTH 32
1140#define GTxUcastPkt_offset 0xB0
1141#define GTxUcastPkt_WIDTH 32
1142#define GTxMcastPkt_offset 0xB4
1143#define GTxMcastPkt_WIDTH 32
1144#define GTxBcastPkt_offset 0xB8
1145#define GTxBcastPkt_WIDTH 32
1146#define GTxLt64Pkt_offset 0xBC
1147#define GTxLt64Pkt_WIDTH 32
1148#define GTx64Pkt_offset 0xC0
1149#define GTx64Pkt_WIDTH 32
1150#define GTx65to127Pkt_offset 0xC4
1151#define GTx65to127Pkt_WIDTH 32
1152#define GTx128to255Pkt_offset 0xC8
1153#define GTx128to255Pkt_WIDTH 32
1154#define GTx256to511Pkt_offset 0xCC
1155#define GTx256to511Pkt_WIDTH 32
1156#define GTx512to1023Pkt_offset 0xD0
1157#define GTx512to1023Pkt_WIDTH 32
1158#define GTx1024to15xxPkt_offset 0xD4
1159#define GTx1024to15xxPkt_WIDTH 32
1160#define GTx15xxtoJumboPkt_offset 0xD8
1161#define GTx15xxtoJumboPkt_WIDTH 32
1162#define GTxGtJumboPkt_offset 0xDC
1163#define GTxGtJumboPkt_WIDTH 32
1164#define GTxNonTcpUdpPkt_offset 0xE0
1165#define GTxNonTcpUdpPkt_WIDTH 16
1166#define GTxMacSrcErrPkt_offset 0xE4
1167#define GTxMacSrcErrPkt_WIDTH 16
1168#define GTxIpSrcErrPkt_offset 0xE8
1169#define GTxIpSrcErrPkt_WIDTH 16
1170#define GDmaDone_offset 0xEC
1171#define GDmaDone_WIDTH 32
1172
1173#define XgRxOctets_offset 0x0
1174#define XgRxOctets_WIDTH 48
1175#define XgRxOctetsOK_offset 0x8
1176#define XgRxOctetsOK_WIDTH 48
1177#define XgRxPkts_offset 0x10
1178#define XgRxPkts_WIDTH 32
1179#define XgRxPktsOK_offset 0x14
1180#define XgRxPktsOK_WIDTH 32
1181#define XgRxBroadcastPkts_offset 0x18
1182#define XgRxBroadcastPkts_WIDTH 32
1183#define XgRxMulticastPkts_offset 0x1C
1184#define XgRxMulticastPkts_WIDTH 32
1185#define XgRxUnicastPkts_offset 0x20
1186#define XgRxUnicastPkts_WIDTH 32
1187#define XgRxUndersizePkts_offset 0x24
1188#define XgRxUndersizePkts_WIDTH 32
1189#define XgRxOversizePkts_offset 0x28
1190#define XgRxOversizePkts_WIDTH 32
1191#define XgRxJabberPkts_offset 0x2C
1192#define XgRxJabberPkts_WIDTH 32
1193#define XgRxUndersizeFCSerrorPkts_offset 0x30
1194#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1195#define XgRxDropEvents_offset 0x34
1196#define XgRxDropEvents_WIDTH 32
1197#define XgRxFCSerrorPkts_offset 0x38
1198#define XgRxFCSerrorPkts_WIDTH 32
1199#define XgRxAlignError_offset 0x3C
1200#define XgRxAlignError_WIDTH 32
1201#define XgRxSymbolError_offset 0x40
1202#define XgRxSymbolError_WIDTH 32
1203#define XgRxInternalMACError_offset 0x44
1204#define XgRxInternalMACError_WIDTH 32
1205#define XgRxControlPkts_offset 0x48
1206#define XgRxControlPkts_WIDTH 32
1207#define XgRxPausePkts_offset 0x4C
1208#define XgRxPausePkts_WIDTH 32
1209#define XgRxPkts64Octets_offset 0x50
1210#define XgRxPkts64Octets_WIDTH 32
1211#define XgRxPkts65to127Octets_offset 0x54
1212#define XgRxPkts65to127Octets_WIDTH 32
1213#define XgRxPkts128to255Octets_offset 0x58
1214#define XgRxPkts128to255Octets_WIDTH 32
1215#define XgRxPkts256to511Octets_offset 0x5C
1216#define XgRxPkts256to511Octets_WIDTH 32
1217#define XgRxPkts512to1023Octets_offset 0x60
1218#define XgRxPkts512to1023Octets_WIDTH 32
1219#define XgRxPkts1024to15xxOctets_offset 0x64
1220#define XgRxPkts1024to15xxOctets_WIDTH 32
1221#define XgRxPkts15xxtoMaxOctets_offset 0x68
1222#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1223#define XgRxLengthError_offset 0x6C
1224#define XgRxLengthError_WIDTH 32
1225#define XgTxPkts_offset 0x80
1226#define XgTxPkts_WIDTH 32
1227#define XgTxOctets_offset 0x88
1228#define XgTxOctets_WIDTH 48
1229#define XgTxMulticastPkts_offset 0x90
1230#define XgTxMulticastPkts_WIDTH 32
1231#define XgTxBroadcastPkts_offset 0x94
1232#define XgTxBroadcastPkts_WIDTH 32
1233#define XgTxUnicastPkts_offset 0x98
1234#define XgTxUnicastPkts_WIDTH 32
1235#define XgTxControlPkts_offset 0x9C
1236#define XgTxControlPkts_WIDTH 32
1237#define XgTxPausePkts_offset 0xA0
1238#define XgTxPausePkts_WIDTH 32
1239#define XgTxPkts64Octets_offset 0xA4
1240#define XgTxPkts64Octets_WIDTH 32
1241#define XgTxPkts65to127Octets_offset 0xA8
1242#define XgTxPkts65to127Octets_WIDTH 32
1243#define XgTxPkts128to255Octets_offset 0xAC
1244#define XgTxPkts128to255Octets_WIDTH 32
1245#define XgTxPkts256to511Octets_offset 0xB0
1246#define XgTxPkts256to511Octets_WIDTH 32
1247#define XgTxPkts512to1023Octets_offset 0xB4
1248#define XgTxPkts512to1023Octets_WIDTH 32
1249#define XgTxPkts1024to15xxOctets_offset 0xB8
1250#define XgTxPkts1024to15xxOctets_WIDTH 32
1251#define XgTxPkts1519toMaxOctets_offset 0xBC
1252#define XgTxPkts1519toMaxOctets_WIDTH 32
1253#define XgTxUndersizePkts_offset 0xC0
1254#define XgTxUndersizePkts_WIDTH 32
1255#define XgTxOversizePkts_offset 0xC4
1256#define XgTxOversizePkts_WIDTH 32
1257#define XgTxNonTcpUdpPkt_offset 0xC8
1258#define XgTxNonTcpUdpPkt_WIDTH 16
1259#define XgTxMacSrcErrPkt_offset 0xCC
1260#define XgTxMacSrcErrPkt_WIDTH 16
1261#define XgTxIpSrcErrPkt_offset 0xD0
1262#define XgTxIpSrcErrPkt_WIDTH 16
1263#define XgDmaDone_offset 0xD4
1264
1265#define FALCON_STATS_NOT_DONE 0x00000000
1266#define FALCON_STATS_DONE 0xffffffff
1267
1268/* Interrupt status register bits */
1269#define FATAL_INT_LBN 64
1270#define FATAL_INT_WIDTH 1
1271#define INT_EVQS_LBN 40
1272#define INT_EVQS_WIDTH 4
1273
1274/**************************************************************************
1275 *
1276 * Falcon non-volatile configuration
1277 *
1278 **************************************************************************
1279 */
1280
1281/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
/* Board configuration record, version 2, as stored in Falcon NVRAM.
 * (v1 is obsolete; later versions only append fields, so this prefix
 * stays valid for them — see the comment above.)
 *
 * __le16 fields are little-endian on flash; __packed because the
 * on-flash layout contains no padding.
 */
1282struct falcon_nvconfig_board_v2 {
1283	__le16 nports;
1284	u8 port0_phy_addr;	/* PHY address/type per port — presumably MDIO; confirm against users */
1285	u8 port0_phy_type;
1286	u8 port1_phy_addr;
1287	u8 port1_phy_type;
1288	__le16 asic_sub_revision;
1289	__le16 board_revision;
1290} __packed;
1291
1292/* Board configuration v3 extra information */
/* Board configuration v3 extra information.
 * Each spi_device_type word is a packed bitfield decoded with the
 * SPI_DEV_TYPE_* LBN/WIDTH definitions that follow (via
 * SPI_DEV_TYPE_FIELD()); one entry per SPI device.
 */
1293struct falcon_nvconfig_board_v3 {
1294	__le32 spi_device_type[2];
1295} __packed;
1296
1297/* Bit numbers for spi_device_type */
1298#define SPI_DEV_TYPE_SIZE_LBN 0
1299#define SPI_DEV_TYPE_SIZE_WIDTH 5
1300#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
1301#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
1302#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
1303#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
1304#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
1305#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
1306#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
1307#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
1308#define SPI_DEV_TYPE_FIELD(type, field) \
1309 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1310
1311#define NVCONFIG_OFFSET 0x300
1312
1313#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Layout of the whole non-volatile configuration region.
 *
 * The region lives at NVCONFIG_OFFSET (0x300) in flash; the trailing
 * "0x3NN" comments are the absolute flash offsets of each field.
 * board_magic_num is presumably validated against
 * NVCONFIG_BOARD_MAGIC_NUM (0xFA1C) by the reader — confirm in falcon.c.
 * __packed: must match the on-flash byte layout exactly.
 */
1314struct falcon_nvconfig {
1315	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
1316	u8 mac_address[2][8];				/* 0x310 */
1317	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
1318	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
1319	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
1320	efx_oword_t hw_init_reg;			/* 0x350 */
1321	efx_oword_t nic_stat_reg;			/* 0x360 */
1322	efx_oword_t glb_ctl_reg;			/* 0x370 */
1323	efx_oword_t srm_cfg_reg;			/* 0x380 */
1324	efx_oword_t spare_reg;				/* 0x390 */
1325	__le16 board_magic_num;			/* 0x3A0 */
1326	__le16 board_struct_ver;
1327	__le16 board_checksum;
1328	struct falcon_nvconfig_board_v2 board_v2;
1329	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
1330	struct falcon_nvconfig_board_v3 board_v3;
1331} __packed;
1332
1333#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
deleted file mode 100644
index 8883092dae97..000000000000
--- a/drivers/net/sfc/falcon_io.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * Falcon hardware access
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
26 * registers) atomic writes which necessitates locking.
27 * Under normal operation few writes to the Falcon BAR are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the Falcon BIU
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49/* Special buffer descriptors (Falcon SRAM) */
50#define BUF_TBL_KER_A1 0x18000
51#define BUF_TBL_KER_B0 0x800000
52
53
54#if BITS_PER_LONG == 64
55#define FALCON_USE_QWORD_IO 1
56#endif
57
58#ifdef FALCON_USE_QWORD_IO
/* Raw 64-bit MMIO write into the Falcon BAR (64-bit builds only, per
 * the BITS_PER_LONG test above).  __raw_* accessors do no byte
 * swapping and carry no implicit barriers — callers supply explicit
 * wmb()/rmb() and locking; hence the __force cast from __le64. */
59static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
60 unsigned int reg)
61{
62 __raw_writeq((__force u64)value, efx->membase + reg);
63}
/* Raw 64-bit MMIO read; counterpart of _falcon_writeq() above. */
64static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
65{
66 return (__force __le64)__raw_readq(efx->membase + reg);
67}
68#endif
69
/* Raw 32-bit MMIO write into the Falcon BAR.  Available on all builds
 * (the qword variants above are 64-bit-only).  No byte swap, no
 * implicit barriers — ordering and locking are the caller's job. */
70static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
71 unsigned int reg)
72{
73 __raw_writel((__force u32)value, efx->membase + reg);
74}
/* Raw 32-bit MMIO read; counterpart of _falcon_writel() above. */
75static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
76{
77 return (__force __le32)__raw_readl(efx->membase + reg);
78}
79
80/* Write a normal 16-byte Falcon register.
 *
 * The whole access is serialised by efx->biu_lock (irqsave form) so
 * the 16 bytes reach the BIU as one atomic update.  The final dword
 * (DW3, or the second qword) is written last after a wmb(): the BIU
 * "collects" the partial writes and commits on that final dword (see
 * the locking notes at the top of this file).
 */
81static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
82 unsigned int reg)
83{
84 unsigned long flags;
85
86 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
87 EFX_OWORD_VAL(*value));
88
89 spin_lock_irqsave(&efx->biu_lock, flags);
90#ifdef FALCON_USE_QWORD_IO
91 _falcon_writeq(efx, value->u64[0], reg + 0);
92 wmb(); /* low half must land before the committing high half */
93 _falcon_writeq(efx, value->u64[1], reg + 8);
94#else
95 _falcon_writel(efx, value->u32[0], reg + 0);
96 _falcon_writel(efx, value->u32[1], reg + 4);
97 _falcon_writel(efx, value->u32[2], reg + 8);
98 wmb(); /* DW3 last - it triggers the BIU to commit the collected write */
99 _falcon_writel(efx, value->u32[3], reg + 12);
100#endif
101 mmiowb(); /* keep the MMIO ordered before the unlock across CPUs */
102 spin_unlock_irqrestore(&efx->biu_lock, flags);
103}
104
105/* Write an 8-byte Falcon SRAM (buffer table) entry.
 *
 * 'index' is scaled by the entry size and added to the per-NIC-type
 * buffer table base to form the register address.  As with
 * falcon_write(), biu_lock keeps the 8-byte update atomic; in the
 * dword path the second dword is written last, after a wmb().
 */
106static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
107 unsigned int index)
108{
109 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
110 unsigned long flags;
111
112 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
113 reg, EFX_QWORD_VAL(*value));
114
115 spin_lock_irqsave(&efx->biu_lock, flags);
116#ifdef FALCON_USE_QWORD_IO
117 _falcon_writeq(efx, value->u64[0], reg + 0); /* single atomic qword */
118#else
119 _falcon_writel(efx, value->u32[0], reg + 0);
120 wmb(); /* low dword must be collected before the committing high dword */
121 _falcon_writel(efx, value->u32[1], reg + 4);
122#endif
123 mmiowb(); /* keep the MMIO ordered before the unlock across CPUs */
124 spin_unlock_irqrestore(&efx->biu_lock, flags);
125}
126
127/* Write a dword to a Falcon register that allows partial writes.
128 *
129 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
130 * TX_DESC_UPD_REG) can be written to as a single dword, so no BIU
131 * "collection" is involved.  This allows for lockless writes - safe
 * even in the middle of a locked 8/16-byte access (see file header).
132 */
133static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
134 unsigned int reg)
135{
136 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
137 reg, EFX_DWORD_VAL(*value));
138
139 /* No lock required: single dword is atomic by itself */
140 _falcon_writel(efx, value->u32[0], reg);
141}
142
143/* Read an entire 16-byte Falcon register in one go.
144 *
145 * Locks biu_lock so the four dwords form a consistent snapshot (the
146 * BIU latches all 128 bits into a shadow register).  It is essential
147 * to read the first dword first, as this prompts Falcon to load the
148 * current value into that shadow register; hence the rmb() after it.
 */
149static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
150 unsigned int reg)
151{
152 unsigned long flags;
153
154 spin_lock_irqsave(&efx->biu_lock, flags);
155 value->u32[0] = _falcon_readl(efx, reg + 0);
156 rmb(); /* DW0 must complete first - it latches the shadow register */
157 value->u32[1] = _falcon_readl(efx, reg + 4);
158 value->u32[2] = _falcon_readl(efx, reg + 8);
159 value->u32[3] = _falcon_readl(efx, reg + 12);
160 spin_unlock_irqrestore(&efx->biu_lock, flags);
161
162 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
163 EFX_OWORD_VAL(*value));
164}
165
166/* Read an 8-byte Falcon SRAM (buffer table) entry in one go.
 * Address formation mirrors falcon_write_sram(); biu_lock keeps the
 * two-dword read consistent, and the rmb() orders DW0 before DW1. */
167static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
168 unsigned int index)
169{
170 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
171 unsigned long flags;
172
173 spin_lock_irqsave(&efx->biu_lock, flags);
174#ifdef FALCON_USE_QWORD_IO
175 value->u64[0] = _falcon_readq(efx, reg + 0); /* single atomic qword */
176#else
177 value->u32[0] = _falcon_readl(efx, reg + 0);
178 rmb(); /* first dword must complete before the second is read */
179 value->u32[1] = _falcon_readl(efx, reg + 4);
180#endif
181 spin_unlock_irqrestore(&efx->biu_lock, flags);
182
183 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
184 reg, EFX_QWORD_VAL(*value));
185}
186
187/* Read a dword from a Falcon register that allows partial writes (sic).
 * Lockless, like falcon_writel(): a single dword access needs no BIU
 * collection and hence no biu_lock. */
188static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
189 unsigned int reg)
190{
191 value->u32[0] = _falcon_readl(efx, reg);
192 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
193 reg, EFX_DWORD_VAL(*value));
194}
195
196/* Write a 16-byte register forming part of a table: 'reg' is the
 * table base, 'index' selects the entry (oword stride). */
197static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
198 unsigned int reg, unsigned int index)
199{
200 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
201}
202
203/* Read a 16-byte register forming part of a table: 'reg' is the
 * table base, 'index' selects the entry (oword stride). */
204static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
205 unsigned int reg, unsigned int index)
206{
207 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
208}
209
210/* Write a dword register forming part of a table.  Note the stride is
 * still sizeof(efx_oword_t) (16 bytes) even though only a dword is
 * written - table entries occupy full oword slots. */
211static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
212 unsigned int reg, unsigned int index)
213{
214 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
215}
216
217/* Page-mapped register block size */
218#define FALCON_PAGE_BLOCK_SIZE 0x2000
219
220/* Calculate offset to page-mapped register block */
221#define FALCON_PAGED_REG(page, reg) \
222 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
223
224/* As for falcon_write(), but for a page-mapped register: the address
 * is offset by page * FALCON_PAGE_BLOCK_SIZE via FALCON_PAGED_REG(). */
225static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
226 unsigned int reg, unsigned int page)
227{
228 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
229}
230
231/* As for falcon_writel(), but for a page-mapped register: the address
 * is offset by page * FALCON_PAGE_BLOCK_SIZE via FALCON_PAGED_REG(). */
232static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
233 unsigned int reg, unsigned int page)
234{
235 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
236}
237
238/* Write a dword to a Falcon page-mapped register with an extra lock.
239 *
240 * As for falcon_writel_page(), but for a register that suffers from
241 * SFC bug 3181.  Only page 0 shares address space with the locked
242 * 8/16-byte registers, so only writes to page 0 take biu_lock to keep
243 * the BIU collector from being confused; other pages stay lockless.
 */
244static inline void falcon_writel_page_locked(struct efx_nic *efx,
245 efx_dword_t *value,
246 unsigned int reg,
247 unsigned int page)
248{
249 unsigned long flags = 0; /* init needed: irqsave only runs for page 0 */
250
251 if (page == 0)
252 spin_lock_irqsave(&efx->biu_lock, flags);
253 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
254 if (page == 0)
255 spin_unlock_irqrestore(&efx->biu_lock, flags);
256}
257
258#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bec52ca37eee..3da933f8f079 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -11,13 +11,12 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include "net_driver.h" 12#include "net_driver.h"
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "nic.h"
15#include "falcon_hwdefs.h" 15#include "regs.h"
16#include "falcon_io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h" 19#include "phy.h"
20#include "boards.h"
21#include "workarounds.h" 20#include "workarounds.h"
22 21
23/************************************************************************** 22/**************************************************************************
@@ -36,43 +35,47 @@ static void falcon_setup_xaui(struct efx_nic *efx)
36 if (efx->phy_type == PHY_TYPE_NONE) 35 if (efx->phy_type == PHY_TYPE_NONE)
37 return; 36 return;
38 37
39 falcon_read(efx, &sdctl, XX_SD_CTL_REG); 38 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
40 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); 39 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
41 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); 40 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
42 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); 41 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
43 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); 42 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
44 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); 43 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
45 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); 44 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
46 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); 45 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
47 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); 46 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
48 falcon_write(efx, &sdctl, XX_SD_CTL_REG); 47 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
49 48
50 EFX_POPULATE_OWORD_8(txdrv, 49 EFX_POPULATE_OWORD_8(txdrv,
51 XX_DEQD, XX_TXDRV_DEQ_DEFAULT, 50 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
52 XX_DEQC, XX_TXDRV_DEQ_DEFAULT, 51 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
53 XX_DEQB, XX_TXDRV_DEQ_DEFAULT, 52 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
54 XX_DEQA, XX_TXDRV_DEQ_DEFAULT, 53 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
55 XX_DTXD, XX_TXDRV_DTX_DEFAULT, 54 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
56 XX_DTXC, XX_TXDRV_DTX_DEFAULT, 55 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
57 XX_DTXB, XX_TXDRV_DTX_DEFAULT, 56 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
58 XX_DTXA, XX_TXDRV_DTX_DEFAULT); 57 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
59 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); 58 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
60} 59}
61 60
62int falcon_reset_xaui(struct efx_nic *efx) 61int falcon_reset_xaui(struct efx_nic *efx)
63{ 62{
63 struct falcon_nic_data *nic_data = efx->nic_data;
64 efx_oword_t reg; 64 efx_oword_t reg;
65 int count; 65 int count;
66 66
67 /* Don't fetch MAC statistics over an XMAC reset */
68 WARN_ON(nic_data->stats_disable_count == 0);
69
67 /* Start reset sequence */ 70 /* Start reset sequence */
68 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); 71 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
69 falcon_write(efx, &reg, XX_PWR_RST_REG); 72 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
70 73
71 /* Wait up to 10 ms for completion, then reinitialise */ 74 /* Wait up to 10 ms for completion, then reinitialise */
72 for (count = 0; count < 1000; count++) { 75 for (count = 0; count < 1000; count++) {
73 falcon_read(efx, &reg, XX_PWR_RST_REG); 76 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
74 if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && 77 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
75 EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { 78 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
76 falcon_setup_xaui(efx); 79 falcon_setup_xaui(efx);
77 return 0; 80 return 0;
78 } 81 }
@@ -86,30 +89,30 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
86{ 89{
87 efx_oword_t reg; 90 efx_oword_t reg;
88 91
89 if ((falcon_rev(efx) != FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
90 return; 93 return;
91 94
92 /* We expect xgmii faults if the wireside link is up */ 95 /* We expect xgmii faults if the wireside link is up */
93 if (!EFX_WORKAROUND_5147(efx) || !efx->link_up) 96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
94 return; 97 return;
95 98
96 /* We can only use this interrupt to signal the negative edge of 99 /* We can only use this interrupt to signal the negative edge of
97 * xaui_align [we have to poll the positive edge]. */ 100 * xaui_align [we have to poll the positive edge]. */
98 if (!efx->mac_up) 101 if (efx->xmac_poll_required)
99 return; 102 return;
100 103
101 /* Flush the ISR */ 104 /* Flush the ISR */
102 if (enable) 105 if (enable)
103 falcon_read(efx, &reg, XM_MGT_INT_REG_B0); 106 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
104 107
105 EFX_POPULATE_OWORD_2(reg, 108 EFX_POPULATE_OWORD_2(reg,
106 XM_MSK_RMTFLT, !enable, 109 FRF_AB_XM_MSK_RMTFLT, !enable,
107 XM_MSK_LCLFLT, !enable); 110 FRF_AB_XM_MSK_LCLFLT, !enable);
108 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0); 111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
109} 112}
110 113
111/* Get status of XAUI link */ 114/* Get status of XAUI link */
112bool falcon_xaui_link_ok(struct efx_nic *efx) 115static bool falcon_xaui_link_ok(struct efx_nic *efx)
113{ 116{
114 efx_oword_t reg; 117 efx_oword_t reg;
115 bool align_done, link_ok = false; 118 bool align_done, link_ok = false;
@@ -119,84 +122,79 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
119 return true; 122 return true;
120 123
121 /* Read link status */ 124 /* Read link status */
122 falcon_read(efx, &reg, XX_CORE_STAT_REG); 125 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
123 126
124 align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE); 127 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
125 sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT); 128 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
126 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) 129 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
127 link_ok = true; 130 link_ok = true;
128 131
129 /* Clear link status ready for next read */ 132 /* Clear link status ready for next read */
130 EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); 133 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
131 EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); 134 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
132 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 135 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
133 falcon_write(efx, &reg, XX_CORE_STAT_REG); 136 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
134 137
135 /* If the link is up, then check the phy side of the xaui link */ 138 /* If the link is up, then check the phy side of the xaui link */
136 if (efx->link_up && link_ok) 139 if (efx->link_state.up && link_ok)
137 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS)) 140 if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
138 link_ok = efx_mdio_phyxgxs_lane_sync(efx); 141 link_ok = efx_mdio_phyxgxs_lane_sync(efx);
139 142
140 return link_ok; 143 return link_ok;
141} 144}
142 145
143static void falcon_reconfigure_xmac_core(struct efx_nic *efx) 146void falcon_reconfigure_xmac_core(struct efx_nic *efx)
144{ 147{
145 unsigned int max_frame_len; 148 unsigned int max_frame_len;
146 efx_oword_t reg; 149 efx_oword_t reg;
147 bool rx_fc = !!(efx->link_fc & EFX_FC_RX); 150 bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
151 bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
148 152
149 /* Configure MAC - cut-thru mode is hard wired on */ 153 /* Configure MAC - cut-thru mode is hard wired on */
150 EFX_POPULATE_DWORD_3(reg, 154 EFX_POPULATE_OWORD_3(reg,
151 XM_RX_JUMBO_MODE, 1, 155 FRF_AB_XM_RX_JUMBO_MODE, 1,
152 XM_TX_STAT_EN, 1, 156 FRF_AB_XM_TX_STAT_EN, 1,
153 XM_RX_STAT_EN, 1); 157 FRF_AB_XM_RX_STAT_EN, 1);
154 falcon_write(efx, &reg, XM_GLB_CFG_REG); 158 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
155 159
156 /* Configure TX */ 160 /* Configure TX */
157 EFX_POPULATE_DWORD_6(reg, 161 EFX_POPULATE_OWORD_6(reg,
158 XM_TXEN, 1, 162 FRF_AB_XM_TXEN, 1,
159 XM_TX_PRMBL, 1, 163 FRF_AB_XM_TX_PRMBL, 1,
160 XM_AUTO_PAD, 1, 164 FRF_AB_XM_AUTO_PAD, 1,
161 XM_TXCRC, 1, 165 FRF_AB_XM_TXCRC, 1,
162 XM_FCNTL, 1, 166 FRF_AB_XM_FCNTL, tx_fc,
163 XM_IPG, 0x3); 167 FRF_AB_XM_IPG, 0x3);
164 falcon_write(efx, &reg, XM_TX_CFG_REG); 168 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
165 169
166 /* Configure RX */ 170 /* Configure RX */
167 EFX_POPULATE_DWORD_5(reg, 171 EFX_POPULATE_OWORD_5(reg,
168 XM_RXEN, 1, 172 FRF_AB_XM_RXEN, 1,
169 XM_AUTO_DEPAD, 0, 173 FRF_AB_XM_AUTO_DEPAD, 0,
170 XM_ACPT_ALL_MCAST, 1, 174 FRF_AB_XM_ACPT_ALL_MCAST, 1,
171 XM_ACPT_ALL_UCAST, efx->promiscuous, 175 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
172 XM_PASS_CRC_ERR, 1); 176 FRF_AB_XM_PASS_CRC_ERR, 1);
173 falcon_write(efx, &reg, XM_RX_CFG_REG); 177 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
174 178
175 /* Set frame length */ 179 /* Set frame length */
176 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 180 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
177 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); 181 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
178 falcon_write(efx, &reg, XM_RX_PARAM_REG); 182 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
179 EFX_POPULATE_DWORD_2(reg, 183 EFX_POPULATE_OWORD_2(reg,
180 XM_MAX_TX_FRM_SIZE, max_frame_len, 184 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
181 XM_TX_JUMBO_MODE, 1); 185 FRF_AB_XM_TX_JUMBO_MODE, 1);
182 falcon_write(efx, &reg, XM_TX_PARAM_REG); 186 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
183 187
184 EFX_POPULATE_DWORD_2(reg, 188 EFX_POPULATE_OWORD_2(reg,
185 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ 189 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
186 XM_DIS_FCNTL, !rx_fc); 190 FRF_AB_XM_DIS_FCNTL, !rx_fc);
187 falcon_write(efx, &reg, XM_FC_REG); 191 efx_writeo(efx, &reg, FR_AB_XM_FC);
188 192
189 /* Set MAC address */ 193 /* Set MAC address */
190 EFX_POPULATE_DWORD_4(reg, 194 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
191 XM_ADR_0, efx->net_dev->dev_addr[0], 195 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
192 XM_ADR_1, efx->net_dev->dev_addr[1], 196 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
193 XM_ADR_2, efx->net_dev->dev_addr[2], 197 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
194 XM_ADR_3, efx->net_dev->dev_addr[3]);
195 falcon_write(efx, &reg, XM_ADR_LO_REG);
196 EFX_POPULATE_DWORD_2(reg,
197 XM_ADR_4, efx->net_dev->dev_addr[4],
198 XM_ADR_5, efx->net_dev->dev_addr[5]);
199 falcon_write(efx, &reg, XM_ADR_HI_REG);
200} 198}
201 199
202static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 200static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
@@ -212,12 +210,13 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
212 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 210 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
213 bool reset_xgxs; 211 bool reset_xgxs;
214 212
215 falcon_read(efx, &reg, XX_CORE_STAT_REG); 213 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
216 old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN); 214 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
217 old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN); 215 old_xgmii_loopback =
216 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
218 217
219 falcon_read(efx, &reg, XX_SD_CTL_REG); 218 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
220 old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA); 219 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
221 220
222 /* The PHY driver may have turned XAUI off */ 221 /* The PHY driver may have turned XAUI off */
223 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 222 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
@@ -228,45 +227,55 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
228 falcon_reset_xaui(efx); 227 falcon_reset_xaui(efx);
229 } 228 }
230 229
231 falcon_read(efx, &reg, XX_CORE_STAT_REG); 230 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
232 EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG, 231 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
233 (xgxs_loopback || xaui_loopback) ? 232 (xgxs_loopback || xaui_loopback) ?
234 XX_FORCE_SIG_DECODE_FORCED : 0); 233 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
235 EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 234 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
236 EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 235 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
237 falcon_write(efx, &reg, XX_CORE_STAT_REG); 236 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
238 237
239 falcon_read(efx, &reg, XX_SD_CTL_REG); 238 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
240 EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 239 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
241 EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 240 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
242 EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 241 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
243 EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 242 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
244 falcon_write(efx, &reg, XX_SD_CTL_REG); 243 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
245} 244}
246 245
247 246
248/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 247/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
249 * to come back up. Bash it until it comes back up */ 248static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
250static void falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
251{ 249{
252 efx->mac_up = falcon_xaui_link_ok(efx); 250 bool mac_up = falcon_xaui_link_ok(efx);
253 251
254 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 252 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
255 efx_phy_mode_disabled(efx->phy_mode)) 253 efx_phy_mode_disabled(efx->phy_mode))
256 /* XAUI link is expected to be down */ 254 /* XAUI link is expected to be down */
257 return; 255 return mac_up;
258 256
259 while (!efx->mac_up && tries) { 257 falcon_stop_nic_stats(efx);
258
259 while (!mac_up && tries) {
260 EFX_LOG(efx, "bashing xaui\n"); 260 EFX_LOG(efx, "bashing xaui\n");
261 falcon_reset_xaui(efx); 261 falcon_reset_xaui(efx);
262 udelay(200); 262 udelay(200);
263 263
264 efx->mac_up = falcon_xaui_link_ok(efx); 264 mac_up = falcon_xaui_link_ok(efx);
265 --tries; 265 --tries;
266 } 266 }
267
268 falcon_start_nic_stats(efx);
269
270 return mac_up;
267} 271}
268 272
269static void falcon_reconfigure_xmac(struct efx_nic *efx) 273static bool falcon_xmac_check_fault(struct efx_nic *efx)
274{
275 return !falcon_check_xaui_link_up(efx, 5);
276}
277
278static int falcon_reconfigure_xmac(struct efx_nic *efx)
270{ 279{
271 falcon_mask_status_intr(efx, false); 280 falcon_mask_status_intr(efx, false);
272 281
@@ -275,18 +284,15 @@ static void falcon_reconfigure_xmac(struct efx_nic *efx)
275 284
276 falcon_reconfigure_mac_wrapper(efx); 285 falcon_reconfigure_mac_wrapper(efx);
277 286
278 falcon_check_xaui_link_up(efx, 5); 287 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
279 falcon_mask_status_intr(efx, true); 288 falcon_mask_status_intr(efx, true);
289
290 return 0;
280} 291}
281 292
282static void falcon_update_stats_xmac(struct efx_nic *efx) 293static void falcon_update_stats_xmac(struct efx_nic *efx)
283{ 294{
284 struct efx_mac_stats *mac_stats = &efx->mac_stats; 295 struct efx_mac_stats *mac_stats = &efx->mac_stats;
285 int rc;
286
287 rc = falcon_dma_stats(efx, XgDmaDone_offset);
288 if (rc)
289 return;
290 296
291 /* Update MAC stats from DMAed values */ 297 /* Update MAC stats from DMAed values */
292 FALCON_STAT(efx, XgRxOctets, rx_bytes); 298 FALCON_STAT(efx, XgRxOctets, rx_bytes);
@@ -344,35 +350,19 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
344 mac_stats->rx_control * 64); 350 mac_stats->rx_control * 64);
345} 351}
346 352
347static void falcon_xmac_irq(struct efx_nic *efx) 353void falcon_poll_xmac(struct efx_nic *efx)
348{
349 /* The XGMII link has a transient fault, which indicates either:
350 * - there's a transient xgmii fault
351 * - falcon's end of the xaui link may need a kick
352 * - the wire-side link may have gone down, but the lasi/poll()
353 * hasn't noticed yet.
354 *
355 * We only want to even bother polling XAUI if we're confident it's
356 * not (1) or (3). In both cases, the only reliable way to spot this
357 * is to wait a bit. We do this here by forcing the mac link state
358 * to down, and waiting for the mac poll to come round and check
359 */
360 efx->mac_up = false;
361}
362
363static void falcon_poll_xmac(struct efx_nic *efx)
364{ 354{
365 if (!EFX_WORKAROUND_5147(efx) || !efx->link_up || efx->mac_up) 355 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
356 !efx->xmac_poll_required)
366 return; 357 return;
367 358
368 falcon_mask_status_intr(efx, false); 359 falcon_mask_status_intr(efx, false);
369 falcon_check_xaui_link_up(efx, 1); 360 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
370 falcon_mask_status_intr(efx, true); 361 falcon_mask_status_intr(efx, true);
371} 362}
372 363
373struct efx_mac_operations falcon_xmac_operations = { 364struct efx_mac_operations falcon_xmac_operations = {
374 .reconfigure = falcon_reconfigure_xmac, 365 .reconfigure = falcon_reconfigure_xmac,
375 .update_stats = falcon_update_stats_xmac, 366 .update_stats = falcon_update_stats_xmac,
376 .irq = falcon_xmac_irq, 367 .check_fault = falcon_xmac_check_fault,
377 .poll = falcon_poll_xmac,
378}; 368};
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
deleted file mode 100644
index dfccaa7b573e..000000000000
--- a/drivers/net/sfc/gmii.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
new file mode 100644
index 000000000000..b89177c27f4a
--- /dev/null
+++ b/drivers/net/sfc/io.h
@@ -0,0 +1,256 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_IO_H
12#define EFX_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * NIC register I/O
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
26 * which necessitates locking.
27 * Under normal operation few writes to NIC registers are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the BIU.
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49#if BITS_PER_LONG == 64
50#define EFX_USE_QWORD_IO 1
51#endif
52
53#ifdef EFX_USE_QWORD_IO
54static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
55 unsigned int reg)
56{
57 __raw_writeq((__force u64)value, efx->membase + reg);
58}
59static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
60{
61 return (__force __le64)__raw_readq(efx->membase + reg);
62}
63#endif
64
65static inline void _efx_writed(struct efx_nic *efx, __le32 value,
66 unsigned int reg)
67{
68 __raw_writel((__force u32)value, efx->membase + reg);
69}
70static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
71{
72 return (__force __le32)__raw_readl(efx->membase + reg);
73}
74
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg)
78{
79 unsigned long flags __attribute__ ((unused));
80
81 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
82 EFX_OWORD_VAL(*value));
83
84 spin_lock_irqsave(&efx->biu_lock, flags);
85#ifdef EFX_USE_QWORD_IO
86 _efx_writeq(efx, value->u64[0], reg + 0);
87 wmb();
88 _efx_writeq(efx, value->u64[1], reg + 8);
89#else
90 _efx_writed(efx, value->u32[0], reg + 0);
91 _efx_writed(efx, value->u32[1], reg + 4);
92 _efx_writed(efx, value->u32[2], reg + 8);
93 wmb();
94 _efx_writed(efx, value->u32[3], reg + 12);
95#endif
96 mmiowb();
97 spin_unlock_irqrestore(&efx->biu_lock, flags);
98}
99
100/* Write an 8-byte NIC SRAM entry through the supplied mapping,
101 * locking as appropriate. */
102static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
103 efx_qword_t *value, unsigned int index)
104{
105 unsigned int addr = index * sizeof(*value);
106 unsigned long flags __attribute__ ((unused));
107
108 EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
109 addr, EFX_QWORD_VAL(*value));
110
111 spin_lock_irqsave(&efx->biu_lock, flags);
112#ifdef EFX_USE_QWORD_IO
113 __raw_writeq((__force u64)value->u64[0], membase + addr);
114#else
115 __raw_writel((__force u32)value->u32[0], membase + addr);
116 wmb();
117 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
118#endif
119 mmiowb();
120 spin_unlock_irqrestore(&efx->biu_lock, flags);
121}
122
123/* Write dword to NIC register that allows partial writes
124 *
125 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
126 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
127 * for lockless writes.
128 */
129static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
130 unsigned int reg)
131{
132 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
133 reg, EFX_DWORD_VAL(*value));
134
135 /* No lock required */
136 _efx_writed(efx, value->u32[0], reg);
137}
138
139/* Read from a NIC register
140 *
141 * This reads an entire 16-byte register in one go, locking as
142 * appropriate. It is essential to read the first dword first, as this
143 * prompts the NIC to load the current value into the shadow register.
144 */
145static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
146 unsigned int reg)
147{
148 unsigned long flags __attribute__ ((unused));
149
150 spin_lock_irqsave(&efx->biu_lock, flags);
151 value->u32[0] = _efx_readd(efx, reg + 0);
152 rmb();
153 value->u32[1] = _efx_readd(efx, reg + 4);
154 value->u32[2] = _efx_readd(efx, reg + 8);
155 value->u32[3] = _efx_readd(efx, reg + 12);
156 spin_unlock_irqrestore(&efx->biu_lock, flags);
157
158 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
159 EFX_OWORD_VAL(*value));
160}
161
162/* Read an 8-byte SRAM entry through supplied mapping,
163 * locking as appropriate. */
164static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
165 efx_qword_t *value, unsigned int index)
166{
167 unsigned int addr = index * sizeof(*value);
168 unsigned long flags __attribute__ ((unused));
169
170 spin_lock_irqsave(&efx->biu_lock, flags);
171#ifdef EFX_USE_QWORD_IO
172 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
173#else
174 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
175 rmb();
176 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
177#endif
178 spin_unlock_irqrestore(&efx->biu_lock, flags);
179
180 EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
181 addr, EFX_QWORD_VAL(*value));
182}
183
184/* Read dword from register that allows partial writes (sic) */
185static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
186 unsigned int reg)
187{
188 value->u32[0] = _efx_readd(efx, reg);
189 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
190 reg, EFX_DWORD_VAL(*value));
191}
192
193/* Write to a register forming part of a table */
194static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
195 unsigned int reg, unsigned int index)
196{
197 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
198}
199
200/* Read to a register forming part of a table */
201static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
202 unsigned int reg, unsigned int index)
203{
204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
205}
206
207/* Write to a dword register forming part of a table */
208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
209 unsigned int reg, unsigned int index)
210{
211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
212}
213
214/* Page-mapped register block size */
215#define EFX_PAGE_BLOCK_SIZE 0x2000
216
217/* Calculate offset to page-mapped register block */
218#define EFX_PAGED_REG(page, reg) \
219 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
220
221/* As for efx_writeo(), but for a page-mapped register. */
222static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
223 unsigned int reg, unsigned int page)
224{
225 efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
226}
227
228/* As for efx_writed(), but for a page-mapped register. */
229static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
230 unsigned int reg, unsigned int page)
231{
232 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
233}
234
235/* Write dword to page-mapped register with an extra lock.
236 *
237 * As for efx_writed_page(), but for a register that suffers from
238 * SFC bug 3181. Take out a lock so the BIU collector cannot be
239 * confused. */
240static inline void efx_writed_page_locked(struct efx_nic *efx,
241 efx_dword_t *value,
242 unsigned int reg,
243 unsigned int page)
244{
245 unsigned long flags __attribute__ ((unused));
246
247 if (page == 0) {
248 spin_lock_irqsave(&efx->biu_lock, flags);
249 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
250 spin_unlock_irqrestore(&efx->biu_lock, flags);
251 } else {
252 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
253 }
254}
255
256#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index 4e7074278fe1..f1aa5f374890 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -15,5 +15,9 @@
15 15
16extern struct efx_mac_operations falcon_gmac_operations; 16extern struct efx_mac_operations falcon_gmac_operations;
17extern struct efx_mac_operations falcon_xmac_operations; 17extern struct efx_mac_operations falcon_xmac_operations;
18extern struct efx_mac_operations efx_mcdi_mac_operations;
19extern void falcon_reconfigure_xmac_core(struct efx_nic *efx);
20extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
21 u32 dma_len, int enable, int clear);
18 22
19#endif 23#endif
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
new file mode 100644
index 000000000000..683353b904c7
--- /dev/null
+++ b/drivers/net/sfc/mcdi.c
@@ -0,0 +1,1112 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
11#include "net_driver.h"
12#include "nic.h"
13#include "io.h"
14#include "regs.h"
15#include "mcdi_pcol.h"
16#include "phy.h"
17
18/**************************************************************************
19 *
20 * Management-Controller-to-Driver Interface
21 *
22 **************************************************************************
23 */
24
25/* Software-defined structure to the shared-memory */
26#define CMD_NOTIFY_PORT0 0
27#define CMD_NOTIFY_PORT1 4
28#define CMD_PDU_PORT0 0x008
29#define CMD_PDU_PORT1 0x108
30#define REBOOT_FLAG_PORT0 0x3f8
31#define REBOOT_FLAG_PORT1 0x3fc
32
33#define MCDI_RPC_TIMEOUT 10 /*seconds */
34
35#define MCDI_PDU(efx) \
36 (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
37#define MCDI_DOORBELL(efx) \
38 (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
39#define MCDI_REBOOT_FLAG(efx) \
40 (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
41
42#define SEQ_MASK \
43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
44
45static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
46{
47 struct siena_nic_data *nic_data;
48 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
49 nic_data = efx->nic_data;
50 return &nic_data->mcdi;
51}
52
53void efx_mcdi_init(struct efx_nic *efx)
54{
55 struct efx_mcdi_iface *mcdi;
56
57 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
58 return;
59
60 mcdi = efx_mcdi(efx);
61 init_waitqueue_head(&mcdi->wq);
62 spin_lock_init(&mcdi->iface_lock);
63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
64 mcdi->mode = MCDI_MODE_POLL;
65
66 (void) efx_mcdi_poll_reboot(efx);
67}
68
69static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
70 const u8 *inbuf, size_t inlen)
71{
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
75 unsigned int i;
76 efx_dword_t hdr;
77 u32 xflags, seqno;
78
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
80 BUG_ON(inlen & 3 || inlen >= 0x100);
81
82 seqno = mcdi->seqno & SEQ_MASK;
83 xflags = 0;
84 if (mcdi->mode == MCDI_MODE_EVENTS)
85 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
86
87 EFX_POPULATE_DWORD_6(hdr,
88 MCDI_HEADER_RESPONSE, 0,
89 MCDI_HEADER_RESYNC, 1,
90 MCDI_HEADER_CODE, cmd,
91 MCDI_HEADER_DATALEN, inlen,
92 MCDI_HEADER_SEQ, seqno,
93 MCDI_HEADER_XFLAGS, xflags);
94
95 efx_writed(efx, &hdr, pdu);
96
97 for (i = 0; i < inlen; i += 4)
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
99
100 /* Ensure the payload is written out before the header */
101 wmb();
102
103 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
105}
106
107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
108{
109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
111 int i;
112
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
114 BUG_ON(outlen & 3 || outlen >= 0x100);
115
116 for (i = 0; i < outlen; i += 4)
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
118}
119
120static int efx_mcdi_poll(struct efx_nic *efx)
121{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int time, finish;
124 unsigned int respseq, respcmd, error;
125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
126 unsigned int rc, spins;
127 efx_dword_t reg;
128
129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
130 rc = efx_mcdi_poll_reboot(efx);
131 if (rc)
132 goto out;
133
134 /* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
135 * because generally mcdi responses are fast. After that, back off
136 * and poll once a jiffy (approximately)
137 */
138 spins = TICK_USEC;
139 finish = get_seconds() + MCDI_RPC_TIMEOUT;
140
141 while (1) {
142 if (spins != 0) {
143 --spins;
144 udelay(1);
145 } else
146 schedule();
147
148 time = get_seconds();
149
150 rmb();
151 efx_readd(efx, &reg, pdu);
152
153 /* All 1's indicates that shared memory is in reset (and is
154 * not a valid header). Wait for it to come out reset before
155 * completing the command */
156 if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
157 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
158 break;
159
160 if (time >= finish)
161 return -ETIMEDOUT;
162 }
163
164 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
165 respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
166 respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
167 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
168
169 if (error && mcdi->resplen == 0) {
170 EFX_ERR(efx, "MC rebooted\n");
171 rc = EIO;
172 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
173 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
174 respseq, mcdi->seqno);
175 rc = EIO;
176 } else if (error) {
177 efx_readd(efx, &reg, pdu + 4);
178 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
179#define TRANSLATE_ERROR(name) \
180 case MC_CMD_ERR_ ## name: \
181 rc = name; \
182 break
183 TRANSLATE_ERROR(ENOENT);
184 TRANSLATE_ERROR(EINTR);
185 TRANSLATE_ERROR(EACCES);
186 TRANSLATE_ERROR(EBUSY);
187 TRANSLATE_ERROR(EINVAL);
188 TRANSLATE_ERROR(EDEADLK);
189 TRANSLATE_ERROR(ENOSYS);
190 TRANSLATE_ERROR(ETIME);
191#undef TRANSLATE_ERROR
192 default:
193 rc = EIO;
194 break;
195 }
196 } else
197 rc = 0;
198
199out:
200 mcdi->resprc = rc;
201 if (rc)
202 mcdi->resplen = 0;
203
204 /* Return rc=0 like wait_event_timeout() */
205 return 0;
206}
207
208/* Test and clear MC-rebooted flag for this port/function */
209int efx_mcdi_poll_reboot(struct efx_nic *efx)
210{
211 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
212 efx_dword_t reg;
213 uint32_t value;
214
215 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
216 return false;
217
218 efx_readd(efx, &reg, addr);
219 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
220
221 if (value == 0)
222 return 0;
223
224 EFX_ZERO_DWORD(reg);
225 efx_writed(efx, &reg, addr);
226
227 if (value == MC_STATUS_DWORD_ASSERT)
228 return -EINTR;
229 else
230 return -EIO;
231}
232
233static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
234{
235 /* Wait until the interface becomes QUIESCENT and we win the race
236 * to mark it RUNNING. */
237 wait_event(mcdi->wq,
238 atomic_cmpxchg(&mcdi->state,
239 MCDI_STATE_QUIESCENT,
240 MCDI_STATE_RUNNING)
241 == MCDI_STATE_QUIESCENT);
242}
243
244static int efx_mcdi_await_completion(struct efx_nic *efx)
245{
246 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
247
248 if (wait_event_timeout(
249 mcdi->wq,
250 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
251 msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
252 return -ETIMEDOUT;
253
254 /* Check if efx_mcdi_set_mode() switched us back to polled completions.
255 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
256 * completed the request first, then we'll just end up completing the
257 * request again, which is safe.
258 *
259 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
260 * wait_event_timeout() implicitly provides.
261 */
262 if (mcdi->mode == MCDI_MODE_POLL)
263 return efx_mcdi_poll(efx);
264
265 return 0;
266}
267
268static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
269{
270 /* If the interface is RUNNING, then move to COMPLETED and wake any
271 * waiters. If the interface isn't in RUNNING then we've received a
272 * duplicate completion after we've already transitioned back to
273 * QUIESCENT. [A subsequent invocation would increment seqno, so would
274 * have failed the seqno check].
275 */
276 if (atomic_cmpxchg(&mcdi->state,
277 MCDI_STATE_RUNNING,
278 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
279 wake_up(&mcdi->wq);
280 return true;
281 }
282
283 return false;
284}
285
286static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
287{
288 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
289 wake_up(&mcdi->wq);
290}
291
292static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
293 unsigned int datalen, unsigned int errno)
294{
295 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
296 bool wake = false;
297
298 spin_lock(&mcdi->iface_lock);
299
300 if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
301 if (mcdi->credits)
302 /* The request has been cancelled */
303 --mcdi->credits;
304 else
305 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
306 "seq 0x%x\n", seqno, mcdi->seqno);
307 } else {
308 mcdi->resprc = errno;
309 mcdi->resplen = datalen;
310
311 wake = true;
312 }
313
314 spin_unlock(&mcdi->iface_lock);
315
316 if (wake)
317 efx_mcdi_complete(mcdi);
318}
319
320/* Issue the given command by writing the data into the shared memory PDU,
321 * ring the doorbell and wait for completion. Copyout the result. */
322int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
323 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
324 size_t *outlen_actual)
325{
326 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
327 int rc;
328 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
329
330 efx_mcdi_acquire(mcdi);
331
332 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
333 spin_lock_bh(&mcdi->iface_lock);
334 ++mcdi->seqno;
335 spin_unlock_bh(&mcdi->iface_lock);
336
337 efx_mcdi_copyin(efx, cmd, inbuf, inlen);
338
339 if (mcdi->mode == MCDI_MODE_POLL)
340 rc = efx_mcdi_poll(efx);
341 else
342 rc = efx_mcdi_await_completion(efx);
343
344 if (rc != 0) {
345 /* Close the race with efx_mcdi_ev_cpl() executing just too late
346 * and completing a request we've just cancelled, by ensuring
347 * that the seqno check therein fails.
348 */
349 spin_lock_bh(&mcdi->iface_lock);
350 ++mcdi->seqno;
351 ++mcdi->credits;
352 spin_unlock_bh(&mcdi->iface_lock);
353
354 EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
355 cmd, (int)inlen, mcdi->mode);
356 } else {
357 size_t resplen;
358
359 /* At the very least we need a memory barrier here to ensure
360 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
361 * a spurious efx_mcdi_ev_cpl() running concurrently by
362 * acquiring the iface_lock. */
363 spin_lock_bh(&mcdi->iface_lock);
364 rc = -mcdi->resprc;
365 resplen = mcdi->resplen;
366 spin_unlock_bh(&mcdi->iface_lock);
367
368 if (rc == 0) {
369 efx_mcdi_copyout(efx, outbuf,
370 min(outlen, mcdi->resplen + 3) & ~0x3);
371 if (outlen_actual != NULL)
372 *outlen_actual = resplen;
373 } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
374 ; /* Don't reset if MC_CMD_REBOOT returns EIO */
375 else if (rc == -EIO || rc == -EINTR) {
376 EFX_ERR(efx, "MC fatal error %d\n", -rc);
377 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
378 } else
379 EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
380 cmd, (int)inlen, -rc);
381 }
382
383 efx_mcdi_release(mcdi);
384 return rc;
385}
386
/* Switch the MCDI interface to polled completion mode.
 * Safe to call at any time, including while a request is outstanding:
 * the outstanding request is completed via efx_mcdi_poll() instead of
 * waiting for a CMDDONE event.  No-op on pre-Siena hardware (no MC)
 * and when already in polled mode.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	/* MCDI only exists on Siena-class (SFC9000) NICs */
	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
410
/* Switch the MCDI interface to event-driven completion mode.
 * Must not change the mode under a running request, so the interface is
 * acquired first (serialising against efx_mcdi_rpc() callers).  No-op on
 * pre-Siena hardware and when already in event mode.
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	/* MCDI only exists on Siena-class (SFC9000) NICs */
	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
434
/* Handle an MC death notification (BADASSERT or REBOOT event).
 * @rc is a positive errno (EINTR/EIO); it is stored into mcdi->resprc,
 * which efx_mcdi_rpc() negates when returning to its caller.  If no
 * request was pending, the death is unexpected and a reset is scheduled.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resplen = 0;
		}
	} else
		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

	spin_unlock(&mcdi->iface_lock);
}
467
/* Translation table from the MCDI LINKCHANGE speed enum to Mb/s */
static unsigned int efx_mcdi_event_link_speed[] = {
	[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
};
473
474
/* Decode a LINKCHANGE event: extract speed, flags, flow control and the
 * link partner's capabilities, update efx->link_state and notify the
 * stack of the change.
 */
static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
	u32 flags, fcntl, speed, lpa;

	/* Map the MCDI speed enum to Mb/s; out-of-range values indicate a
	 * protocol mismatch with the MC firmware */
	speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
	EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
	speed = efx_mcdi_event_link_speed[speed];

	flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
	fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
	lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);

	/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
	 * which is only run after flushing the event queues. Therefore, it
	 * is safe to modify the link state outside of the mac_lock here.
	 */
	efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);

	efx_mcdi_phy_check_fcntl(efx, lpa);

	efx_link_status_changed(efx);
}
497
/* Human-readable sensor labels, indexed by the MC_CMD_SENSOR_* id
 * reported in SENSOREVT events */
static const char *sensor_names[] = {
	[MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
	[MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
	[MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
	[MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
	[MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
	[MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
	[MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
	[MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
	[MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
	[MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
	[MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
	[MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
	[MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
};
513
/* Human-readable sensor states, indexed by MC_CMD_SENSOR_STATE_* */
static const char *sensor_status_names[] = {
	[MC_CMD_SENSOR_STATE_OK] = "OK",
	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
};
520
521static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
522{
523 unsigned int monitor, state, value;
524 const char *name, *state_txt;
525 monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
526 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
527 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
528 /* Deal gracefully with the board having more drivers than we
529 * know about, but do not expect new sensor states. */
530 name = (monitor >= ARRAY_SIZE(sensor_names))
531 ? "No sensor name available" :
532 sensor_names[monitor];
533 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
534 state_txt = sensor_status_names[state];
535
536 EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
537 monitor, name, state_txt, value);
538}
539
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
		/* Terminate any outstanding request; its caller sees -EINTR */
		efx_mcdi_ev_death(efx, EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		EFX_INFO(efx, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of an event-mode MCDI request */
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		EFX_INFO(efx, "MC Reboot\n");
		/* Terminate any outstanding request; its caller sees -EIO */
		efx_mcdi_ev_death(efx, EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
		break;

	default:
		EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
	}
}
586
587/**************************************************************************
588 *
589 * Specific request functions
590 *
591 **************************************************************************
592 */
593
/* Query the MC firmware version via MC_CMD_GET_VERSION.
 * A V0-sized response comes from the boot ROM, which has no version
 * string: *version is reported as 0 and only *build is valid.  A V1
 * response carries four 16-bit version words, packed into *version
 * most-significant first.  Returns 0 or a negative errno.
 */
int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
{
	u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	/* V0 response: boot ROM, no version words available */
	if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
		*version = 0;
		*build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
		return 0;
	}

	if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
		rc = -EMSGSIZE;
		goto fail;
	}

	/* Pack a.b.c.d into a single u64, "a" in the top 16 bits */
	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	*version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
		    ((u64)le16_to_cpu(ver_words[1]) << 32) |
		    ((u64)le16_to_cpu(ver_words[2]) << 16) |
		    le16_to_cpu(ver_words[3]));
	*build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);

	return 0;

fail:
	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
632
633int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
634 bool *was_attached)
635{
636 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
637 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
638 size_t outlen;
639 int rc;
640
641 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
642 driver_operating ? 1 : 0);
643 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
644
645 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
646 outbuf, sizeof(outbuf), &outlen);
647 if (rc)
648 goto fail;
649 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
650 goto fail;
651
652 if (was_attached != NULL)
653 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
654 return 0;
655
656fail:
657 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
658 return rc;
659}
660
/* Read board configuration from the MC.
 * @mac_address: if non-NULL, receives this port's base MAC address
 *	(ETH_ALEN bytes; the offset used depends on efx_port_num()).
 * @fw_subtype_list: if non-NULL, receives the firmware subtype list
 *	(MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN bytes of u16s).
 * Returns 0 or a negative errno.
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list)
{
	uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
	size_t outlen;
	int port_num = efx_port_num(efx);
	int offset;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
		rc = -EMSGSIZE;
		goto fail;
	}

	/* Each port has its own base MAC address in the response */
	offset = (port_num)
		? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
		: MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
	if (mac_address)
		memcpy(mac_address, outbuf + offset, ETH_ALEN);
	if (fw_subtype_list)
		memcpy(fw_subtype_list,
		       outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
		       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);

	return 0;

fail:
	EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);

	return rc;
}
699
700int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
701{
702 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
703 u32 dest = 0;
704 int rc;
705
706 if (uart)
707 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
708 if (evq)
709 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
710
711 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
712 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
713
714 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
715
716 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
717 NULL, 0, NULL);
718 if (rc)
719 goto fail;
720
721 return 0;
722
723fail:
724 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
725 return rc;
726}
727
728int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
729{
730 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
731 size_t outlen;
732 int rc;
733
734 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
735
736 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
737 outbuf, sizeof(outbuf), &outlen);
738 if (rc)
739 goto fail;
740 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
741 goto fail;
742
743 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
744 return 0;
745
746fail:
747 EFX_ERR(efx, "%s: failed rc=%d\n",
748 __func__, rc);
749 return rc;
750}
751
752int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
753 size_t *size_out, size_t *erase_size_out,
754 bool *protected_out)
755{
756 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
757 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
758 size_t outlen;
759 int rc;
760
761 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
762
763 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
764 outbuf, sizeof(outbuf), &outlen);
765 if (rc)
766 goto fail;
767 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
768 goto fail;
769
770 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
771 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
772 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
773 (1 << MC_CMD_NVRAM_PROTECTED_LBN));
774 return 0;
775
776fail:
777 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
778 return rc;
779}
780
781int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
782{
783 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
784 int rc;
785
786 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
787
788 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
789
790 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
791 NULL, 0, NULL);
792 if (rc)
793 goto fail;
794
795 return 0;
796
797fail:
798 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
799 return rc;
800}
801
802int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
803 loff_t offset, u8 *buffer, size_t length)
804{
805 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
806 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
807 size_t outlen;
808 int rc;
809
810 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
811 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
812 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
813
814 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
815 outbuf, sizeof(outbuf), &outlen);
816 if (rc)
817 goto fail;
818
819 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
820 return 0;
821
822fail:
823 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
824 return rc;
825}
826
827int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
828 loff_t offset, const u8 *buffer, size_t length)
829{
830 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
831 int rc;
832
833 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
835 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
836 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
837
838 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
839
840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
841 NULL, 0, NULL);
842 if (rc)
843 goto fail;
844
845 return 0;
846
847fail:
848 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
849 return rc;
850}
851
852int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
853 loff_t offset, size_t length)
854{
855 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
856 int rc;
857
858 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
859 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
860 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
861
862 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
863
864 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
865 NULL, 0, NULL);
866 if (rc)
867 goto fail;
868
869 return 0;
870
871fail:
872 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
873 return rc;
874}
875
876int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
877{
878 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
879 int rc;
880
881 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
882
883 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
884
885 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
886 NULL, 0, NULL);
887 if (rc)
888 goto fail;
889
890 return 0;
891
892fail:
893 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
894 return rc;
895}
896
/* Detect and report a pending MC assertion, then reboot the MC to clear
 * it.  Returns 0 when there was no assertion (or it was handled), or a
 * negative errno if the state could not be queried.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	union {
		u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
		u8 reboot[MC_CMD_REBOOT_IN_LEN];
	} inbuf;
	u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
	unsigned int flags, index, ofst;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Check if the MC is in the assertion handler, retrying twice. Once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
				  assertion, sizeof(assertion), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EINVAL;

	/* No assertion pending: nothing further to do */
	flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Reset the hardware atomically such that only one port will succeed.
	 * This command will succeed if a reboot is no longer required (because
	 * the other port did it first), but fail with EIO if it succeeds.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
		     NULL, 0, NULL);

	/* Print out the assertion */
	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (R1-R31; R0 is not dumped) */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1; index < 32; index++) {
		EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
			MCDI_DWORD2(assertion, ofst));
		ofst += sizeof(efx_dword_t);
	}

	return 0;
}
963
964void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
965{
966 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
967 int rc;
968
969 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
970 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
971 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
972
973 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
974
975 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
976
977 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
978 NULL, 0, NULL);
979 if (rc)
980 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
981}
982
983int efx_mcdi_reset_port(struct efx_nic *efx)
984{
985 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
986 if (rc)
987 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
988 return rc;
989}
990
/* Reboot the MC.  Note the inverted result handling: a successful reboot
 * tears down the MCDI transport underneath us, so the command is
 * *expected* to fail with -EIO; an rc of 0 means the MC did not actually
 * reboot, which we report as -EIO.
 */
int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_REBOOT_IN_LEN];
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1008
1009int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1010 const u8 *mac, int *id_out)
1011{
1012 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
1013 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
1014 size_t outlen;
1015 int rc;
1016
1017 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1018 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1019 MC_CMD_FILTER_MODE_SIMPLE);
1020 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
1021
1022 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1023 outbuf, sizeof(outbuf), &outlen);
1024 if (rc)
1025 goto fail;
1026
1027 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1028 rc = -EMSGSIZE;
1029 goto fail;
1030 }
1031
1032 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1033
1034 return 0;
1035
1036fail:
1037 *id_out = -1;
1038 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1039 return rc;
1040
1041}
1042
1043
1044int
1045efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
1046{
1047 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1048}
1049
1050
1051int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1052{
1053 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
1054 size_t outlen;
1055 int rc;
1056
1057 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1058 outbuf, sizeof(outbuf), &outlen);
1059 if (rc)
1060 goto fail;
1061
1062 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1063 rc = -EMSGSIZE;
1064 goto fail;
1065 }
1066
1067 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
1068
1069 return 0;
1070
1071fail:
1072 *id_out = -1;
1073 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1074 return rc;
1075}
1076
1077
1078int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1079{
1080 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
1081 int rc;
1082
1083 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
1084
1085 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
1086 NULL, 0, NULL);
1087 if (rc)
1088 goto fail;
1089
1090 return 0;
1091
1092fail:
1093 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1094 return rc;
1095}
1096
1097
1098int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1099{
1100 int rc;
1101
1102 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
1103 if (rc)
1104 goto fail;
1105
1106 return 0;
1107
1108fail:
1109 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1110 return rc;
1111}
1112
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
new file mode 100644
index 000000000000..de916728c2e3
--- /dev/null
+++ b/drivers/net/sfc/mcdi.h
@@ -0,0 +1,130 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_MCDI_H
11#define EFX_MCDI_H
12
/**
 * enum efx_mcdi_state
 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
 *	mcdi_lock then they are able to move to MCDI_STATE_RUNNING
 * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
 *	moved into this state is allowed to move out of it.
 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
 *	has not yet consumed the result. For all other threads, equivalent to
 *	MCDI_STATE_RUNNING.
 */
enum efx_mcdi_state {
	MCDI_STATE_QUIESCENT,
	MCDI_STATE_RUNNING,
	MCDI_STATE_COMPLETED,
};
28
/* How MCDI request completion is detected: by polling shared memory, or
 * by waiting for a CMDDONE event */
enum efx_mcdi_mode {
	MCDI_MODE_POLL,
	MCDI_MODE_EVENTS,
};
33
/**
 * struct efx_mcdi_iface
 * @state: Interface state. Waited for by mcdi_wq.
 * @wq: Wait queue for threads waiting for state != STATE_RUNNING
 * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
 *	Serialised by @iface_lock
 * @seqno: The next sequence number to use for mcdi requests.
 *	Serialised by @iface_lock
 * @credits: Number of spurious MCDI completion events allowed before we
 *	trigger a fatal error. Protected by @iface_lock
 * @resprc: Returned MCDI completion
 * @resplen: Returned payload length
 */
struct efx_mcdi_iface {
	atomic_t state;
	wait_queue_head_t wq;
	spinlock_t iface_lock;
	enum efx_mcdi_mode mode;
	unsigned int credits;
	unsigned int seqno;
	unsigned int resprc;
	size_t resplen;
};
58
/* Core MCDI entry points */
extern void efx_mcdi_init(struct efx_nic *efx);

extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
			size_t inlen, u8 *outbuf, size_t outlen,
			size_t *outlen_actual);

extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);

extern void efx_mcdi_process_event(struct efx_channel *channel,
				   efx_qword_t *event);

/* Raw-offset accessors: operate on an MCDI buffer at an explicit byte
 * offset.  The MCDI protocol is little-endian; these use the efx_dword_t/
 * efx_qword_t field machinery to do the byte swapping. */
#define MCDI_PTR2(_buf, _ofst)						\
	(((u8 *)_buf) + _ofst)
#define MCDI_SET_DWORD2(_buf, _ofst, _value)				\
	EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)),	\
			     EFX_DWORD_0, _value)
#define MCDI_DWORD2(_buf, _ofst)					\
	EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)),	\
			EFX_DWORD_0)
#define MCDI_QWORD2(_buf, _ofst)					\
	EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)),	\
			  EFX_QWORD_0)

/* Named-field accessors: expand a field name to its MC_CMD_*_OFST
 * offset from mcdi_pcol.h and delegate to the raw-offset forms */
#define MCDI_PTR(_buf, _ofst)						\
	MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_SET_DWORD(_buf, _ofst, _value)				\
	MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
#define MCDI_DWORD(_buf, _ofst)						\
	MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_QWORD(_buf, _ofst)						\
	MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)

/* Extract a named field from an MCDI event qword */
#define MCDI_EVENT_FIELD(_ev, _field)					\
	EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)

/* Specific MCDI request wrappers, implemented in mcdi.c */
extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
				  u16 *fw_subtype_list);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
			     u32 dest_evq);
extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			       size_t *size_out, size_t *erase_size_out,
			       bool *protected_out);
extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
				       unsigned int type);
extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length);
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer,
				size_t length);
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
					unsigned int type);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
					 const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
129
130#endif /* EFX_MCDI_H */
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
new file mode 100644
index 000000000000..06d24a1e412a
--- /dev/null
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -0,0 +1,152 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "efx.h"
12#include "mac.h"
13#include "mcdi.h"
14#include "mcdi_pcol.h"
15
/* Push the current MAC configuration (address, MTU, unicast filtering,
 * flow control) to the MC via MC_CMD_SET_MAC.
 * Returns 0 or a negative errno.
 */
static int efx_mcdi_set_mac(struct efx_nic *efx)
{
	u32 reject, fcntl;
	u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];

	memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
	       efx->net_dev->dev_addr, ETH_ALEN);

	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
			EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);

	/* The MCDI command provides for controlling accept/reject
	 * of broadcast packets too, but the driver doesn't currently
	 * expose this. */
	reject = (efx->promiscuous) ? 0 :
		(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);

	/* Map the driver's flow-control setting onto the MCDI enum.
	 * Note EFX_FC_TX alone falls through to FCNTL_OFF. */
	switch (efx->wanted_fc) {
	case EFX_FC_RX | EFX_FC_TX:
		fcntl = MC_CMD_FCNTL_BIDIR;
		break;
	case EFX_FC_RX:
		fcntl = MC_CMD_FCNTL_RESPOND;
		break;
	default:
		fcntl = MC_CMD_FCNTL_OFF;
		break;
	}
	/* Autonegotiated flow control overrides any fixed setting */
	if (efx->wanted_fc & EFX_FC_AUTO)
		fcntl = MC_CMD_FCNTL_AUTO;

	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);

	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
			    NULL, 0, NULL);
}
54
55static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
56{
57 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
58 size_t outlength;
59 int rc;
60
61 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
62
63 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
64 outbuf, sizeof(outbuf), &outlength);
65 if (rc)
66 goto fail;
67
68 *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
69 return 0;
70
71fail:
72 EFX_ERR(efx, "%s: failed rc=%d\n",
73 __func__, rc);
74 return rc;
75}
76
/* Start or stop periodic MAC statistics DMA to the buffer at @dma_addr.
 * @enable: start a 1000 ms periodic DMA; otherwise stop it.
 * @clear: also clear the accumulated statistics.
 * Returns 0 or a negative errno.
 */
int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
		       u32 dma_len, int enable, int clear)
{
	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
	int rc;
	efx_dword_t *cmd_ptr;
	int period = 1000;		/* DMA interval in milliseconds */
	u32 addr_hi;
	u32 addr_lo;

	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);

	/* The DMA address is passed as two 32-bit halves */
	addr_lo = ((u64)dma_addr) >> 0;
	addr_hi = ((u64)dma_addr) >> 32;

	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
	cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
	/* PERIODIC_CHANGE is set in both cases; only the enable flag and
	 * period differ */
	if (enable)
		EFX_POPULATE_DWORD_6(*cmd_ptr,
				     MC_CMD_MAC_STATS_CMD_DMA, 1,
				     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
				     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
				     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
				     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
				     MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
	else
		EFX_POPULATE_DWORD_5(*cmd_ptr,
				     MC_CMD_MAC_STATS_CMD_DMA, 0,
				     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
				     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
				     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
				     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	EFX_ERR(efx, "%s: %s failed rc=%d\n",
		__func__, enable ? "enable" : "disable", rc);
	return rc;
}
124
125static int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
126{
127 int rc;
128
129 rc = efx_mcdi_set_mac(efx);
130 if (rc != 0)
131 return rc;
132
133 /* Restore the multicast hash registers. */
134 efx->type->push_multicast_hash(efx);
135
136 return 0;
137}
138
139
140static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
141{
142 u32 faults;
143 int rc = efx_mcdi_get_mac_faults(efx, &faults);
144 return (rc != 0) || (faults != 0);
145}
146
147
/* MAC operations for MCDI-managed (Siena) NICs.  Statistics are DMA'd
 * periodically by the MC (see efx_mcdi_mac_stats()), so update_stats is
 * a no-op here. */
struct efx_mac_operations efx_mcdi_mac_operations = {
	.reconfigure	= efx_mcdi_mac_reconfigure,
	.update_stats	= efx_port_dummy_op_void,
	.check_fault 	= efx_mcdi_mac_check_fault,
};
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
new file mode 100644
index 000000000000..2a85360a46f0
--- /dev/null
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -0,0 +1,1578 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10
11#ifndef MCDI_PCOL_H
12#define MCDI_PCOL_H
13
14/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
15/* Power-on reset state */
16#define MC_FW_STATE_POR (1)
17/* If this is set in MC_RESET_STATE_REG then it should be
18 * possible to jump into IMEM without loading code from flash. */
19#define MC_FW_WARM_BOOT_OK (2)
20/* The MC main image has started to boot. */
21#define MC_FW_STATE_BOOTING (4)
22/* The Scheduler has started. */
23#define MC_FW_STATE_SCHED (8)
24
25/* Values to be written to the per-port status dword in shared
26 * memory on reboot and assert */
27#define MC_STATUS_DWORD_REBOOT (0xb007b007)
28#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
29
30/* The current version of the MCDI protocol.
31 *
32 * Note that the ROM burnt into the card only talks V0, so at the very
33 * least every driver must support version 0 and MCDI_PCOL_VERSION
34 */
35#define MCDI_PCOL_VERSION 1
36
37/**
38 * MCDI version 1
39 *
40 * Each MCDI request starts with an MCDI_HEADER, which is a 32-bit
41 * structure, filled in by the client.
42 *
43 * 0 7 8 16 20 22 23 24 31
44 * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
45 * | | |
46 * | | \--- Response
47 * | \------- Error
48 * \------------------------------ Resync (always set)
49 *
50 * The client writes its request into MC shared memory, and rings the
51 * doorbell. Each request is completed either by the MC writing
52 * back into shared memory, or by writing out an event.
53 *
54 * All MCDI commands support completion by shared memory response. Each
55 * request may also contain additional data (accounted for by HEADER.LEN),
56 * and some responses may also contain additional data (again, accounted
57 * for by HEADER.LEN).
58 *
59 * Some MCDI commands support completion by event, in which any associated
60 * response data is included in the event.
61 *
62 * The protocol requires one response to be delivered for every request, a
63 * request should not be sent unless the response for the previous request
64 * has been received (either by polling shared memory, or by receiving
65 * an event).
66 */
67
68/** Request/Response structure */
69#define MCDI_HEADER_OFST 0
70#define MCDI_HEADER_CODE_LBN 0
71#define MCDI_HEADER_CODE_WIDTH 7
72#define MCDI_HEADER_RESYNC_LBN 7
73#define MCDI_HEADER_RESYNC_WIDTH 1
74#define MCDI_HEADER_DATALEN_LBN 8
75#define MCDI_HEADER_DATALEN_WIDTH 8
76#define MCDI_HEADER_SEQ_LBN 16
77#define MCDI_HEADER_RSVD_LBN 20
78#define MCDI_HEADER_RSVD_WIDTH 2
79#define MCDI_HEADER_SEQ_WIDTH 4
80#define MCDI_HEADER_ERROR_LBN 22
81#define MCDI_HEADER_ERROR_WIDTH 1
82#define MCDI_HEADER_RESPONSE_LBN 23
83#define MCDI_HEADER_RESPONSE_WIDTH 1
84#define MCDI_HEADER_XFLAGS_LBN 24
85#define MCDI_HEADER_XFLAGS_WIDTH 8
86/* Request response using event */
87#define MCDI_HEADER_XFLAGS_EVREQ 0x01
88
89/* Maximum number of payload bytes */
90#define MCDI_CTL_SDU_LEN_MAX 0xfc
91
92/* The MC can generate events for two reasons:
93 * - To complete a shared memory request if XFLAGS_EVREQ was set
94 * - As a notification (link state, i2c event), controlled
95 * via MC_CMD_LOG_CTRL
96 *
97 * Both events share a common structure:
98 *
99 * 0 32 33 36 44 52 60
100 * | Data | Cont | Level | Src | Code | Rsvd |
101 * |
102 * \ There is another event pending in this notification
103 *
104 * If Code==CMDDONE, then the fields are further interpreted as:
105 *
106 * - LEVEL==INFO Command succeeded
107 * - LEVEL==ERR Command failed
108 *
109 * 0 8 16 24 32
110 * | Seq | Datalen | Errno | Rsvd |
111 *
112 * These fields are taken directly out of the standard MCDI header, i.e.,
113 * LEVEL==ERR, Datalen == 0 => Reboot
114 *
115 * Events can be squirted out of the UART (using LOG_CTRL) without a
116 * MCDI header. An event can be distinguished from a MCDI response by
117 * examining the first byte which is 0xc0. This corresponds to the
118 * non-existent MCDI command MC_CMD_DEBUG_LOG.
119 *
120 * 0 7 8
121 * | command | Resync | = 0xc0
122 *
123 * Since the event is written in big-endian byte order, this works
124 * providing bits 56-63 of the event are 0xc0.
125 *
126 * 56 60 63
127 * | Rsvd | Code | = 0xc0
128 *
129 * Which means for convenience the event code is 0xc for all MC
130 * generated events.
131 */
132#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
133
134#define MCDI_EVENT_DATA_LBN 0
135#define MCDI_EVENT_DATA_WIDTH 32
136#define MCDI_EVENT_CONT_LBN 32
137#define MCDI_EVENT_CONT_WIDTH 1
138#define MCDI_EVENT_LEVEL_LBN 33
139#define MCDI_EVENT_LEVEL_WIDTH 3
140#define MCDI_EVENT_LEVEL_INFO (0)
141#define MCDI_EVENT_LEVEL_WARN (1)
142#define MCDI_EVENT_LEVEL_ERR (2)
143#define MCDI_EVENT_LEVEL_FATAL (3)
144#define MCDI_EVENT_SRC_LBN 36
145#define MCDI_EVENT_SRC_WIDTH 8
146#define MCDI_EVENT_CODE_LBN 44
147#define MCDI_EVENT_CODE_WIDTH 8
148#define MCDI_EVENT_CODE_BADSSERT (1)
149#define MCDI_EVENT_CODE_PMNOTICE (2)
150#define MCDI_EVENT_CODE_CMDDONE (3)
151#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
152#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
153#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
154#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
155#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
156#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
157#define MCDI_EVENT_CODE_LINKCHANGE (4)
158#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
159#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
160#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
161#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
162#define MCDI_EVENT_LINKCHANGE_SPEED_100M 1
163#define MCDI_EVENT_LINKCHANGE_SPEED_1G 2
164#define MCDI_EVENT_LINKCHANGE_SPEED_10G 3
165#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
166#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
167#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
168#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
169#define MCDI_EVENT_CODE_SENSOREVT (5)
170#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
171#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
172#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
173#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
174#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
175#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
176#define MCDI_EVENT_CODE_SCHEDERR (6)
177#define MCDI_EVENT_CODE_REBOOT (7)
178#define MCDI_EVENT_CODE_MAC_STATS_DMA (8)
179#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
180#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
181
182/* Non-existent command target */
183#define MC_CMD_ERR_ENOENT 2
184/* assert() has killed the MC */
185#define MC_CMD_ERR_EINTR 4
186/* Caller does not hold required locks */
187#define MC_CMD_ERR_EACCES 13
188/* Resource is currently unavailable (e.g. lock contention) */
189#define MC_CMD_ERR_EBUSY 16
190/* Invalid argument to target */
191#define MC_CMD_ERR_EINVAL 22
192/* Non-recursive resource is already acquired */
193#define MC_CMD_ERR_EDEADLK 35
194/* Operation not implemented */
195#define MC_CMD_ERR_ENOSYS 38
196/* Operation timed out */
197#define MC_CMD_ERR_ETIME 62
198
199#define MC_CMD_ERR_CODE_OFST 0
200
201
202/* MC_CMD_READ32: (debug, variadic out)
203 * Read multiple 32-bit words from MC memory
204 */
205#define MC_CMD_READ32 0x01
206#define MC_CMD_READ32_IN_LEN 8
207#define MC_CMD_READ32_IN_ADDR_OFST 0
208#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
209#define MC_CMD_READ32_OUT_LEN(_numwords) \
210 (4 * (_numwords))
211#define MC_CMD_READ32_OUT_BUFFER_OFST 0
212
213/* MC_CMD_WRITE32: (debug, variadic in)
214 * Write multiple 32-bit words to MC memory
215 */
216#define MC_CMD_WRITE32 0x02
217#define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4)
218#define MC_CMD_WRITE32_IN_ADDR_OFST 0
219#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
220#define MC_CMD_WRITE32_OUT_LEN 0
221
222/* MC_CMD_COPYCODE: (debug)
223 * Copy MC code between two locations and jump
224 */
225#define MC_CMD_COPYCODE 0x03
226#define MC_CMD_COPYCODE_IN_LEN 16
227#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
228#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
229#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
230#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
231/* Control should return to the caller rather than jumping */
232#define MC_CMD_COPYCODE_JUMP_NONE 1
233#define MC_CMD_COPYCODE_OUT_LEN 0
234
235/* MC_CMD_SET_FUNC: (debug)
236 * Select function for function-specific commands.
237 */
238#define MC_CMD_SET_FUNC 0x04
239#define MC_CMD_SET_FUNC_IN_LEN 4
240#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
241#define MC_CMD_SET_FUNC_OUT_LEN 0
242
243/* MC_CMD_GET_BOOT_STATUS:
244 * Get the instruction address from which the MC booted.
245 */
246#define MC_CMD_GET_BOOT_STATUS 0x05
247#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
248#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
249#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
250#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
251/* Reboot caused by watchdog */
252#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0)
253#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1)
254/* MC booted from primary flash partition */
255#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1)
256#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1)
257/* MC booted from backup flash partition */
258#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2)
259#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1)
260
261/* MC_CMD_GET_ASSERTS: (debug, variadic out)
262 * Get (and optionally clear) the current assertion status.
263 *
264 * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion
265 * payload. The other fields will only be present if
266 * OUT.GLOBAL_FLAGS != NO_FAILS
267 */
268#define MC_CMD_GET_ASSERTS 0x06
269#define MC_CMD_GET_ASSERTS_IN_LEN 4
270#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
271#define MC_CMD_GET_ASSERTS_OUT_LEN 140
272/* Assertion status flag */
273#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
274/*! No assertions have failed. */
275#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1
276/*! A system-level assertion has failed. */
277#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2
278/*! A thread-level assertion has failed. */
279#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3
280/*! The system was reset by the watchdog. */
281#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4
282/* Failing PC value */
283#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
284/* Saved GP regs */
285#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
286#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124
287/* Failing thread address */
288#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
289
290/* MC_CMD_LOG_CTRL:
291 * Determine the output stream for various events and messages
292 */
293#define MC_CMD_LOG_CTRL 0x07
294#define MC_CMD_LOG_CTRL_IN_LEN 8
295#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
296#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1)
297#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2)
298#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
299#define MC_CMD_LOG_CTRL_OUT_LEN 0
300
301/* MC_CMD_GET_VERSION:
302 * Get version information about the MC firmware
303 */
304#define MC_CMD_GET_VERSION 0x08
305#define MC_CMD_GET_VERSION_IN_LEN 0
306#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
307#define MC_CMD_GET_VERSION_V1_OUT_LEN 32
308#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
309/* Reserved version number to indicate "any" version. */
310#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
311/* The version response of a boot ROM awaiting rescue */
312#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000
313#define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4
314/* 128bit mask of functions supported by the current firmware */
315#define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8
316/* The command set exported by the boot ROM (MCDI v0) */
317#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
318 (1 << MC_CMD_READ32) | \
319 (1 << MC_CMD_WRITE32) | \
320 (1 << MC_CMD_COPYCODE) | \
321 (1 << MC_CMD_GET_VERSION), \
322 0, 0, 0 }
323#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
324
325/* Vectors in the boot ROM */
326/* Point to the copycode entry point. */
327#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
328/* Points to the recovery mode entry point. */
329#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
330
331/* Test execution limits */
332#define MC_TESTEXEC_VARIANT_COUNT 16
333#define MC_TESTEXEC_RESULT_COUNT 7
334
335/* MC_CMD_SET_TESTVARS: (debug, variadic in)
336 * Write variant words for test.
337 *
338 * The user supplies a bitmap of the variants they wish to set.
339 * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP)
340 */
341#define MC_CMD_SET_TESTVARS 0x09
342#define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \
343 (4 + 4*(_numwords))
344#define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0
345/* Up to MC_TESTEXEC_VARIANT_COUNT of 32-bit words start here */
346#define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4
347#define MC_CMD_SET_TESTVARS_OUT_LEN 0
348
349/* MC_CMD_GET_TESTRCS: (debug, variadic out)
350 * Return result words from test.
351 */
352#define MC_CMD_GET_TESTRCS 0x0a
353#define MC_CMD_GET_TESTRCS_IN_LEN 4
354#define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0
355#define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \
356 (4 * (_numwords))
357#define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0
358
359/* MC_CMD_RUN_TEST: (debug)
360 * Run the test exported by this firmware image
361 */
362#define MC_CMD_RUN_TEST 0x0b
363#define MC_CMD_RUN_TEST_IN_LEN 0
364#define MC_CMD_RUN_TEST_OUT_LEN 0
365
366/* MC_CMD_CSR_READ32: (debug, variadic out)
367 * Read 32bit words from the indirect memory map
368 */
369#define MC_CMD_CSR_READ32 0x0c
370#define MC_CMD_CSR_READ32_IN_LEN 12
371#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
372#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
373#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
374#define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \
375 (((_numwords) * 4) + 4)
376/* IN.NUMWORDS of 32bit words start here */
377#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
378#define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \
379 ((_numwords) * 4)
380
381/* MC_CMD_CSR_WRITE32: (debug, variadic in)
382 * Write 32bit dwords to the indirect memory map
383 */
384#define MC_CMD_CSR_WRITE32 0x0d
385#define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \
386 (((_numwords) * 4) + 8)
387#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
388#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
389/* Multiple 32bit words of data to write start here */
390#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
391#define MC_CMD_CSR_WRITE32_OUT_LEN 4
392#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
393
394/* MC_CMD_JTAG_WORK: (debug, fpga only)
395 * Process JTAG work buffer for RBF acceleration.
396 *
397 * Host: bit count, (up to) 32 words of data to clock out to JTAG
398 * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.)
399 * MC: bit count, (up to) 32 words of data clocked in from JTAG
400 * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused)
401 */
402#define MC_CMD_JTAG_WORK 0x0e
403
404/* MC_CMD_STACKINFO: (debug, variadic out)
405 * Get stack information
406 *
407 * Host: nothing
408 * MC: (thread ptr, stack size, free space) for each thread in system
409 */
410#define MC_CMD_STACKINFO 0x0f
411
412/* MC_CMD_MDIO_READ:
413 * MDIO register read
414 */
415#define MC_CMD_MDIO_READ 0x10
416#define MC_CMD_MDIO_READ_IN_LEN 16
417#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
418#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
419#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
420#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
421#define MC_CMD_MDIO_READ_OUT_LEN 8
422#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
423#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
424
425/* MC_CMD_MDIO_WRITE:
426 * MDIO register write
427 */
428#define MC_CMD_MDIO_WRITE 0x11
429#define MC_CMD_MDIO_WRITE_IN_LEN 20
430#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
431#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
432#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
433#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
434#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
435#define MC_CMD_MDIO_WRITE_OUT_LEN 4
436#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
437
438/* By default all the MCDI MDIO operations perform clause45 mode.
439 * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
440 */
441#define MC_CMD_MDIO_CLAUSE22 32
442
443/* There are two MDIO buses: one for the internal PHY, and one for external
444 * devices.
445 */
446#define MC_CMD_MDIO_BUS_INTERNAL 0
447#define MC_CMD_MDIO_BUS_EXTERNAL 1
448
449/* The MDIO commands return the raw status bits from the MDIO block. A "good"
450 * transaction should have the DONE bit set and all other bits clear.
451 */
452#define MC_CMD_MDIO_STATUS_GOOD 0x08
453
454
455/* MC_CMD_DBI_WRITE: (debug)
456 * Write DBI register(s)
457 *
458 * Host: address, byte-enables (and VF selection, and cs2 flag),
459 * value [,address ...]
460 * MC: nothing
461 */
462#define MC_CMD_DBI_WRITE 0x12
463#define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \
464 (12 * (_numwords))
465#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \
466 (((_word) * 12) + 0)
467#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \
468 (((_word) * 12) + 4)
469#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \
470 (((_word) * 12) + 8)
471#define MC_CMD_DBI_WRITE_OUT_LEN 0
472
473/* MC_CMD_DBI_READ: (debug)
474 * Read DBI register(s)
475 *
476 * Host: address, [,address ...]
477 * MC: value [,value ...]
478 * (note: this does not support reading from VFs, but is retained for backwards
479 * compatibility; see MC_CMD_DBI_READX below)
480 */
481#define MC_CMD_DBI_READ 0x13
482#define MC_CMD_DBI_READ_IN_LEN(_numwords) \
483 (4 * (_numwords))
484#define MC_CMD_DBI_READ_OUT_LEN(_numwords) \
485 (4 * (_numwords))
486
487/* MC_CMD_PORT_READ32: (debug)
488 * Read a 32-bit register from the indirect port register map.
489 *
490 * The port to access is implied by the Shared memory channel used.
491 */
492#define MC_CMD_PORT_READ32 0x14
493#define MC_CMD_PORT_READ32_IN_LEN 4
494#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
495#define MC_CMD_PORT_READ32_OUT_LEN 8
496#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
497#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
498
499/* MC_CMD_PORT_WRITE32: (debug)
500 * Write a 32-bit register to the indirect port register map.
501 *
502 * The port to access is implied by the Shared memory channel used.
503 */
504#define MC_CMD_PORT_WRITE32 0x15
505#define MC_CMD_PORT_WRITE32_IN_LEN 8
506#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
507#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
508#define MC_CMD_PORT_WRITE32_OUT_LEN 4
509#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
510
511/* MC_CMD_PORT_READ128: (debug)
512 * Read a 128-bit register from indirect port register map
513 *
514 * The port to access is implied by the Shared memory channel used.
515 */
516#define MC_CMD_PORT_READ128 0x16
517#define MC_CMD_PORT_READ128_IN_LEN 4
518#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
519#define MC_CMD_PORT_READ128_OUT_LEN 20
520#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
521#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
522
523/* MC_CMD_PORT_WRITE128: (debug)
524 * Write a 128-bit register to indirect port register map.
525 *
526 * The port to access is implied by the Shared memory channel used.
527 */
528#define MC_CMD_PORT_WRITE128 0x17
529#define MC_CMD_PORT_WRITE128_IN_LEN 20
530#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
531#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
532#define MC_CMD_PORT_WRITE128_OUT_LEN 4
533#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
534
535/* MC_CMD_GET_BOARD_CFG:
536 * Returns the MC firmware configuration structure
537 *
538 * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of
539 * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file
540 * for a specific board type, but otherwise have no meaning to the MC; they
541 * are used by the driver to manage selection of appropriate firmware updates.
542 */
543#define MC_CMD_GET_BOARD_CFG 0x18
544#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
545#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96
546#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
547#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
548#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
549#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
550#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
551#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
552#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
553#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
554#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
555#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
556#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
557#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
558#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
559#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
560#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24
561
562/* MC_CMD_DBI_READX: (debug)
563 * Read DBI register(s) -- extended functionality
564 *
565 * Host: vf selection, address, [,vf selection ...]
566 * MC: value [,value ...]
567 */
568#define MC_CMD_DBI_READX 0x19
569#define MC_CMD_DBI_READX_IN_LEN(_numwords) \
570 (8*(_numwords))
571#define MC_CMD_DBI_READX_OUT_LEN(_numwords) \
572 (4*(_numwords))
573
574/* MC_CMD_SET_RAND_SEED:
575 * Set the 16byte seed for the MC pseudo-random generator
576 */
577#define MC_CMD_SET_RAND_SEED 0x1a
578#define MC_CMD_SET_RAND_SEED_IN_LEN 16
579#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
580#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
581
582/* MC_CMD_LTSSM_HIST: (debug)
583 * Retrieve the history of the LTSSM, if the build supports it.
584 *
585 * Host: nothing
586 * MC: variable number of LTSSM values, as bytes
587 * The history is read-to-clear.
588 */
589#define MC_CMD_LTSSM_HIST 0x1b
590
591/* MC_CMD_DRV_ATTACH:
592 * Inform MCPU that this port is managed on the host (i.e. driver active)
593 */
594#define MC_CMD_DRV_ATTACH 0x1c
595#define MC_CMD_DRV_ATTACH_IN_LEN 8
596#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
597#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
598#define MC_CMD_DRV_ATTACH_OUT_LEN 4
599#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
600
601/* MC_CMD_NCSI_PROD: (debug)
602 * Trigger an NC-SI event (and possibly an AEN in response)
603 */
604#define MC_CMD_NCSI_PROD 0x1d
605#define MC_CMD_NCSI_PROD_IN_LEN 4
606#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
607#define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0
608#define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1
609#define MC_CMD_NCSI_PROD_RESET_LBN 1
610#define MC_CMD_NCSI_PROD_RESET_WIDTH 1
611#define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2
612#define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1
613#define MC_CMD_NCSI_PROD_OUT_LEN 0
614
615/* Enumeration */
616#define MC_CMD_NCSI_PROD_LINKCHANGE 0
617#define MC_CMD_NCSI_PROD_RESET 1
618#define MC_CMD_NCSI_PROD_DRVATTACH 2
619
620/* MC_CMD_DEVEL: (debug)
621 * Reserved for development
622 */
623#define MC_CMD_DEVEL 0x1e
624
625/* MC_CMD_SHMUART: (debug)
626 * Route UART output to circular buffer in shared memory instead.
627 */
628#define MC_CMD_SHMUART 0x1f
629#define MC_CMD_SHMUART_IN_FLAG_OFST 0
630#define MC_CMD_SHMUART_IN_LEN 4
631#define MC_CMD_SHMUART_OUT_LEN 0
632
633/* MC_CMD_PORT_RESET:
634 * Generic per-port reset. There is no equivalent for per-board reset.
635 *
636 * Locks required: None
637 * Return code: 0, ETIME
638 */
639#define MC_CMD_PORT_RESET 0x20
640#define MC_CMD_PORT_RESET_IN_LEN 0
641#define MC_CMD_PORT_RESET_OUT_LEN 0
642
643/* MC_CMD_RESOURCE_LOCK:
644 * Generic resource lock/unlock interface.
645 *
646 * Locks required: None
647 * Return code: 0,
648 * EBUSY (if trylock is contended by other port),
649 * EDEADLK (if trylock is already acquired by this port)
650 * EINVAL (if unlock doesn't own the lock)
651 */
652#define MC_CMD_RESOURCE_LOCK 0x21
653#define MC_CMD_RESOURCE_LOCK_IN_LEN 8
654#define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0
655#define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1
656#define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0
657#define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4
658#define MC_CMD_RESOURCE_LOCK_I2C 2
659#define MC_CMD_RESOURCE_LOCK_PHY 3
660#define MC_CMD_RESOURCE_LOCK_OUT_LEN 0
661
662/* MC_CMD_SPI_COMMAND: (variadic in, variadic out)
663 * Read/Write to/from the SPI device.
664 *
665 * Locks required: SPI_LOCK
666 * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held)
667 */
668#define MC_CMD_SPI_COMMAND 0x22
669#define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes))
670#define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0
671#define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0
672#define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4
673#define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8
674/* Data to write here */
675#define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12
676#define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes)
677/* Data read here */
678#define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0
679
680/* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out)
681 * Read/Write to/from the I2C bus.
682 *
683 * Locks required: I2C_LOCK
684 * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held)
685 */
686#define MC_CMD_I2C_RW 0x23
687#define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes))
688#define MC_CMD_I2C_RW_IN_ARGS_OFST 0
689#define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0
690#define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4
691/* Data to write here */
692#define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8
693#define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes)
694/* Data read here */
695#define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0
696
697/* Generic phy capability bitmask */
698#define MC_CMD_PHY_CAP_10HDX_LBN 1
699#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
700#define MC_CMD_PHY_CAP_10FDX_LBN 2
701#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
702#define MC_CMD_PHY_CAP_100HDX_LBN 3
703#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
704#define MC_CMD_PHY_CAP_100FDX_LBN 4
705#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
706#define MC_CMD_PHY_CAP_1000HDX_LBN 5
707#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
708#define MC_CMD_PHY_CAP_1000FDX_LBN 6
709#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
710#define MC_CMD_PHY_CAP_10000FDX_LBN 7
711#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
712#define MC_CMD_PHY_CAP_PAUSE_LBN 8
713#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
714#define MC_CMD_PHY_CAP_ASYM_LBN 9
715#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
716#define MC_CMD_PHY_CAP_AN_LBN 10
717#define MC_CMD_PHY_CAP_AN_WIDTH 1
718
719/* Generic loopback enumeration */
720#define MC_CMD_LOOPBACK_NONE 0
721#define MC_CMD_LOOPBACK_DATA 1
722#define MC_CMD_LOOPBACK_GMAC 2
723#define MC_CMD_LOOPBACK_XGMII 3
724#define MC_CMD_LOOPBACK_XGXS 4
725#define MC_CMD_LOOPBACK_XAUI 5
726#define MC_CMD_LOOPBACK_GMII 6
727#define MC_CMD_LOOPBACK_SGMII 7
728#define MC_CMD_LOOPBACK_XGBR 8
729#define MC_CMD_LOOPBACK_XFI 9
730#define MC_CMD_LOOPBACK_XAUI_FAR 10
731#define MC_CMD_LOOPBACK_GMII_FAR 11
732#define MC_CMD_LOOPBACK_SGMII_FAR 12
733#define MC_CMD_LOOPBACK_XFI_FAR 13
734#define MC_CMD_LOOPBACK_GPHY 14
735#define MC_CMD_LOOPBACK_PHYXS 15
736#define MC_CMD_LOOPBACK_PCS 16
737#define MC_CMD_LOOPBACK_PMAPMD 17
738#define MC_CMD_LOOPBACK_XPORT 18
739#define MC_CMD_LOOPBACK_XGMII_WS 19
740#define MC_CMD_LOOPBACK_XAUI_WS 20
741#define MC_CMD_LOOPBACK_XAUI_WS_FAR 21
742#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22
743#define MC_CMD_LOOPBACK_GMII_WS 23
744#define MC_CMD_LOOPBACK_XFI_WS 24
745#define MC_CMD_LOOPBACK_XFI_WS_FAR 25
746#define MC_CMD_LOOPBACK_PHYXS_WS 26
747
748/* Generic PHY statistics enumeration */
749#define MC_CMD_OUI 0
750#define MC_CMD_PMA_PMD_LINK_UP 1
751#define MC_CMD_PMA_PMD_RX_FAULT 2
752#define MC_CMD_PMA_PMD_TX_FAULT 3
753#define MC_CMD_PMA_PMD_SIGNAL 4
754#define MC_CMD_PMA_PMD_SNR_A 5
755#define MC_CMD_PMA_PMD_SNR_B 6
756#define MC_CMD_PMA_PMD_SNR_C 7
757#define MC_CMD_PMA_PMD_SNR_D 8
758#define MC_CMD_PCS_LINK_UP 9
759#define MC_CMD_PCS_RX_FAULT 10
760#define MC_CMD_PCS_TX_FAULT 11
761#define MC_CMD_PCS_BER 12
762#define MC_CMD_PCS_BLOCK_ERRORS 13
763#define MC_CMD_PHYXS_LINK_UP 14
764#define MC_CMD_PHYXS_RX_FAULT 15
765#define MC_CMD_PHYXS_TX_FAULT 16
766#define MC_CMD_PHYXS_ALIGN 17
767#define MC_CMD_PHYXS_SYNC 18
768#define MC_CMD_AN_LINK_UP 19
769#define MC_CMD_AN_COMPLETE 20
770#define MC_CMD_AN_10GBT_STATUS 21
771#define MC_CMD_CL22_LINK_UP 22
772#define MC_CMD_PHY_NSTATS 23
773
774/* MC_CMD_GET_PHY_CFG:
775 * Report PHY configuration. This guarantees to succeed even if the PHY is in
776 * a "zombie" state.
777 *
778 * Locks required: None
779 * Return code: 0
780 */
781#define MC_CMD_GET_PHY_CFG 0x24
782
783#define MC_CMD_GET_PHY_CFG_IN_LEN 0
784#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
785
786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
789#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1
790#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1
791#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2
792#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1
793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
800/* Bitmask of supported capabilities */
801#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
802#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
803#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
804/* PHY statistics bitmap */
805#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
806/* PHY type/name string */
807#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
808#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
809#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
810#define MC_CMD_MEDIA_XAUI 1
811#define MC_CMD_MEDIA_CX4 2
812#define MC_CMD_MEDIA_KX4 3
813#define MC_CMD_MEDIA_XFP 4
814#define MC_CMD_MEDIA_SFP_PLUS 5
815#define MC_CMD_MEDIA_BASE_T 6
816/* MDIO "MMDS" supported */
817#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
818/* Native clause 22 */
819#define MC_CMD_MMD_CLAUSE22 0
820#define MC_CMD_MMD_CLAUSE45_PMAPMD 1
821#define MC_CMD_MMD_CLAUSE45_WIS 2
822#define MC_CMD_MMD_CLAUSE45_PCS 3
823#define MC_CMD_MMD_CLAUSE45_PHYXS 4
824#define MC_CMD_MMD_CLAUSE45_DTEXS 5
825#define MC_CMD_MMD_CLAUSE45_TC 6
826#define MC_CMD_MMD_CLAUSE45_AN 7
827/* Clause22 proxied over clause45 by PHY */
828#define MC_CMD_MMD_CLAUSE45_C22EXT 29
829#define MC_CMD_MMD_CLAUSE45_VEND1 30
830#define MC_CMD_MMD_CLAUSE45_VEND2 31
831/* PHY stepping version */
832#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
833#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
834
835/* MC_CMD_START_PHY_BIST:
836 * Start a BIST test on the PHY.
837 *
838 * Locks required: PHY_LOCK if doing a PHY BIST
839 * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
840 */
841#define MC_CMD_START_BIST 0x25
842#define MC_CMD_START_BIST_IN_LEN 4
843#define MC_CMD_START_BIST_TYPE_OFST 0
844
845/* Run the PHY's short BIST */
846#define MC_CMD_PHY_BIST_SHORT 1
847/* Run the PHY's long BIST */
848#define MC_CMD_PHY_BIST_LONG 2
849/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
850#define MC_CMD_BPX_SERDES_BIST 3
851
852/* MC_CMD_POLL_PHY_BIST: (variadic output)
853 * Poll for BIST completion
854 *
855 * Returns a single status code, and a binary blob of phy-specific
856 * bist output. If the driver can't successfully parse the BIST output,
857 * it should still respect the Pass/Fail in OUT.RESULT.
858 *
859 * Locks required: PHY_LOCK if doing a PHY BIST
860 * Return code: 0, EACCES (if PHY_LOCK is not held)
861 */
862#define MC_CMD_POLL_BIST 0x26
863#define MC_CMD_POLL_BIST_IN_LEN 0
864#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
865#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
866#define MC_CMD_POLL_BIST_RUNNING 1
867#define MC_CMD_POLL_BIST_PASSED 2
868#define MC_CMD_POLL_BIST_FAILED 3
869#define MC_CMD_POLL_BIST_TIMEOUT 4
870#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
871
872/* MC_CMD_PHY_SPI: (variadic in, variadic out)
873 * Read/Write/Erase the PHY SPI device
874 *
875 * Locks required: PHY_LOCK
876 * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held)
877 */
878#define MC_CMD_PHY_SPI 0x27
879#define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes))
880#define MC_CMD_PHY_SPI_IN_ARGS_OFST 0
881#define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0
882#define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4
883#define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8
884/* Data to write here */
885#define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12
886#define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes)
887/* Data read here */
888#define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0
889
890
891/* MC_CMD_GET_LOOPBACK_MODES:
892 * Returns a bitmask of loopback modes available at each speed.
893 *
894 * Locks required: None
895 * Return code: 0
896 */
897#define MC_CMD_GET_LOOPBACK_MODES 0x28
898#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
899#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
900#define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0
901#define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8
902#define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16
903#define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24
904
905/* Flow control enumeration */
906#define MC_CMD_FCNTL_OFF 0
907#define MC_CMD_FCNTL_RESPOND 1
908#define MC_CMD_FCNTL_BIDIR 2
909/* Auto - Use what the link has autonegotiated
910 * - The driver should modify the advertised capabilities via SET_LINK.CAP
911 * to control the negotiated flow control mode.
912 * - Can only be set if the PHY supports PAUSE+ASYM capabilities
913 * - Never returned by GET_LINK as the value programmed into the MAC
914 */
915#define MC_CMD_FCNTL_AUTO 3
916
917/* Generic mac fault bitmask */
918#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
919#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
920#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
921#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
922#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
923#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
924
925/* MC_CMD_GET_LINK:
926 * Read the unified MAC/PHY link state
927 *
928 * Locks required: None
929 * Return code: 0, ETIME
930 */
931#define MC_CMD_GET_LINK 0x29
932#define MC_CMD_GET_LINK_IN_LEN 0
933#define MC_CMD_GET_LINK_OUT_LEN 28
934/* near-side and link-partner advertised capabilities */
935#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
936#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
937/* Autonegotiated speed in mbit/s. The link may still be down
938 * even if this reads non-zero */
939#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
940#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
941#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
942/* Whether we have overall link up */
943#define MC_CMD_GET_LINK_LINK_UP_LBN 0
944#define MC_CMD_GET_LINK_LINK_UP_WIDTH 1
945#define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1
946#define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1
947/* Whether we have link at the layers provided by the BPX */
948#define MC_CMD_GET_LINK_BPX_LINK_LBN 2
949#define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1
950/* Whether the PHY has external link */
951#define MC_CMD_GET_LINK_PHY_LINK_LBN 3
952#define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1
953#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
954#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
955
956/* MC_CMD_SET_LINK:
957 * Write the unified MAC/PHY link configuration
958 *
959 * A loopback speed of "0" is supported, and means
960 * (choose any available speed)
961 *
962 * Locks required: None
963 * Return code: 0, EINVAL, ETIME
964 */
965#define MC_CMD_SET_LINK 0x2a
966#define MC_CMD_SET_LINK_IN_LEN 16
967#define MC_CMD_SET_LINK_IN_CAP_OFST 0
968#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
969#define MC_CMD_SET_LINK_LOWPOWER_LBN 0
970#define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1
971#define MC_CMD_SET_LINK_POWEROFF_LBN 1
972#define MC_CMD_SET_LINK_POWEROFF_WIDTH 1
973#define MC_CMD_SET_LINK_TXDIS_LBN 2
974#define MC_CMD_SET_LINK_TXDIS_WIDTH 1
975#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
976#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
977#define MC_CMD_SET_LINK_OUT_LEN 0
978
979/* MC_CMD_SET_ID_LED:
980 * Set identification LED state
981 *
982 * Locks required: None
983 * Return code: 0, EINVAL
984 */
985#define MC_CMD_SET_ID_LED 0x2b
986#define MC_CMD_SET_ID_LED_IN_LEN 4
987#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
988#define MC_CMD_LED_OFF 0
989#define MC_CMD_LED_ON 1
990#define MC_CMD_LED_DEFAULT 2
991#define MC_CMD_SET_ID_LED_OUT_LEN 0
992
993/* MC_CMD_SET_MAC:
994 * Set MAC configuration
995 *
996 * The MTU is the MTU programmed directly into the XMAC/GMAC
997 * (inclusive of EtherII, VLAN, bug16011 padding)
998 *
999 * Locks required: None
1000 * Return code: 0, EINVAL
1001 */
1002#define MC_CMD_SET_MAC 0x2c
1003#define MC_CMD_SET_MAC_IN_LEN 24
1004#define MC_CMD_SET_MAC_IN_MTU_OFST 0
1005#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
1006#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
1007#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
1008#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
1009#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
1010#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
1011#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
1012#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
1013#define MC_CMD_SET_MAC_OUT_LEN 0
1014
1015/* MC_CMD_PHY_STATS:
1016 * Get generic PHY statistics
1017 *
1018 * This call returns the statistics for a generic PHY, by direct DMA
1019 * into host memory, in a sparse array (indexed by the enumerate).
1020 * Each value is represented by a 32bit number.
1021 *
1022 * Locks required: None
1023 * Returns: 0, ETIME
1024 * Response methods: shared memory, event
1025 */
1026#define MC_CMD_PHY_STATS 0x2d
1027#define MC_CMD_PHY_STATS_IN_LEN 8
1028#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1029#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1030#define MC_CMD_PHY_STATS_OUT_LEN 0
1031
1032/* Unified MAC statistics enumeration */
1033#define MC_CMD_MAC_GENERATION_START 0
1034#define MC_CMD_MAC_TX_PKTS 1
1035#define MC_CMD_MAC_TX_PAUSE_PKTS 2
1036#define MC_CMD_MAC_TX_CONTROL_PKTS 3
1037#define MC_CMD_MAC_TX_UNICAST_PKTS 4
1038#define MC_CMD_MAC_TX_MULTICAST_PKTS 5
1039#define MC_CMD_MAC_TX_BROADCAST_PKTS 6
1040#define MC_CMD_MAC_TX_BYTES 7
1041#define MC_CMD_MAC_TX_BAD_BYTES 8
1042#define MC_CMD_MAC_TX_LT64_PKTS 9
1043#define MC_CMD_MAC_TX_64_PKTS 10
1044#define MC_CMD_MAC_TX_65_TO_127_PKTS 11
1045#define MC_CMD_MAC_TX_128_TO_255_PKTS 12
1046#define MC_CMD_MAC_TX_256_TO_511_PKTS 13
1047#define MC_CMD_MAC_TX_512_TO_1023_PKTS 14
1048#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15
1049#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16
1050#define MC_CMD_MAC_TX_GTJUMBO_PKTS 17
1051#define MC_CMD_MAC_TX_BAD_FCS_PKTS 18
1052#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19
1053#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20
1054#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21
1055#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22
1056#define MC_CMD_MAC_TX_DEFERRED_PKTS 23
1057#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24
1058#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25
1059#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26
1060#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27
1061#define MC_CMD_MAC_RX_PKTS 28
1062#define MC_CMD_MAC_RX_PAUSE_PKTS 29
1063#define MC_CMD_MAC_RX_GOOD_PKTS 30
1064#define MC_CMD_MAC_RX_CONTROL_PKTS 31
1065#define MC_CMD_MAC_RX_UNICAST_PKTS 32
1066#define MC_CMD_MAC_RX_MULTICAST_PKTS 33
1067#define MC_CMD_MAC_RX_BROADCAST_PKTS 34
1068#define MC_CMD_MAC_RX_BYTES 35
1069#define MC_CMD_MAC_RX_BAD_BYTES 36
1070#define MC_CMD_MAC_RX_64_PKTS 37
1071#define MC_CMD_MAC_RX_65_TO_127_PKTS 38
1072#define MC_CMD_MAC_RX_128_TO_255_PKTS 39
1073#define MC_CMD_MAC_RX_256_TO_511_PKTS 40
1074#define MC_CMD_MAC_RX_512_TO_1023_PKTS 41
1075#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42
1076#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43
1077#define MC_CMD_MAC_RX_GTJUMBO_PKTS 44
1078#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45
1079#define MC_CMD_MAC_RX_BAD_FCS_PKTS 46
1080#define MC_CMD_MAC_RX_OVERFLOW_PKTS 47
1081#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48
1082#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49
1083#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50
1084#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51
1085#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52
1086#define MC_CMD_MAC_RX_JABBER_PKTS 53
1087#define MC_CMD_MAC_RX_NODESC_DROPS 54
1088#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55
1089#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56
1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
1092#define MC_CMD_MAC_RX_MATCH_FAULT 59
1093/* Insert new members here. */
1094#define MC_CMD_MAC_GENERATION_END 60
1095#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
1096
1097/* MC_CMD_MAC_STATS:
1098 * Get unified GMAC/XMAC statistics
1099 *
1100 * This call returns unified statistics maintained by the MC as it
1101 * switches between the GMAC and XMAC. The MC will write out all
1102 * supported stats. The driver should zero initialise the buffer to
1103 * guarantee consistent results.
1104 *
1105 * Locks required: None
1106 * Returns: 0
1107 * Response methods: shared memory, event
1108 */
1109#define MC_CMD_MAC_STATS 0x2e
1110#define MC_CMD_MAC_STATS_IN_LEN 16
1111#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
1112#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
1113#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
1114#define MC_CMD_MAC_STATS_CMD_DMA_LBN 0
1115#define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1
1116#define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1
1117#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
1118#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
1119#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
1120/* Fields only relevant when PERIODIC_CHANGE is set */
1121#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
1122#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
1123#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
1124#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
1125#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
1126#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
1127#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
1128
1129#define MC_CMD_MAC_STATS_OUT_LEN 0
1130
1131/* Callisto flags */
1132#define MC_CMD_SFT9001_ROBUST_LBN 0
1133#define MC_CMD_SFT9001_ROBUST_WIDTH 1
1134#define MC_CMD_SFT9001_SHORT_REACH_LBN 1
1135#define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1
1136
1137/* MC_CMD_SFT9001_GET:
1138 * Read current callisto specific setting
1139 *
1140 * Locks required: None
1141 * Returns: 0, ETIME
1142 */
1143#define MC_CMD_SFT9001_GET 0x30
1144#define MC_CMD_SFT9001_GET_IN_LEN 0
1145#define MC_CMD_SFT9001_GET_OUT_LEN 4
1146#define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0
1147
1148/* MC_CMD_SFT9001_SET:
1149 * Write current callisto specific setting
1150 *
1151 * Locks required: None
1152 * Returns: 0, ETIME, EINVAL
1153 */
1154#define MC_CMD_SFT9001_SET 0x31
1155#define MC_CMD_SFT9001_SET_IN_LEN 4
1156#define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0
1157#define MC_CMD_SFT9001_SET_OUT_LEN 0
1158
1159
1160/* MC_CMD_WOL_FILTER_SET:
1161 * Set a WoL filter
1162 *
1163 * Locks required: None
1164 * Returns: 0, EBUSY, EINVAL, ENOSYS
1165 */
1166#define MC_CMD_WOL_FILTER_SET 0x32
1167#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */
1168#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
1169#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
1170
1171/* There is a union at offset 8, following defines overlap due to
1172 * this */
1173#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
1174
1175#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \
1176 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
1177
1178#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \
1179 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
1180#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \
1181 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4)
1182#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \
1183 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8)
1184#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \
1185 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10)
1186
1187#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \
1188 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
1189#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \
1190 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16)
1191#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \
1192 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32)
1193#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \
1194 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34)
1195
1196#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \
1197 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
1198#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \
1199 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48)
1200#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \
1201 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176)
1202#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \
1203 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177)
1204#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
1205 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
1206
1207#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
1208#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
1209
1210/* WOL Filter types enumeration */
1211#define MC_CMD_WOL_TYPE_MAGIC 0x0
1212 /* unused 0x1 */
1213#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
1214#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
1215#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
1216#define MC_CMD_WOL_TYPE_BITMAP 0x5
1217#define MC_CMD_WOL_TYPE_MAX 0x6
1218
1219#define MC_CMD_FILTER_MODE_SIMPLE 0x0
1220#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
1221
1222/* MC_CMD_WOL_FILTER_REMOVE:
1223 * Remove a WoL filter
1224 *
1225 * Locks required: None
1226 * Returns: 0, EINVAL, ENOSYS
1227 */
1228#define MC_CMD_WOL_FILTER_REMOVE 0x33
1229#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
1230#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
1231#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
1232
1233
1234/* MC_CMD_WOL_FILTER_RESET:
1235 * Reset (i.e. remove all) WoL filters
1236 *
1237 * Locks required: None
1238 * Returns: 0, ENOSYS
1239 */
1240#define MC_CMD_WOL_FILTER_RESET 0x34
1241#define MC_CMD_WOL_FILTER_RESET_IN_LEN 0
1242#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
1243
1244/* MC_CMD_SET_MCAST_HASH:
1245 * Set the MCAST hash value without otherwise
1246 * reconfiguring the MAC
1247 */
1248#define MC_CMD_SET_MCAST_HASH 0x35
1249#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
1250#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
1251#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
1252#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
1253
1254/* MC_CMD_NVRAM_TYPES:
1255 * Return bitfield indicating available types of virtual NVRAM partitions
1256 *
1257 * Locks required: none
1258 * Returns: 0
1259 */
1260#define MC_CMD_NVRAM_TYPES 0x36
1261#define MC_CMD_NVRAM_TYPES_IN_LEN 0
1262#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
1263#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
1264
1265/* Supported NVRAM types */
1266#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0
1267#define MC_CMD_NVRAM_TYPE_MC_FW 1
1268#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2
1269#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3
1270#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4
1271#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5
1272#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6
1273#define MC_CMD_NVRAM_TYPE_EXP_ROM 7
1274#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8
1275#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9
1276#define MC_CMD_NVRAM_TYPE_PHY_PORT0 10
1277#define MC_CMD_NVRAM_TYPE_PHY_PORT1 11
1278#define MC_CMD_NVRAM_TYPE_LOG 12
1279
1280/* MC_CMD_NVRAM_INFO:
1281 * Read info about a virtual NVRAM partition
1282 *
1283 * Locks required: none
1284 * Returns: 0, EINVAL (bad type)
1285 */
1286#define MC_CMD_NVRAM_INFO 0x37
1287#define MC_CMD_NVRAM_INFO_IN_LEN 4
1288#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
1289#define MC_CMD_NVRAM_INFO_OUT_LEN 24
1290#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
1291#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
1292#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
1293#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
1294#define MC_CMD_NVRAM_PROTECTED_LBN 0
1295#define MC_CMD_NVRAM_PROTECTED_WIDTH 1
1296#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
1297#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
1298
1299/* MC_CMD_NVRAM_UPDATE_START:
1300 * Start a group of update operations on a virtual NVRAM partition
1301 *
1302 * Locks required: PHY_LOCK if type==*PHY*
1303 * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held)
1304 */
1305#define MC_CMD_NVRAM_UPDATE_START 0x38
1306#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
1307#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
1308#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
1309
1310/* MC_CMD_NVRAM_READ:
1311 * Read data from a virtual NVRAM partition
1312 *
1313 * Locks required: PHY_LOCK if type==*PHY*
1314 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
1315 */
1316#define MC_CMD_NVRAM_READ 0x39
1317#define MC_CMD_NVRAM_READ_IN_LEN 12
1318#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
1319#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
1320#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
1321#define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes)
1322#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
1323
1324/* MC_CMD_NVRAM_WRITE:
1325 * Write data to a virtual NVRAM partition
1326 *
1327 * Locks required: PHY_LOCK if type==*PHY*
1328 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
1329 */
1330#define MC_CMD_NVRAM_WRITE 0x3a
1331#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
1332#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
1333#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
1334#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
1335#define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes)
1336#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
1337
1338/* MC_CMD_NVRAM_ERASE:
1339 * Erase sector(s) from a virtual NVRAM partition
1340 *
1341 * Locks required: PHY_LOCK if type==*PHY*
1342 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
1343 */
1344#define MC_CMD_NVRAM_ERASE 0x3b
1345#define MC_CMD_NVRAM_ERASE_IN_LEN 12
1346#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
1347#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
1348#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
1349#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
1350
1351/* MC_CMD_NVRAM_UPDATE_FINISH:
1352 * Finish a group of update operations on a virtual NVRAM partition
1353 *
1354 * Locks required: PHY_LOCK if type==*PHY*
1355 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
1356 */
1357#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
1358#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4
1359#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
1360#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
1361
1362/* MC_CMD_REBOOT:
1363 * Reboot the MC. The AFTER_ASSERTION flag is intended to be used
1364 * when the driver notices an assertion failure, to allow two ports to
1365 * both recover (semi-)gracefully.
1366 *
1367 * Locks required: NONE
1368 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
1369 */
1370#define MC_CMD_REBOOT 0x3d
1371#define MC_CMD_REBOOT_IN_LEN 4
1372#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
1373#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1
1374#define MC_CMD_REBOOT_OUT_LEN 0
1375
1376/* MC_CMD_SCHEDINFO:
1377 * Request scheduler info. from the MC.
1378 *
1379 * Locks required: NONE
1380 * Returns: An array of (timeslice,maximum overrun), one for each thread,
1381 * in ascending order of thread address.
1382 */
1383#define MC_CMD_SCHEDINFO 0x3e
1384#define MC_CMD_SCHEDINFO_IN_LEN 0
1385
1386
1387/* MC_CMD_SET_REBOOT_MODE: (debug)
1388 * Set the mode for the next MC reboot.
1389 *
1390 * Locks required: NONE
1391 *
1392 * Sets the reboot mode to the specified value. Returns the old mode.
1393 */
1394#define MC_CMD_REBOOT_MODE 0x3f
1395#define MC_CMD_REBOOT_MODE_IN_LEN 4
1396#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
1397#define MC_CMD_REBOOT_MODE_OUT_LEN 4
1398#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
1399#define MC_CMD_REBOOT_MODE_NORMAL 0
1400#define MC_CMD_REBOOT_MODE_SNAPPER 3
1401
1402/* MC_CMD_DEBUG_LOG:
1403 * Null request/response command (debug)
1404 * - sequence number is always zero
1405 * - only supported on the UART interface
1406 * (the same set of bytes is delivered as an
1407 * event over PCI)
1408 */
1409#define MC_CMD_DEBUG_LOG 0x40
1410#define MC_CMD_DEBUG_LOG_IN_LEN 0
1411#define MC_CMD_DEBUG_LOG_OUT_LEN 0
1412
1413/* Generic sensor enumeration. Note that a dual port NIC
1414 * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and
1415 * PHY1_TEMP depending on whether there is a single sensor
1416 * in the vicinity of the two ports, or one per port.
1417 */
1418#define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */
1419#define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */
1420#define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */
1421#define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */
1422#define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */
1423#define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */
1424#define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */
1425#define MC_CMD_SENSOR_IN_1V0 7 /* mV */
1426#define MC_CMD_SENSOR_IN_1V2 8 /* mV */
1427#define MC_CMD_SENSOR_IN_1V8 9 /* mV */
1428#define MC_CMD_SENSOR_IN_2V5 10 /* mV */
1429#define MC_CMD_SENSOR_IN_3V3 11 /* mV */
1430#define MC_CMD_SENSOR_IN_12V0 12 /* mV */
1431
1432
1433/* Sensor state */
1434#define MC_CMD_SENSOR_STATE_OK 0
1435#define MC_CMD_SENSOR_STATE_WARNING 1
1436#define MC_CMD_SENSOR_STATE_FATAL 2
1437#define MC_CMD_SENSOR_STATE_BROKEN 3
1438
1439/* MC_CMD_SENSOR_INFO:
1440 * Returns information about every available sensor.
1441 *
1442 * Each sensor has a single (16bit) value, and a corresponding state.
1443 * The mapping between value and sensor is nominally determined by the
1444 * MC, but in practice is implemented as zero (BROKEN), one (TEMPERATURE),
1445 * or two (VOLTAGE) ranges per sensor per state.
1446 *
1447 * This call returns a mask (32bit) of the sensors that are supported
1448 * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte
1449 * offsets to the per-sensor arrays. Each sensor array has four 16bit
1450 * numbers, min1, max1, min2, max2.
1451 *
1452 * Locks required: None
1453 * Returns: 0
1454 */
1455#define MC_CMD_SENSOR_INFO 0x41
1456#define MC_CMD_SENSOR_INFO_IN_LEN 0
1457#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
1458#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
1459 (4 + (_x))
1460#define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \
1461 ((_ofst) + 0)
1462#define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \
1463 ((_ofst) + 2)
1464#define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \
1465 ((_ofst) + 4)
1466#define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \
1467 ((_ofst) + 6)
1468
1469/* MC_CMD_READ_SENSORS
1470 * Returns the current (value, state) for each sensor
1471 *
1472 * Returns the current (value, state) [each 16bit] of each sensor supported by
1473 * this board, by DMA'ing a sparse array (indexed by the sensor type) into host
1474 * memory.
1475 *
1476 * The MC will send a SENSOREVT event every time any sensor changes state. The
1477 * driver is responsible for ensuring that it doesn't miss any events. The board
1478 * will function normally if all sensors are in STATE_OK or STATE_WARNING.
1479 * Otherwise the board should not be expected to function.
1480 */
1481#define MC_CMD_READ_SENSORS 0x42
1482#define MC_CMD_READ_SENSORS_IN_LEN 8
1483#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
1484#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1485#define MC_CMD_READ_SENSORS_OUT_LEN 0
1486
1487
1488/* MC_CMD_GET_PHY_STATE:
1489 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to
1490 * boot (e.g. due to missing or corrupted firmware).
1491 *
1492 * Locks required: None
1493 * Return code: 0
1494 */
1495#define MC_CMD_GET_PHY_STATE 0x43
1496
1497#define MC_CMD_GET_PHY_STATE_IN_LEN 0
1498#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
1499#define MC_CMD_GET_PHY_STATE_STATE_OFST 0
1500/* PHY state enumeration: */
1501#define MC_CMD_PHY_STATE_OK 1
1502#define MC_CMD_PHY_STATE_ZOMBIE 2
1503
1504
1505/* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
1506 * disable 802.1Qbb for a given priority. */
1507#define MC_CMD_SETUP_8021QBB 0x44
1508#define MC_CMD_SETUP_8021QBB_IN_LEN 32
1509#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
1510#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0
1511
1512
1513/* MC_CMD_WOL_FILTER_GET:
1514 * Retrieve ID of any WoL filters
1515 *
1516 * Locks required: None
1517 * Returns: 0, ENOSYS
1518 */
1519#define MC_CMD_WOL_FILTER_GET 0x45
1520#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
1521#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
1522#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
1523
1524
1525/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD:
1526 * Offload a protocol to NIC for lights-out state
1527 *
1528 * Locks required: None
1529 * Returns: 0, ENOSYS
1530 */
1531#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
1532
1533#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16
1534#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
1535
1536/* There is a union at offset 4, following defines overlap due to
1537 * this */
1538#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
1539#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4
1540#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10
1541#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4
1542#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10
1543#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26
1544
1545#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
1546#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
1547
1548
1549/* MC_CMD_REMOVE_LIGHTSOUT_PROTOCOL_OFFLOAD:
1550 * Offload a protocol to NIC for lights-out state
1551 *
1552 * Locks required: None
1553 * Returns: 0, ENOSYS
1554 */
1555#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
1556#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
1557#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
1558
1559#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
1560#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
1561
1562/* Lights-out offload protocols enumeration */
1563#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1
1564#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2
1565
1566
1567/* MC_CMD_MAC_RESET_RESTORE:
1568 * Restore MAC after block reset
1569 *
1570 * Locks required: None
1571 * Returns: 0
1572 */
1573
1574#define MC_CMD_MAC_RESET_RESTORE 0x48
1575#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
1576#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1577
1578#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
new file mode 100644
index 000000000000..0e1bcc5a0d52
--- /dev/null
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -0,0 +1,597 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*
11 * Driver for PHY related operations via MCDI.
12 */
13
14#include "efx.h"
15#include "phy.h"
16#include "mcdi.h"
17#include "mcdi_pcol.h"
18#include "mdio_10g.h"
19
20struct efx_mcdi_phy_cfg {
21 u32 flags;
22 u32 type;
23 u32 supported_cap;
24 u32 channel;
25 u32 port;
26 u32 stats_mask;
27 u8 name[20];
28 u32 media;
29 u32 mmd_mask;
30 u8 revision[20];
31 u32 forced_cap;
32};
33
34static int
35efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
36{
37 u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
38 size_t outlen;
39 int rc;
40
41 BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
42 BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
43
44 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
45 outbuf, sizeof(outbuf), &outlen);
46 if (rc)
47 goto fail;
48
49 if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
50 rc = -EMSGSIZE;
51 goto fail;
52 }
53
54 cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
55 cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
56 cfg->supported_cap =
57 MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
58 cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
59 cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
60 cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
61 memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
62 sizeof(cfg->name));
63 cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
64 cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
65 memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
66 sizeof(cfg->revision));
67
68 return 0;
69
70fail:
71 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
72 return rc;
73}
74
75static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
76 u32 flags, u32 loopback_mode,
77 u32 loopback_speed)
78{
79 u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
80 int rc;
81
82 BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
83
84 MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
85 MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
86 MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
87 MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
88
89 rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
90 NULL, 0, NULL);
91 if (rc)
92 goto fail;
93
94 return 0;
95
96fail:
97 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
98 return rc;
99}
100
101static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
102{
103 u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
104 size_t outlen;
105 int rc;
106
107 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
108 outbuf, sizeof(outbuf), &outlen);
109 if (rc)
110 goto fail;
111
112 if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
113 rc = -EMSGSIZE;
114 goto fail;
115 }
116
117 *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED);
118
119 return 0;
120
121fail:
122 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
123 return rc;
124}
125
126int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
127 unsigned int prtad, unsigned int devad, u16 addr,
128 u16 *value_out, u32 *status_out)
129{
130 u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
131 u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
132 size_t outlen;
133 int rc;
134
135 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
136 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
137 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
138 MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
139
140 rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
141 outbuf, sizeof(outbuf), &outlen);
142 if (rc)
143 goto fail;
144
145 *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
146 *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
147 return 0;
148
149fail:
150 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
151 return rc;
152}
153
154int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
155 unsigned int prtad, unsigned int devad, u16 addr,
156 u16 value, u32 *status_out)
157{
158 u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
159 u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
160 size_t outlen;
161 int rc;
162
163 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
164 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
165 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
166 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
167 MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
168
169 rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
170 outbuf, sizeof(outbuf), &outlen);
171 if (rc)
172 goto fail;
173
174 *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
175 return 0;
176
177fail:
178 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
179 return rc;
180}
181
182static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
183{
184 u32 result = 0;
185
186 switch (media) {
187 case MC_CMD_MEDIA_KX4:
188 result |= SUPPORTED_Backplane;
189 if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
190 result |= SUPPORTED_1000baseKX_Full;
191 if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
192 result |= SUPPORTED_10000baseKX4_Full;
193 break;
194
195 case MC_CMD_MEDIA_XFP:
196 case MC_CMD_MEDIA_SFP_PLUS:
197 result |= SUPPORTED_FIBRE;
198 break;
199
200 case MC_CMD_MEDIA_BASE_T:
201 result |= SUPPORTED_TP;
202 if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
203 result |= SUPPORTED_10baseT_Half;
204 if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
205 result |= SUPPORTED_10baseT_Full;
206 if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
207 result |= SUPPORTED_100baseT_Half;
208 if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
209 result |= SUPPORTED_100baseT_Full;
210 if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
211 result |= SUPPORTED_1000baseT_Half;
212 if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
213 result |= SUPPORTED_1000baseT_Full;
214 if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
215 result |= SUPPORTED_10000baseT_Full;
216 break;
217 }
218
219 if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
220 result |= SUPPORTED_Pause;
221 if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
222 result |= SUPPORTED_Asym_Pause;
223 if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
224 result |= SUPPORTED_Autoneg;
225
226 return result;
227}
228
229static u32 ethtool_to_mcdi_cap(u32 cap)
230{
231 u32 result = 0;
232
233 if (cap & SUPPORTED_10baseT_Half)
234 result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
235 if (cap & SUPPORTED_10baseT_Full)
236 result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
237 if (cap & SUPPORTED_100baseT_Half)
238 result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
239 if (cap & SUPPORTED_100baseT_Full)
240 result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
241 if (cap & SUPPORTED_1000baseT_Half)
242 result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
243 if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
244 result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
245 if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
246 result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
247 if (cap & SUPPORTED_Pause)
248 result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
249 if (cap & SUPPORTED_Asym_Pause)
250 result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
251 if (cap & SUPPORTED_Autoneg)
252 result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
253
254 return result;
255}
256
257static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
258{
259 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
260 enum efx_phy_mode mode, supported;
261 u32 flags;
262
263 /* TODO: Advertise the capabilities supported by this PHY */
264 supported = 0;
265 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN))
266 supported |= PHY_MODE_TX_DISABLED;
267 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN))
268 supported |= PHY_MODE_LOW_POWER;
269 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN))
270 supported |= PHY_MODE_OFF;
271
272 mode = efx->phy_mode & supported;
273
274 flags = 0;
275 if (mode & PHY_MODE_TX_DISABLED)
276 flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN);
277 if (mode & PHY_MODE_LOW_POWER)
278 flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN);
279 if (mode & PHY_MODE_OFF)
280 flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN);
281
282 return flags;
283}
284
285static u32 mcdi_to_ethtool_media(u32 media)
286{
287 switch (media) {
288 case MC_CMD_MEDIA_XAUI:
289 case MC_CMD_MEDIA_CX4:
290 case MC_CMD_MEDIA_KX4:
291 return PORT_OTHER;
292
293 case MC_CMD_MEDIA_XFP:
294 case MC_CMD_MEDIA_SFP_PLUS:
295 return PORT_FIBRE;
296
297 case MC_CMD_MEDIA_BASE_T:
298 return PORT_TP;
299
300 default:
301 return PORT_OTHER;
302 }
303}
304
305static int efx_mcdi_phy_probe(struct efx_nic *efx)
306{
307 struct efx_mcdi_phy_cfg *phy_cfg;
308 int rc;
309
310 /* TODO: Move phy_data initialisation to
311 * phy_op->probe/remove, rather than init/fini */
312 phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
313 if (phy_cfg == NULL) {
314 rc = -ENOMEM;
315 goto fail_alloc;
316 }
317 rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
318 if (rc != 0)
319 goto fail;
320
321 efx->phy_type = phy_cfg->type;
322
323 efx->mdio_bus = phy_cfg->channel;
324 efx->mdio.prtad = phy_cfg->port;
325 efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
326 efx->mdio.mode_support = 0;
327 if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
328 efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
329 if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
330 efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
331
332 /* Assert that we can map efx -> mcdi loopback modes */
333 BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
334 BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
335 BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
336 BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
337 BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
338 BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
339 BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
340 BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
341 BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
342 BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
343 BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
344 BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
345 BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
346 BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
347 BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
348 BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
349 BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
350 BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
351 BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
352 BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
353 BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
354 BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
355 BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
356 BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
357 BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
358 BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
359 BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
360
361 rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
362 if (rc != 0)
363 goto fail;
364 /* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
365 * but by convention we don't */
366 efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
367
368 kfree(phy_cfg);
369
370 return 0;
371
372fail:
373 kfree(phy_cfg);
374fail_alloc:
375 return rc;
376}
377
378static int efx_mcdi_phy_init(struct efx_nic *efx)
379{
380 struct efx_mcdi_phy_cfg *phy_data;
381 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
382 u32 caps;
383 int rc;
384
385 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
386 if (phy_data == NULL)
387 return -ENOMEM;
388
389 rc = efx_mcdi_get_phy_cfg(efx, phy_data);
390 if (rc != 0)
391 goto fail;
392
393 efx->phy_data = phy_data;
394
395 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
396 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
397 outbuf, sizeof(outbuf), NULL);
398 if (rc)
399 goto fail;
400
401 caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
402 if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
403 efx->link_advertising =
404 mcdi_to_ethtool_cap(phy_data->media, caps);
405 else
406 phy_data->forced_cap = caps;
407
408 return 0;
409
410fail:
411 kfree(phy_data);
412 return rc;
413}
414
415int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
416{
417 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
418 u32 caps = (efx->link_advertising ?
419 ethtool_to_mcdi_cap(efx->link_advertising) :
420 phy_cfg->forced_cap);
421
422 return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
423 efx->loopback_mode, 0);
424}
425
/* Decode the link state reported by the MC (speed, flag bits and flow
 * control mode) into the driver's struct efx_link_state.
 */
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
			      struct efx_link_state *link_state,
			      u32 speed, u32 flags, u32 fcntl)
{
	switch (fcntl) {
	case MC_CMD_FCNTL_AUTO:
		WARN_ON(1);	/* This is not a link mode */
		link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
		break;
	case MC_CMD_FCNTL_BIDIR:
		link_state->fc = EFX_FC_TX | EFX_FC_RX;
		break;
	case MC_CMD_FCNTL_RESPOND:
		link_state->fc = EFX_FC_RX;
		break;
	default:
		WARN_ON(1);
		/* fall through - treat unknown values as flow control off */
	case MC_CMD_FCNTL_OFF:
		link_state->fc = 0;
		break;
	}

	link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
	link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN));
	link_state->speed = speed;
}
452
453/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
454 * supported by the link partner. Warn the user if this isn't the case
455 */
456void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
457{
458 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
459 u32 rmtadv;
460
461 /* The link partner capabilities are only relevent if the
462 * link supports flow control autonegotiation */
463 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
464 return;
465
466 /* If flow control autoneg is supported and enabled, then fine */
467 if (efx->wanted_fc & EFX_FC_AUTO)
468 return;
469
470 rmtadv = 0;
471 if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
472 rmtadv |= ADVERTISED_Pause;
473 if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
474 rmtadv |= ADVERTISED_Asym_Pause;
475
476 if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
477 EFX_ERR(efx, "warning: link partner doesn't support "
478 "pause frames");
479}
480
481static bool efx_mcdi_phy_poll(struct efx_nic *efx)
482{
483 struct efx_link_state old_state = efx->link_state;
484 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
485 int rc;
486
487 WARN_ON(!mutex_is_locked(&efx->mac_lock));
488
489 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
490
491 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
492 outbuf, sizeof(outbuf), NULL);
493 if (rc) {
494 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
495 efx->link_state.up = false;
496 } else {
497 efx_mcdi_phy_decode_link(
498 efx, &efx->link_state,
499 MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
500 MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
501 MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
502 }
503
504 return !efx_link_state_equal(&efx->link_state, &old_state);
505}
506
507static void efx_mcdi_phy_fini(struct efx_nic *efx)
508{
509 struct efx_mcdi_phy_data *phy_data = efx->phy_data;
510
511 efx->phy_data = NULL;
512 kfree(phy_data);
513}
514
515static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
516{
517 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
518 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
519 int rc;
520
521 ecmd->supported =
522 mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
523 ecmd->advertising = efx->link_advertising;
524 ecmd->speed = efx->link_state.speed;
525 ecmd->duplex = efx->link_state.fd;
526 ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
527 ecmd->phy_address = phy_cfg->port;
528 ecmd->transceiver = XCVR_INTERNAL;
529 ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
530 ecmd->mdio_support = (efx->mdio.mode_support &
531 (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
532
533 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
534 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
535 outbuf, sizeof(outbuf), NULL);
536 if (rc) {
537 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
538 return;
539 }
540 ecmd->lp_advertising =
541 mcdi_to_ethtool_cap(phy_cfg->media,
542 MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
543}
544
545static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
546{
547 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
548 u32 caps;
549 int rc;
550
551 if (ecmd->autoneg) {
552 caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
553 1 << MC_CMD_PHY_CAP_AN_LBN);
554 } else if (ecmd->duplex) {
555 switch (ecmd->speed) {
556 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
557 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
558 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
559 case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
560 default: return -EINVAL;
561 }
562 } else {
563 switch (ecmd->speed) {
564 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
565 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
566 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
567 default: return -EINVAL;
568 }
569 }
570
571 rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
572 efx->loopback_mode, 0);
573 if (rc)
574 return rc;
575
576 if (ecmd->autoneg) {
577 efx_link_set_advertising(
578 efx, ecmd->advertising | ADVERTISED_Autoneg);
579 phy_cfg->forced_cap = 0;
580 } else {
581 efx_link_set_advertising(efx, 0);
582 phy_cfg->forced_cap = caps;
583 }
584 return 0;
585}
586
/* PHY operations table for MC-managed PHYs: every operation is carried
 * out via MCDI requests to the MC firmware rather than direct MDIO.
 * run_tests/test_name are NULL - no PHY self-test is provided here. */
struct efx_phy_operations efx_mcdi_phy_ops = {
	.probe = efx_mcdi_phy_probe,
	.init = efx_mcdi_phy_init,
	.reconfigure = efx_mcdi_phy_reconfigure,
	.poll = efx_mcdi_phy_poll,
	.fini = efx_mcdi_phy_fini,
	.get_settings = efx_mcdi_phy_get_settings,
	.set_settings = efx_mcdi_phy_set_settings,
	.run_tests = NULL,
	.test_name = NULL,
};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 6c33459f9ea9..1574e52f0594 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc. 3 * Copyright 2006-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -14,8 +14,8 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "boards.h"
18#include "workarounds.h" 17#include "workarounds.h"
18#include "nic.h"
19 19
20unsigned efx_mdio_id_oui(u32 id) 20unsigned efx_mdio_id_oui(u32 id)
21{ 21{
@@ -174,7 +174,7 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
174 * of mmd's */ 174 * of mmd's */
175 if (LOOPBACK_INTERNAL(efx)) 175 if (LOOPBACK_INTERNAL(efx))
176 return true; 176 return true;
177 else if (efx->loopback_mode == LOOPBACK_NETWORK) 177 else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
178 return false; 178 return false;
179 else if (efx_phy_mode_disabled(efx->phy_mode)) 179 else if (efx_phy_mode_disabled(efx->phy_mode))
180 return false; 180 return false;
@@ -211,7 +211,7 @@ void efx_mdio_phy_reconfigure(struct efx_nic *efx)
211 efx->loopback_mode == LOOPBACK_PCS); 211 efx->loopback_mode == LOOPBACK_PCS);
212 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, 212 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
213 MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK, 213 MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
214 efx->loopback_mode == LOOPBACK_NETWORK); 214 efx->loopback_mode == LOOPBACK_PHYXS_WS);
215} 215}
216 216
217static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, 217static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
@@ -249,8 +249,6 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
249int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 249int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
250{ 250{
251 struct ethtool_cmd prev; 251 struct ethtool_cmd prev;
252 u32 required;
253 int reg;
254 252
255 efx->phy_op->get_settings(efx, &prev); 253 efx->phy_op->get_settings(efx, &prev);
256 254
@@ -266,86 +264,74 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
266 return -EINVAL; 264 return -EINVAL;
267 265
268 /* Check that PHY supports these settings */ 266 /* Check that PHY supports these settings */
269 if (ecmd->autoneg) { 267 if (!ecmd->autoneg ||
270 required = SUPPORTED_Autoneg; 268 (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
271 } else if (ecmd->duplex) {
272 switch (ecmd->speed) {
273 case SPEED_10: required = SUPPORTED_10baseT_Full; break;
274 case SPEED_100: required = SUPPORTED_100baseT_Full; break;
275 default: return -EINVAL;
276 }
277 } else {
278 switch (ecmd->speed) {
279 case SPEED_10: required = SUPPORTED_10baseT_Half; break;
280 case SPEED_100: required = SUPPORTED_100baseT_Half; break;
281 default: return -EINVAL;
282 }
283 }
284 required |= ecmd->advertising;
285 if (required & ~prev.supported)
286 return -EINVAL; 269 return -EINVAL;
287 270
288 if (ecmd->autoneg) { 271 efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
289 bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full 272 efx_mdio_an_reconfigure(efx);
290 || EFX_WORKAROUND_13204(efx));
291
292 /* Set up the base page */
293 reg = ADVERTISE_CSMA;
294 if (ecmd->advertising & ADVERTISED_10baseT_Half)
295 reg |= ADVERTISE_10HALF;
296 if (ecmd->advertising & ADVERTISED_10baseT_Full)
297 reg |= ADVERTISE_10FULL;
298 if (ecmd->advertising & ADVERTISED_100baseT_Half)
299 reg |= ADVERTISE_100HALF;
300 if (ecmd->advertising & ADVERTISED_100baseT_Full)
301 reg |= ADVERTISE_100FULL;
302 if (xnp)
303 reg |= ADVERTISE_RESV;
304 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
305 ADVERTISED_1000baseT_Full))
306 reg |= ADVERTISE_NPAGE;
307 reg |= mii_advertise_flowctrl(efx->wanted_fc);
308 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
309
310 /* Set up the (extended) next page if necessary */
311 if (efx->phy_op->set_npage_adv)
312 efx->phy_op->set_npage_adv(efx, ecmd->advertising);
313
314 /* Enable and restart AN */
315 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
316 reg |= MDIO_AN_CTRL1_ENABLE;
317 if (!(EFX_WORKAROUND_15195(efx) &&
318 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
319 reg |= MDIO_AN_CTRL1_RESTART;
320 if (xnp)
321 reg |= MDIO_AN_CTRL1_XNP;
322 else
323 reg &= ~MDIO_AN_CTRL1_XNP;
324 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
325 } else {
326 /* Disable AN */
327 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
328 MDIO_AN_CTRL1_ENABLE, false);
329
330 /* Set the basic control bits */
331 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
332 reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
333 if (ecmd->speed == SPEED_100)
334 reg |= MDIO_PMA_CTRL1_SPEED100;
335 if (ecmd->duplex)
336 reg |= MDIO_CTRL1_FULLDPLX;
337 efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
338 }
339
340 return 0; 273 return 0;
341} 274}
342 275
276/**
277 * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
278 * @efx: Efx NIC
279 */
280void efx_mdio_an_reconfigure(struct efx_nic *efx)
281{
282 bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full
283 || EFX_WORKAROUND_13204(efx));
284 int reg;
285
286 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
287
288 /* Set up the base page */
289 reg = ADVERTISE_CSMA;
290 if (efx->link_advertising & ADVERTISED_10baseT_Half)
291 reg |= ADVERTISE_10HALF;
292 if (efx->link_advertising & ADVERTISED_10baseT_Full)
293 reg |= ADVERTISE_10FULL;
294 if (efx->link_advertising & ADVERTISED_100baseT_Half)
295 reg |= ADVERTISE_100HALF;
296 if (efx->link_advertising & ADVERTISED_100baseT_Full)
297 reg |= ADVERTISE_100FULL;
298 if (xnp)
299 reg |= ADVERTISE_RESV;
300 else if (efx->link_advertising & (ADVERTISED_1000baseT_Half |
301 ADVERTISED_1000baseT_Full))
302 reg |= ADVERTISE_NPAGE;
303 if (efx->link_advertising & ADVERTISED_Pause)
304 reg |= ADVERTISE_PAUSE_CAP;
305 if (efx->link_advertising & ADVERTISED_Asym_Pause)
306 reg |= ADVERTISE_PAUSE_ASYM;
307 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
308
309 /* Set up the (extended) next page if necessary */
310 if (efx->phy_op->set_npage_adv)
311 efx->phy_op->set_npage_adv(efx, efx->link_advertising);
312
313 /* Enable and restart AN */
314 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
315 reg |= MDIO_AN_CTRL1_ENABLE;
316 if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx)))
317 reg |= MDIO_AN_CTRL1_RESTART;
318 if (xnp)
319 reg |= MDIO_AN_CTRL1_XNP;
320 else
321 reg &= ~MDIO_AN_CTRL1_XNP;
322 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
323}
324
343enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) 325enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
344{ 326{
345 int lpa; 327 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
346 328
347 if (!(efx->phy_op->mmds & MDIO_DEVS_AN)) 329 if (!(efx->wanted_fc & EFX_FC_AUTO))
348 return efx->wanted_fc; 330 return efx->wanted_fc;
349 lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA); 331
350 return efx_fc_resolve(efx->wanted_fc, lpa); 332 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
333
334 return mii_resolve_flowctrl_fdx(
335 mii_advertise_flowctrl(efx->wanted_fc),
336 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
351} 337}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 6b14421a7444..f6ac9503339d 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc. 3 * Copyright 2006-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include "efx.h" 19#include "efx.h"
20#include "boards.h"
21 20
22static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } 21static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
23static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } 22static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
@@ -87,6 +86,9 @@ extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
87/* Set (some of) the PHY settings over MDIO */ 86/* Set (some of) the PHY settings over MDIO */
88extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); 87extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
89 88
89/* Push advertising flags and restart autonegotiation */
90extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
91
90/* Get pause parameters from AN if available (otherwise return 92/* Get pause parameters from AN if available (otherwise return
91 * requested pause parameters) 93 * requested pause parameters)
92 */ 94 */
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 820c233c3ea0..3a464529a46b 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,36 +1,80 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference. 8 * by the Free Software Foundation, incorporated herein by reference.
9 */ 9 */
10 10
11#include <linux/bitops.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/mtd/mtd.h> 13#include <linux/mtd/mtd.h>
13#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/rtnetlink.h>
14 16
15#define EFX_DRIVER_NAME "sfc_mtd" 17#define EFX_DRIVER_NAME "sfc_mtd"
16#include "net_driver.h" 18#include "net_driver.h"
17#include "spi.h" 19#include "spi.h"
18#include "efx.h" 20#include "efx.h"
21#include "nic.h"
22#include "mcdi.h"
23#include "mcdi_pcol.h"
19 24
20#define EFX_SPI_VERIFY_BUF_LEN 16 25#define EFX_SPI_VERIFY_BUF_LEN 16
26#define EFX_MCDI_CHUNK_LEN 128
21 27
22struct efx_mtd { 28struct efx_mtd_partition {
23 const struct efx_spi_device *spi;
24 struct mtd_info mtd; 29 struct mtd_info mtd;
30 union {
31 struct {
32 bool updating;
33 u8 nvram_type;
34 u16 fw_subtype;
35 } mcdi;
36 size_t offset;
37 };
38 const char *type_name;
25 char name[IFNAMSIZ + 20]; 39 char name[IFNAMSIZ + 20];
26}; 40};
27 41
42struct efx_mtd_ops {
43 int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
44 size_t *retlen, u8 *buffer);
45 int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
46 int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
47 size_t *retlen, const u8 *buffer);
48 int (*sync)(struct mtd_info *mtd);
49};
50
51struct efx_mtd {
52 struct list_head node;
53 struct efx_nic *efx;
54 const struct efx_spi_device *spi;
55 const char *name;
56 const struct efx_mtd_ops *ops;
57 size_t n_parts;
58 struct efx_mtd_partition part[0];
59};
60
61#define efx_for_each_partition(part, efx_mtd) \
62 for ((part) = &(efx_mtd)->part[0]; \
63 (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
64 (part)++)
65
66#define to_efx_mtd_partition(mtd) \
67 container_of(mtd, struct efx_mtd_partition, mtd)
68
69static int falcon_mtd_probe(struct efx_nic *efx);
70static int siena_mtd_probe(struct efx_nic *efx);
71
28/* SPI utilities */ 72/* SPI utilities */
29 73
30static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) 74static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
31{ 75{
32 const struct efx_spi_device *spi = efx_mtd->spi; 76 const struct efx_spi_device *spi = efx_mtd->spi;
33 struct efx_nic *efx = spi->efx; 77 struct efx_nic *efx = efx_mtd->efx;
34 u8 status; 78 u8 status;
35 int rc, i; 79 int rc, i;
36 80
@@ -39,7 +83,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
39 __set_current_state(uninterruptible ? 83 __set_current_state(uninterruptible ?
40 TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); 84 TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
41 schedule_timeout(HZ / 10); 85 schedule_timeout(HZ / 10);
42 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, 86 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
43 &status, sizeof(status)); 87 &status, sizeof(status));
44 if (rc) 88 if (rc)
45 return rc; 89 return rc;
@@ -52,32 +96,35 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
52 return -ETIMEDOUT; 96 return -ETIMEDOUT;
53} 97}
54 98
55static int efx_spi_unlock(const struct efx_spi_device *spi) 99static int
100efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
56{ 101{
57 const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | 102 const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
58 SPI_STATUS_BP0); 103 SPI_STATUS_BP0);
59 u8 status; 104 u8 status;
60 int rc; 105 int rc;
61 106
62 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, &status, sizeof(status)); 107 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
108 &status, sizeof(status));
63 if (rc) 109 if (rc)
64 return rc; 110 return rc;
65 111
66 if (!(status & unlock_mask)) 112 if (!(status & unlock_mask))
67 return 0; /* already unlocked */ 113 return 0; /* already unlocked */
68 114
69 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); 115 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
70 if (rc) 116 if (rc)
71 return rc; 117 return rc;
72 rc = falcon_spi_cmd(spi, SPI_SST_EWSR, -1, NULL, NULL, 0); 118 rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
73 if (rc) 119 if (rc)
74 return rc; 120 return rc;
75 121
76 status &= ~unlock_mask; 122 status &= ~unlock_mask;
77 rc = falcon_spi_cmd(spi, SPI_WRSR, -1, &status, NULL, sizeof(status)); 123 rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
124 NULL, sizeof(status));
78 if (rc) 125 if (rc)
79 return rc; 126 return rc;
80 rc = falcon_spi_wait_write(spi); 127 rc = falcon_spi_wait_write(efx, spi);
81 if (rc) 128 if (rc)
82 return rc; 129 return rc;
83 130
@@ -87,6 +134,7 @@ static int efx_spi_unlock(const struct efx_spi_device *spi)
87static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) 134static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
88{ 135{
89 const struct efx_spi_device *spi = efx_mtd->spi; 136 const struct efx_spi_device *spi = efx_mtd->spi;
137 struct efx_nic *efx = efx_mtd->efx;
90 unsigned pos, block_len; 138 unsigned pos, block_len;
91 u8 empty[EFX_SPI_VERIFY_BUF_LEN]; 139 u8 empty[EFX_SPI_VERIFY_BUF_LEN];
92 u8 buffer[EFX_SPI_VERIFY_BUF_LEN]; 140 u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
@@ -98,13 +146,14 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
98 if (spi->erase_command == 0) 146 if (spi->erase_command == 0)
99 return -EOPNOTSUPP; 147 return -EOPNOTSUPP;
100 148
101 rc = efx_spi_unlock(spi); 149 rc = efx_spi_unlock(efx, spi);
102 if (rc) 150 if (rc)
103 return rc; 151 return rc;
104 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); 152 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
105 if (rc) 153 if (rc)
106 return rc; 154 return rc;
107 rc = falcon_spi_cmd(spi, spi->erase_command, start, NULL, NULL, 0); 155 rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
156 NULL, 0);
108 if (rc) 157 if (rc)
109 return rc; 158 return rc;
110 rc = efx_spi_slow_wait(efx_mtd, false); 159 rc = efx_spi_slow_wait(efx_mtd, false);
@@ -113,7 +162,8 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
113 memset(empty, 0xff, sizeof(empty)); 162 memset(empty, 0xff, sizeof(empty));
114 for (pos = 0; pos < len; pos += block_len) { 163 for (pos = 0; pos < len; pos += block_len) {
115 block_len = min(len - pos, sizeof(buffer)); 164 block_len = min(len - pos, sizeof(buffer));
116 rc = falcon_spi_read(spi, start + pos, block_len, NULL, buffer); 165 rc = falcon_spi_read(efx, spi, start + pos, block_len,
166 NULL, buffer);
117 if (rc) 167 if (rc)
118 return rc; 168 return rc;
119 if (memcmp(empty, buffer, block_len)) 169 if (memcmp(empty, buffer, block_len))
@@ -130,140 +180,473 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
130 180
131/* MTD interface */ 181/* MTD interface */
132 182
133static int efx_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, 183static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
134 size_t *retlen, u8 *buffer)
135{ 184{
136 struct efx_mtd *efx_mtd = mtd->priv; 185 struct efx_mtd *efx_mtd = mtd->priv;
186 int rc;
187
188 rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
189 if (rc == 0) {
190 erase->state = MTD_ERASE_DONE;
191 } else {
192 erase->state = MTD_ERASE_FAILED;
193 erase->fail_addr = 0xffffffff;
194 }
195 mtd_erase_callback(erase);
196 return rc;
197}
198
199static void efx_mtd_sync(struct mtd_info *mtd)
200{
201 struct efx_mtd *efx_mtd = mtd->priv;
202 struct efx_nic *efx = efx_mtd->efx;
203 int rc;
204
205 rc = efx_mtd->ops->sync(mtd);
206 if (rc)
207 EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
208}
209
210static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
211{
212 int rc;
213
214 for (;;) {
215 rc = del_mtd_device(&part->mtd);
216 if (rc != -EBUSY)
217 break;
218 ssleep(1);
219 }
220 WARN_ON(rc);
221}
222
223static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
224{
225 struct efx_mtd_partition *part;
226
227 efx_for_each_partition(part, efx_mtd)
228 efx_mtd_remove_partition(part);
229 list_del(&efx_mtd->node);
230 kfree(efx_mtd);
231}
232
233static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
234{
235 struct efx_mtd_partition *part;
236
237 efx_for_each_partition(part, efx_mtd)
238 if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
239 snprintf(part->name, sizeof(part->name),
240 "%s %s:%02x", efx_mtd->efx->name,
241 part->type_name, part->mcdi.fw_subtype);
242 else
243 snprintf(part->name, sizeof(part->name),
244 "%s %s", efx_mtd->efx->name,
245 part->type_name);
246}
247
248static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
249{
250 struct efx_mtd_partition *part;
251
252 efx_mtd->efx = efx;
253
254 efx_mtd_rename_device(efx_mtd);
255
256 efx_for_each_partition(part, efx_mtd) {
257 part->mtd.writesize = 1;
258
259 part->mtd.owner = THIS_MODULE;
260 part->mtd.priv = efx_mtd;
261 part->mtd.name = part->name;
262 part->mtd.erase = efx_mtd_erase;
263 part->mtd.read = efx_mtd->ops->read;
264 part->mtd.write = efx_mtd->ops->write;
265 part->mtd.sync = efx_mtd_sync;
266
267 if (add_mtd_device(&part->mtd))
268 goto fail;
269 }
270
271 list_add(&efx_mtd->node, &efx->mtd_list);
272 return 0;
273
274fail:
275 while (part != &efx_mtd->part[0]) {
276 --part;
277 efx_mtd_remove_partition(part);
278 }
279 /* add_mtd_device() returns 1 if the MTD table is full */
280 return -ENOMEM;
281}
282
283void efx_mtd_remove(struct efx_nic *efx)
284{
285 struct efx_mtd *efx_mtd, *next;
286
287 WARN_ON(efx_dev_registered(efx));
288
289 list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
290 efx_mtd_remove_device(efx_mtd);
291}
292
293void efx_mtd_rename(struct efx_nic *efx)
294{
295 struct efx_mtd *efx_mtd;
296
297 ASSERT_RTNL();
298
299 list_for_each_entry(efx_mtd, &efx->mtd_list, node)
300 efx_mtd_rename_device(efx_mtd);
301}
302
303int efx_mtd_probe(struct efx_nic *efx)
304{
305 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
306 return siena_mtd_probe(efx);
307 else
308 return falcon_mtd_probe(efx);
309}
310
311/* Implementation of MTD operations for Falcon */
312
313static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
314 size_t len, size_t *retlen, u8 *buffer)
315{
316 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
317 struct efx_mtd *efx_mtd = mtd->priv;
137 const struct efx_spi_device *spi = efx_mtd->spi; 318 const struct efx_spi_device *spi = efx_mtd->spi;
138 struct efx_nic *efx = spi->efx; 319 struct efx_nic *efx = efx_mtd->efx;
139 int rc; 320 int rc;
140 321
141 rc = mutex_lock_interruptible(&efx->spi_lock); 322 rc = mutex_lock_interruptible(&efx->spi_lock);
142 if (rc) 323 if (rc)
143 return rc; 324 return rc;
144 rc = falcon_spi_read(spi, FALCON_FLASH_BOOTCODE_START + start, 325 rc = falcon_spi_read(efx, spi, part->offset + start, len,
145 len, retlen, buffer); 326 retlen, buffer);
146 mutex_unlock(&efx->spi_lock); 327 mutex_unlock(&efx->spi_lock);
147 return rc; 328 return rc;
148} 329}
149 330
150static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) 331static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
151{ 332{
333 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
152 struct efx_mtd *efx_mtd = mtd->priv; 334 struct efx_mtd *efx_mtd = mtd->priv;
153 struct efx_nic *efx = efx_mtd->spi->efx; 335 struct efx_nic *efx = efx_mtd->efx;
154 int rc; 336 int rc;
155 337
156 rc = mutex_lock_interruptible(&efx->spi_lock); 338 rc = mutex_lock_interruptible(&efx->spi_lock);
157 if (rc) 339 if (rc)
158 return rc; 340 return rc;
159 rc = efx_spi_erase(efx_mtd, FALCON_FLASH_BOOTCODE_START + erase->addr, 341 rc = efx_spi_erase(efx_mtd, part->offset + start, len);
160 erase->len);
161 mutex_unlock(&efx->spi_lock); 342 mutex_unlock(&efx->spi_lock);
162
163 if (rc == 0) {
164 erase->state = MTD_ERASE_DONE;
165 } else {
166 erase->state = MTD_ERASE_FAILED;
167 erase->fail_addr = 0xffffffff;
168 }
169 mtd_erase_callback(erase);
170 return rc; 343 return rc;
171} 344}
172 345
173static int efx_mtd_write(struct mtd_info *mtd, loff_t start, 346static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
174 size_t len, size_t *retlen, const u8 *buffer) 347 size_t len, size_t *retlen, const u8 *buffer)
175{ 348{
349 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
176 struct efx_mtd *efx_mtd = mtd->priv; 350 struct efx_mtd *efx_mtd = mtd->priv;
177 const struct efx_spi_device *spi = efx_mtd->spi; 351 const struct efx_spi_device *spi = efx_mtd->spi;
178 struct efx_nic *efx = spi->efx; 352 struct efx_nic *efx = efx_mtd->efx;
179 int rc; 353 int rc;
180 354
181 rc = mutex_lock_interruptible(&efx->spi_lock); 355 rc = mutex_lock_interruptible(&efx->spi_lock);
182 if (rc) 356 if (rc)
183 return rc; 357 return rc;
184 rc = falcon_spi_write(spi, FALCON_FLASH_BOOTCODE_START + start, 358 rc = falcon_spi_write(efx, spi, part->offset + start, len,
185 len, retlen, buffer); 359 retlen, buffer);
186 mutex_unlock(&efx->spi_lock); 360 mutex_unlock(&efx->spi_lock);
187 return rc; 361 return rc;
188} 362}
189 363
190static void efx_mtd_sync(struct mtd_info *mtd) 364static int falcon_mtd_sync(struct mtd_info *mtd)
191{ 365{
192 struct efx_mtd *efx_mtd = mtd->priv; 366 struct efx_mtd *efx_mtd = mtd->priv;
193 struct efx_nic *efx = efx_mtd->spi->efx; 367 struct efx_nic *efx = efx_mtd->efx;
194 int rc; 368 int rc;
195 369
196 mutex_lock(&efx->spi_lock); 370 mutex_lock(&efx->spi_lock);
197 rc = efx_spi_slow_wait(efx_mtd, true); 371 rc = efx_spi_slow_wait(efx_mtd, true);
198 mutex_unlock(&efx->spi_lock); 372 mutex_unlock(&efx->spi_lock);
373 return rc;
374}
375
376static struct efx_mtd_ops falcon_mtd_ops = {
377 .read = falcon_mtd_read,
378 .erase = falcon_mtd_erase,
379 .write = falcon_mtd_write,
380 .sync = falcon_mtd_sync,
381};
382
383static int falcon_mtd_probe(struct efx_nic *efx)
384{
385 struct efx_spi_device *spi = efx->spi_flash;
386 struct efx_mtd *efx_mtd;
387 int rc;
388
389 ASSERT_RTNL();
199 390
391 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
392 return -ENODEV;
393
394 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
395 GFP_KERNEL);
396 if (!efx_mtd)
397 return -ENOMEM;
398
399 efx_mtd->spi = spi;
400 efx_mtd->name = "flash";
401 efx_mtd->ops = &falcon_mtd_ops;
402
403 efx_mtd->n_parts = 1;
404 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
405 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
406 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
407 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
408 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
409 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
410
411 rc = efx_mtd_probe_device(efx, efx_mtd);
200 if (rc) 412 if (rc)
201 EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc); 413 kfree(efx_mtd);
202 return; 414 return rc;
203} 415}
204 416
205void efx_mtd_remove(struct efx_nic *efx) 417/* Implementation of MTD operations for Siena */
418
419static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
420 size_t len, size_t *retlen, u8 *buffer)
206{ 421{
207 if (efx->spi_flash && efx->spi_flash->mtd) { 422 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
208 struct efx_mtd *efx_mtd = efx->spi_flash->mtd; 423 struct efx_mtd *efx_mtd = mtd->priv;
209 int rc; 424 struct efx_nic *efx = efx_mtd->efx;
210 425 loff_t offset = start;
211 for (;;) { 426 loff_t end = min_t(loff_t, start + len, mtd->size);
212 rc = del_mtd_device(&efx_mtd->mtd); 427 size_t chunk;
213 if (rc != -EBUSY) 428 int rc = 0;
214 break; 429
215 ssleep(1); 430 while (offset < end) {
216 } 431 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
217 WARN_ON(rc); 432 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
218 kfree(efx_mtd); 433 buffer, chunk);
434 if (rc)
435 goto out;
436 offset += chunk;
437 buffer += chunk;
219 } 438 }
439out:
440 *retlen = offset - start;
441 return rc;
220} 442}
221 443
222void efx_mtd_rename(struct efx_nic *efx) 444static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
223{ 445{
224 if (efx->spi_flash && efx->spi_flash->mtd) { 446 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
225 struct efx_mtd *efx_mtd = efx->spi_flash->mtd; 447 struct efx_mtd *efx_mtd = mtd->priv;
226 snprintf(efx_mtd->name, sizeof(efx_mtd->name), 448 struct efx_nic *efx = efx_mtd->efx;
227 "%s sfc_flash_bootrom", efx->name); 449 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
450 loff_t end = min_t(loff_t, start + len, mtd->size);
451 size_t chunk = part->mtd.erasesize;
452 int rc = 0;
453
454 if (!part->mcdi.updating) {
455 rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
456 if (rc)
457 goto out;
458 part->mcdi.updating = 1;
459 }
460
461 /* The MCDI interface can in fact do multiple erase blocks at once;
462 * but erasing may be slow, so we make multiple calls here to avoid
463 * tripping the MCDI RPC timeout. */
464 while (offset < end) {
465 rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
466 chunk);
467 if (rc)
468 goto out;
469 offset += chunk;
228 } 470 }
471out:
472 return rc;
229} 473}
230 474
231int efx_mtd_probe(struct efx_nic *efx) 475static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
476 size_t len, size_t *retlen, const u8 *buffer)
232{ 477{
233 struct efx_spi_device *spi = efx->spi_flash; 478 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
234 struct efx_mtd *efx_mtd; 479 struct efx_mtd *efx_mtd = mtd->priv;
480 struct efx_nic *efx = efx_mtd->efx;
481 loff_t offset = start;
482 loff_t end = min_t(loff_t, start + len, mtd->size);
483 size_t chunk;
484 int rc = 0;
485
486 if (!part->mcdi.updating) {
487 rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
488 if (rc)
489 goto out;
490 part->mcdi.updating = 1;
491 }
235 492
236 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) 493 while (offset < end) {
494 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
495 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
496 buffer, chunk);
497 if (rc)
498 goto out;
499 offset += chunk;
500 buffer += chunk;
501 }
502out:
503 *retlen = offset - start;
504 return rc;
505}
506
507static int siena_mtd_sync(struct mtd_info *mtd)
508{
509 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
510 struct efx_mtd *efx_mtd = mtd->priv;
511 struct efx_nic *efx = efx_mtd->efx;
512 int rc = 0;
513
514 if (part->mcdi.updating) {
515 part->mcdi.updating = 0;
516 rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
517 }
518
519 return rc;
520}
521
522static struct efx_mtd_ops siena_mtd_ops = {
523 .read = siena_mtd_read,
524 .erase = siena_mtd_erase,
525 .write = siena_mtd_write,
526 .sync = siena_mtd_sync,
527};
528
529struct siena_nvram_type_info {
530 int port;
531 const char *name;
532};
533
534static struct siena_nvram_type_info siena_nvram_types[] = {
535 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
536 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
537 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
538 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
539 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
540 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
541 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
542 [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
543 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
544 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
545 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
546 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
547};
548
549static int siena_mtd_probe_partition(struct efx_nic *efx,
550 struct efx_mtd *efx_mtd,
551 unsigned int part_id,
552 unsigned int type)
553{
554 struct efx_mtd_partition *part = &efx_mtd->part[part_id];
555 struct siena_nvram_type_info *info;
556 size_t size, erase_size;
557 bool protected;
558 int rc;
559
560 if (type >= ARRAY_SIZE(siena_nvram_types))
237 return -ENODEV; 561 return -ENODEV;
238 562
239 efx_mtd = kzalloc(sizeof(*efx_mtd), GFP_KERNEL); 563 info = &siena_nvram_types[type];
564
565 if (info->port != efx_port_num(efx))
566 return -ENODEV;
567
568 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
569 if (rc)
570 return rc;
571 if (protected)
572 return -ENODEV; /* hide it */
573
574 part->mcdi.nvram_type = type;
575 part->type_name = info->name;
576
577 part->mtd.type = MTD_NORFLASH;
578 part->mtd.flags = MTD_CAP_NORFLASH;
579 part->mtd.size = size;
580 part->mtd.erasesize = erase_size;
581
582 return 0;
583}
584
585static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
586 struct efx_mtd *efx_mtd)
587{
588 struct efx_mtd_partition *part;
589 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
590 sizeof(uint16_t)];
591 int rc;
592
593 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
594 if (rc)
595 return rc;
596
597 efx_for_each_partition(part, efx_mtd)
598 part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
599
600 return 0;
601}
602
603static int siena_mtd_probe(struct efx_nic *efx)
604{
605 struct efx_mtd *efx_mtd;
606 int rc = -ENODEV;
607 u32 nvram_types;
608 unsigned int type;
609
610 ASSERT_RTNL();
611
612 rc = efx_mcdi_nvram_types(efx, &nvram_types);
613 if (rc)
614 return rc;
615
616 efx_mtd = kzalloc(sizeof(*efx_mtd) +
617 hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
618 GFP_KERNEL);
240 if (!efx_mtd) 619 if (!efx_mtd)
241 return -ENOMEM; 620 return -ENOMEM;
242 621
243 efx_mtd->spi = spi; 622 efx_mtd->name = "Siena NVRAM manager";
244 spi->mtd = efx_mtd; 623
245 624 efx_mtd->ops = &siena_mtd_ops;
246 efx_mtd->mtd.type = MTD_NORFLASH; 625
247 efx_mtd->mtd.flags = MTD_CAP_NORFLASH; 626 type = 0;
248 efx_mtd->mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; 627 efx_mtd->n_parts = 0;
249 efx_mtd->mtd.erasesize = spi->erase_size; 628
250 efx_mtd->mtd.writesize = 1; 629 while (nvram_types != 0) {
251 efx_mtd_rename(efx); 630 if (nvram_types & 1) {
252 631 rc = siena_mtd_probe_partition(efx, efx_mtd,
253 efx_mtd->mtd.owner = THIS_MODULE; 632 efx_mtd->n_parts, type);
254 efx_mtd->mtd.priv = efx_mtd; 633 if (rc == 0)
255 efx_mtd->mtd.name = efx_mtd->name; 634 efx_mtd->n_parts++;
256 efx_mtd->mtd.erase = efx_mtd_erase; 635 else if (rc != -ENODEV)
257 efx_mtd->mtd.read = efx_mtd_read; 636 goto fail;
258 efx_mtd->mtd.write = efx_mtd_write; 637 }
259 efx_mtd->mtd.sync = efx_mtd_sync; 638 type++;
260 639 nvram_types >>= 1;
261 if (add_mtd_device(&efx_mtd->mtd)) {
262 kfree(efx_mtd);
263 spi->mtd = NULL;
264 /* add_mtd_device() returns 1 if the MTD table is full */
265 return -ENOMEM;
266 } 640 }
267 641
268 return 0; 642 rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
643 if (rc)
644 goto fail;
645
646 rc = efx_mtd_probe_device(efx, efx_mtd);
647fail:
648 if (rc)
649 kfree(efx_mtd);
650 return rc;
269} 651}
652
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 298566da638b..34c381f009b7 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc. 4 * Copyright 2005-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -38,7 +38,7 @@
38#ifndef EFX_DRIVER_NAME 38#ifndef EFX_DRIVER_NAME
39#define EFX_DRIVER_NAME "sfc" 39#define EFX_DRIVER_NAME "sfc"
40#endif 40#endif
41#define EFX_DRIVER_VERSION "2.3" 41#define EFX_DRIVER_VERSION "3.0"
42 42
43#ifdef EFX_ENABLE_DEBUG 43#ifdef EFX_ENABLE_DEBUG
44#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 44#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -113,6 +113,13 @@ struct efx_special_buffer {
113 int entries; 113 int entries;
114}; 114};
115 115
116enum efx_flush_state {
117 FLUSH_NONE,
118 FLUSH_PENDING,
119 FLUSH_FAILED,
120 FLUSH_DONE,
121};
122
116/** 123/**
117 * struct efx_tx_buffer - An Efx TX buffer 124 * struct efx_tx_buffer - An Efx TX buffer
118 * @skb: The associated socket buffer. 125 * @skb: The associated socket buffer.
@@ -189,7 +196,7 @@ struct efx_tx_queue {
189 struct efx_nic *nic; 196 struct efx_nic *nic;
190 struct efx_tx_buffer *buffer; 197 struct efx_tx_buffer *buffer;
191 struct efx_special_buffer txd; 198 struct efx_special_buffer txd;
192 bool flushed; 199 enum efx_flush_state flushed;
193 200
194 /* Members used mainly on the completion path */ 201 /* Members used mainly on the completion path */
195 unsigned int read_count ____cacheline_aligned_in_smp; 202 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -284,7 +291,7 @@ struct efx_rx_queue {
284 struct page *buf_page; 291 struct page *buf_page;
285 dma_addr_t buf_dma_addr; 292 dma_addr_t buf_dma_addr;
286 char *buf_data; 293 char *buf_data;
287 bool flushed; 294 enum efx_flush_state flushed;
288}; 295};
289 296
290/** 297/**
@@ -327,7 +334,7 @@ enum efx_rx_alloc_method {
327 * @used_flags: Channel is used by net driver 334 * @used_flags: Channel is used by net driver
328 * @enabled: Channel enabled indicator 335 * @enabled: Channel enabled indicator
329 * @irq: IRQ number (MSI and MSI-X only) 336 * @irq: IRQ number (MSI and MSI-X only)
330 * @irq_moderation: IRQ moderation value (in us) 337 * @irq_moderation: IRQ moderation value (in hardware ticks)
331 * @napi_dev: Net device used with NAPI 338 * @napi_dev: Net device used with NAPI
332 * @napi_str: NAPI control structure 339 * @napi_str: NAPI control structure
333 * @reset_work: Scheduled reset work thread 340 * @reset_work: Scheduled reset work thread
@@ -343,9 +350,9 @@ enum efx_rx_alloc_method {
343 * @rx_alloc_push_pages: RX allocation method currently in use for pushing 350 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
344 * descriptors 351 * descriptors
345 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 352 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
346 * @n_rx_ip_frag_err: Count of RX IP fragment errors
347 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 353 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
348 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors 354 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
355 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
349 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 356 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
350 * @n_rx_overlength: Count of RX_OVERLENGTH errors 357 * @n_rx_overlength: Count of RX_OVERLENGTH errors
351 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 358 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
@@ -373,9 +380,9 @@ struct efx_channel {
373 int rx_alloc_push_pages; 380 int rx_alloc_push_pages;
374 381
375 unsigned n_rx_tobe_disc; 382 unsigned n_rx_tobe_disc;
376 unsigned n_rx_ip_frag_err;
377 unsigned n_rx_ip_hdr_chksum_err; 383 unsigned n_rx_ip_hdr_chksum_err;
378 unsigned n_rx_tcp_udp_chksum_err; 384 unsigned n_rx_tcp_udp_chksum_err;
385 unsigned n_rx_mcast_mismatch;
379 unsigned n_rx_frm_trunc; 386 unsigned n_rx_frm_trunc;
380 unsigned n_rx_overlength; 387 unsigned n_rx_overlength;
381 unsigned n_skbuff_leaks; 388 unsigned n_skbuff_leaks;
@@ -388,53 +395,29 @@ struct efx_channel {
388 395
389}; 396};
390 397
391/** 398enum efx_led_mode {
392 * struct efx_blinker - S/W LED blinking context 399 EFX_LED_OFF = 0,
393 * @state: Current state - on or off 400 EFX_LED_ON = 1,
394 * @resubmit: Timer resubmission flag 401 EFX_LED_DEFAULT = 2
395 * @timer: Control timer for blinking
396 */
397struct efx_blinker {
398 bool state;
399 bool resubmit;
400 struct timer_list timer;
401}; 402};
402 403
404#define STRING_TABLE_LOOKUP(val, member) \
405 ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
403 406
404/** 407extern const char *efx_loopback_mode_names[];
405 * struct efx_board - board information 408extern const unsigned int efx_loopback_mode_max;
406 * @type: Board model type 409#define LOOPBACK_MODE(efx) \
407 * @major: Major rev. ('A', 'B' ...) 410 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
408 * @minor: Minor rev. (0, 1, ...) 411
409 * @init: Initialisation function 412extern const char *efx_interrupt_mode_names[];
410 * @init_leds: Sets up board LEDs. May be called repeatedly. 413extern const unsigned int efx_interrupt_mode_max;
411 * @set_id_led: Turns the identification LED on or off 414#define INT_MODE(efx) \
412 * @blink: Starts/stops blinking 415 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
413 * @monitor: Board-specific health check function
414 * @fini: Cleanup function
415 * @blinker: used to blink LEDs in software
416 * @hwmon_client: I2C client for hardware monitor
417 * @ioexp_client: I2C client for power/port control
418 */
419struct efx_board {
420 int type;
421 int major;
422 int minor;
423 int (*init) (struct efx_nic *nic);
424 /* As the LEDs are typically attached to the PHY, LEDs
425 * have a separate init callback that happens later than
426 * board init. */
427 void (*init_leds)(struct efx_nic *efx);
428 void (*set_id_led) (struct efx_nic *efx, bool state);
429 int (*monitor) (struct efx_nic *nic);
430 void (*blink) (struct efx_nic *efx, bool start);
431 void (*fini) (struct efx_nic *nic);
432 struct efx_blinker blinker;
433 struct i2c_client *hwmon_client, *ioexp_client;
434};
435 416
436#define STRING_TABLE_LOOKUP(val, member) \ 417extern const char *efx_reset_type_names[];
437 member ## _names[val] 418extern const unsigned int efx_reset_type_max;
419#define RESET_TYPE(type) \
420 STRING_TABLE_LOOKUP(type, efx_reset_type)
438 421
439enum efx_int_mode { 422enum efx_int_mode {
440 /* Be careful if altering to correct macro below */ 423 /* Be careful if altering to correct macro below */
@@ -445,20 +428,7 @@ enum efx_int_mode {
445}; 428};
446#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 429#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
447 430
448enum phy_type { 431#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
449 PHY_TYPE_NONE = 0,
450 PHY_TYPE_TXC43128 = 1,
451 PHY_TYPE_88E1111 = 2,
452 PHY_TYPE_SFX7101 = 3,
453 PHY_TYPE_QT2022C2 = 4,
454 PHY_TYPE_PM8358 = 6,
455 PHY_TYPE_SFT9001A = 8,
456 PHY_TYPE_QT2025C = 9,
457 PHY_TYPE_SFT9001B = 10,
458 PHY_TYPE_MAX /* Insert any new items before this */
459};
460
461#define EFX_IS10G(efx) ((efx)->link_speed == 10000)
462 432
463enum nic_state { 433enum nic_state {
464 STATE_INIT = 0, 434 STATE_INIT = 0,
@@ -500,73 +470,69 @@ enum efx_fc_type {
500 EFX_FC_AUTO = 4, 470 EFX_FC_AUTO = 4,
501}; 471};
502 472
503/* Supported MAC bit-mask */ 473/**
504enum efx_mac_type { 474 * struct efx_link_state - Current state of the link
505 EFX_GMAC = 1, 475 * @up: Link is up
506 EFX_XMAC = 2, 476 * @fd: Link is full-duplex
477 * @fc: Actual flow control flags
478 * @speed: Link speed (Mbps)
479 */
480struct efx_link_state {
481 bool up;
482 bool fd;
483 enum efx_fc_type fc;
484 unsigned int speed;
507}; 485};
508 486
509static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc, 487static inline bool efx_link_state_equal(const struct efx_link_state *left,
510 unsigned int lpa) 488 const struct efx_link_state *right)
511{ 489{
512 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX)); 490 return left->up == right->up && left->fd == right->fd &&
513 491 left->fc == right->fc && left->speed == right->speed;
514 if (!(wanted_fc & EFX_FC_AUTO))
515 return wanted_fc;
516
517 return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
518} 492}
519 493
520/** 494/**
521 * struct efx_mac_operations - Efx MAC operations table 495 * struct efx_mac_operations - Efx MAC operations table
522 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock 496 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
523 * @update_stats: Update statistics 497 * @update_stats: Update statistics
524 * @irq: Hardware MAC event callback. Serialised by the mac_lock 498 * @check_fault: Check fault state. True if fault present.
525 * @poll: Poll for hardware state. Serialised by the mac_lock
526 */ 499 */
527struct efx_mac_operations { 500struct efx_mac_operations {
528 void (*reconfigure) (struct efx_nic *efx); 501 int (*reconfigure) (struct efx_nic *efx);
529 void (*update_stats) (struct efx_nic *efx); 502 void (*update_stats) (struct efx_nic *efx);
530 void (*irq) (struct efx_nic *efx); 503 bool (*check_fault)(struct efx_nic *efx);
531 void (*poll) (struct efx_nic *efx);
532}; 504};
533 505
534/** 506/**
535 * struct efx_phy_operations - Efx PHY operations table 507 * struct efx_phy_operations - Efx PHY operations table
508 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
509 * efx->loopback_modes.
536 * @init: Initialise PHY 510 * @init: Initialise PHY
537 * @fini: Shut down PHY 511 * @fini: Shut down PHY
538 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 512 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
539 * @clear_interrupt: Clear down interrupt 513 * @poll: Update @link_state and report whether it changed.
540 * @blink: Blink LEDs 514 * Serialised by the mac_lock.
541 * @poll: Poll for hardware state. Serialised by the mac_lock.
542 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 515 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
543 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 516 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
544 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 517 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
545 * (only needed where AN bit is set in mmds) 518 * (only needed where AN bit is set in mmds)
546 * @num_tests: Number of PHY-specific tests/results 519 * @test_name: Get the name of a PHY-specific test/result
547 * @test_names: Names of the tests/results
548 * @run_tests: Run tests and record results as appropriate. 520 * @run_tests: Run tests and record results as appropriate.
549 * Flags are the ethtool tests flags. 521 * Flags are the ethtool tests flags.
550 * @mmds: MMD presence mask
551 * @loopbacks: Supported loopback modes mask
552 */ 522 */
553struct efx_phy_operations { 523struct efx_phy_operations {
554 enum efx_mac_type macs; 524 int (*probe) (struct efx_nic *efx);
555 int (*init) (struct efx_nic *efx); 525 int (*init) (struct efx_nic *efx);
556 void (*fini) (struct efx_nic *efx); 526 void (*fini) (struct efx_nic *efx);
557 void (*reconfigure) (struct efx_nic *efx); 527 int (*reconfigure) (struct efx_nic *efx);
558 void (*clear_interrupt) (struct efx_nic *efx); 528 bool (*poll) (struct efx_nic *efx);
559 void (*poll) (struct efx_nic *efx);
560 void (*get_settings) (struct efx_nic *efx, 529 void (*get_settings) (struct efx_nic *efx,
561 struct ethtool_cmd *ecmd); 530 struct ethtool_cmd *ecmd);
562 int (*set_settings) (struct efx_nic *efx, 531 int (*set_settings) (struct efx_nic *efx,
563 struct ethtool_cmd *ecmd); 532 struct ethtool_cmd *ecmd);
564 void (*set_npage_adv) (struct efx_nic *efx, u32); 533 void (*set_npage_adv) (struct efx_nic *efx, u32);
565 u32 num_tests; 534 const char *(*test_name) (struct efx_nic *efx, unsigned int index);
566 const char *const *test_names;
567 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); 535 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
568 int mmds;
569 unsigned loopbacks;
570}; 536};
571 537
572/** 538/**
@@ -690,36 +656,38 @@ union efx_multicast_hash {
690 * @interrupt_mode: Interrupt mode 656 * @interrupt_mode: Interrupt mode
691 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 657 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
692 * @irq_rx_moderation: IRQ moderation time for RX event queues 658 * @irq_rx_moderation: IRQ moderation time for RX event queues
693 * @i2c_adap: I2C adapter
694 * @board_info: Board-level information
695 * @state: Device state flag. Serialised by the rtnl_lock. 659 * @state: Device state flag. Serialised by the rtnl_lock.
696 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) 660 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
697 * @tx_queue: TX DMA queues 661 * @tx_queue: TX DMA queues
698 * @rx_queue: RX DMA queues 662 * @rx_queue: RX DMA queues
699 * @channel: Channels 663 * @channel: Channels
664 * @next_buffer_table: First available buffer table id
700 * @n_rx_queues: Number of RX queues 665 * @n_rx_queues: Number of RX queues
701 * @n_channels: Number of channels in use 666 * @n_channels: Number of channels in use
702 * @rx_buffer_len: RX buffer length 667 * @rx_buffer_len: RX buffer length
703 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 668 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
669 * @int_error_count: Number of internal errors seen recently
670 * @int_error_expire: Time at which error count will be expired
704 * @irq_status: Interrupt status buffer 671 * @irq_status: Interrupt status buffer
705 * @last_irq_cpu: Last CPU to handle interrupt. 672 * @last_irq_cpu: Last CPU to handle interrupt.
706 * This register is written with the SMP processor ID whenever an 673 * This register is written with the SMP processor ID whenever an
707 * interrupt is handled. It is used by falcon_test_interrupt() 674 * interrupt is handled. It is used by falcon_test_interrupt()
708 * to verify that an interrupt has occurred. 675 * to verify that an interrupt has occurred.
709 * @spi_flash: SPI flash device 676 * @spi_flash: SPI flash device
710 * This field will be %NULL if no flash device is present. 677 * This field will be %NULL if no flash device is present (or for Siena).
711 * @spi_eeprom: SPI EEPROM device 678 * @spi_eeprom: SPI EEPROM device
712 * This field will be %NULL if no EEPROM device is present. 679 * This field will be %NULL if no EEPROM device is present (or for Siena).
713 * @spi_lock: SPI bus lock 680 * @spi_lock: SPI bus lock
681 * @mtd_list: List of MTDs attached to the NIC
714 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 682 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
715 * @nic_data: Hardware dependant state 683 * @nic_data: Hardware dependant state
716 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 684 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
717 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 685 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
718 * @port_enabled: Port enabled indicator. 686 * @port_enabled: Port enabled indicator.
719 * Serialises efx_stop_all(), efx_start_all(), efx_monitor(), 687 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
720 * efx_phy_work(), and efx_mac_work() with kernel interfaces. Safe to read 688 * efx_mac_work() with kernel interfaces. Safe to read under any
721 * under any one of the rtnl_lock, mac_lock, or netif_tx_lock, but all 689 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
722 * three must be held to modify it. 690 * be held to modify it.
723 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock 691 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
724 * @port_initialized: Port initialized? 692 * @port_initialized: Port initialized?
725 * @net_dev: Operating system network device. Consider holding the rtnl lock 693 * @net_dev: Operating system network device. Consider holding the rtnl lock
@@ -731,26 +699,23 @@ union efx_multicast_hash {
731 * &struct net_device_stats. 699 * &struct net_device_stats.
732 * @stats_buffer: DMA buffer for statistics 700 * @stats_buffer: DMA buffer for statistics
733 * @stats_lock: Statistics update lock. Serialises statistics fetches 701 * @stats_lock: Statistics update lock. Serialises statistics fetches
734 * @stats_disable_count: Nest count for disabling statistics fetches
735 * @mac_op: MAC interface 702 * @mac_op: MAC interface
736 * @mac_address: Permanent MAC address 703 * @mac_address: Permanent MAC address
737 * @phy_type: PHY type 704 * @phy_type: PHY type
738 * @phy_lock: PHY access lock 705 * @mdio_lock: MDIO lock
739 * @phy_op: PHY interface 706 * @phy_op: PHY interface
740 * @phy_data: PHY private data (including PHY-specific stats) 707 * @phy_data: PHY private data (including PHY-specific stats)
741 * @mdio: PHY MDIO interface 708 * @mdio: PHY MDIO interface
709 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
742 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 710 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
743 * @mac_up: MAC link state 711 * @xmac_poll_required: XMAC link state needs polling
744 * @link_up: Link status 712 * @link_advertising: Autonegotiation advertising flags
745 * @link_fd: Link is full duplex 713 * @link_state: Current state of the link
746 * @link_fc: Actualy flow control flags
747 * @link_speed: Link speed (Mbps)
748 * @n_link_state_changes: Number of times the link has changed state 714 * @n_link_state_changes: Number of times the link has changed state
749 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 715 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
750 * @multicast_hash: Multicast hash table 716 * @multicast_hash: Multicast hash table
751 * @wanted_fc: Wanted flow control flags 717 * @wanted_fc: Wanted flow control flags
752 * @phy_work: work item for dealing with PHY events 718 * @mac_work: Work item for changing MAC promiscuity and multicast hash
753 * @mac_work: work item for dealing with MAC events
754 * @loopback_mode: Loopback status 719 * @loopback_mode: Loopback status
755 * @loopback_modes: Supported loopback mode bitmask 720 * @loopback_modes: Supported loopback mode bitmask
756 * @loopback_selftest: Offline self-test private state 721 * @loopback_selftest: Offline self-test private state
@@ -774,9 +739,6 @@ struct efx_nic {
774 bool irq_rx_adaptive; 739 bool irq_rx_adaptive;
775 unsigned int irq_rx_moderation; 740 unsigned int irq_rx_moderation;
776 741
777 struct i2c_adapter i2c_adap;
778 struct efx_board board_info;
779
780 enum nic_state state; 742 enum nic_state state;
781 enum reset_type reset_pending; 743 enum reset_type reset_pending;
782 744
@@ -784,21 +746,29 @@ struct efx_nic {
784 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 746 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
785 struct efx_channel channel[EFX_MAX_CHANNELS]; 747 struct efx_channel channel[EFX_MAX_CHANNELS];
786 748
749 unsigned next_buffer_table;
787 int n_rx_queues; 750 int n_rx_queues;
788 int n_channels; 751 int n_channels;
789 unsigned int rx_buffer_len; 752 unsigned int rx_buffer_len;
790 unsigned int rx_buffer_order; 753 unsigned int rx_buffer_order;
791 754
755 unsigned int_error_count;
756 unsigned long int_error_expire;
757
792 struct efx_buffer irq_status; 758 struct efx_buffer irq_status;
793 volatile signed int last_irq_cpu; 759 volatile signed int last_irq_cpu;
760 unsigned long irq_zero_count;
794 761
795 struct efx_spi_device *spi_flash; 762 struct efx_spi_device *spi_flash;
796 struct efx_spi_device *spi_eeprom; 763 struct efx_spi_device *spi_eeprom;
797 struct mutex spi_lock; 764 struct mutex spi_lock;
765#ifdef CONFIG_SFC_MTD
766 struct list_head mtd_list;
767#endif
798 768
799 unsigned n_rx_nodesc_drop_cnt; 769 unsigned n_rx_nodesc_drop_cnt;
800 770
801 struct falcon_nic_data *nic_data; 771 void *nic_data;
802 772
803 struct mutex mac_lock; 773 struct mutex mac_lock;
804 struct work_struct mac_work; 774 struct work_struct mac_work;
@@ -815,24 +785,21 @@ struct efx_nic {
815 struct efx_mac_stats mac_stats; 785 struct efx_mac_stats mac_stats;
816 struct efx_buffer stats_buffer; 786 struct efx_buffer stats_buffer;
817 spinlock_t stats_lock; 787 spinlock_t stats_lock;
818 unsigned int stats_disable_count;
819 788
820 struct efx_mac_operations *mac_op; 789 struct efx_mac_operations *mac_op;
821 unsigned char mac_address[ETH_ALEN]; 790 unsigned char mac_address[ETH_ALEN];
822 791
823 enum phy_type phy_type; 792 unsigned int phy_type;
824 spinlock_t phy_lock; 793 struct mutex mdio_lock;
825 struct work_struct phy_work;
826 struct efx_phy_operations *phy_op; 794 struct efx_phy_operations *phy_op;
827 void *phy_data; 795 void *phy_data;
828 struct mdio_if_info mdio; 796 struct mdio_if_info mdio;
797 unsigned int mdio_bus;
829 enum efx_phy_mode phy_mode; 798 enum efx_phy_mode phy_mode;
830 799
831 bool mac_up; 800 bool xmac_poll_required;
832 bool link_up; 801 u32 link_advertising;
833 bool link_fd; 802 struct efx_link_state link_state;
834 enum efx_fc_type link_fc;
835 unsigned int link_speed;
836 unsigned int n_link_state_changes; 803 unsigned int n_link_state_changes;
837 804
838 bool promiscuous; 805 bool promiscuous;
@@ -841,7 +808,7 @@ struct efx_nic {
841 808
842 atomic_t rx_reset; 809 atomic_t rx_reset;
843 enum efx_loopback_mode loopback_mode; 810 enum efx_loopback_mode loopback_mode;
844 unsigned int loopback_modes; 811 u64 loopback_modes;
845 812
846 void *loopback_selftest; 813 void *loopback_selftest;
847}; 814};
@@ -860,50 +827,95 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
860 return efx_dev_registered(efx) ? efx->name : ""; 827 return efx_dev_registered(efx) ? efx->name : "";
861} 828}
862 829
830static inline unsigned int efx_port_num(struct efx_nic *efx)
831{
832 return PCI_FUNC(efx->pci_dev->devfn);
833}
834
863/** 835/**
864 * struct efx_nic_type - Efx device type definition 836 * struct efx_nic_type - Efx device type definition
865 * @mem_bar: Memory BAR number 837 * @probe: Probe the controller
838 * @remove: Free resources allocated by probe()
839 * @init: Initialise the controller
840 * @fini: Shut down the controller
841 * @monitor: Periodic function for polling link state and hardware monitor
842 * @reset: Reset the controller hardware and possibly the PHY. This will
843 * be called while the controller is uninitialised.
844 * @probe_port: Probe the MAC and PHY
845 * @remove_port: Free resources allocated by probe_port()
846 * @prepare_flush: Prepare the hardware for flushing the DMA queues
847 * @update_stats: Update statistics not provided by event handling
848 * @start_stats: Start the regular fetching of statistics
849 * @stop_stats: Stop the regular fetching of statistics
850 * @set_id_led: Set state of identifying LED or revert to automatic function
851 * @push_irq_moderation: Apply interrupt moderation value
852 * @push_multicast_hash: Apply multicast hash table
853 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
854 * @get_wol: Get WoL configuration from driver state
855 * @set_wol: Push WoL configuration to the NIC
856 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
857 * @test_registers: Test read/write functionality of control registers
858 * @test_nvram: Test validity of NVRAM contents
859 * @default_mac_ops: efx_mac_operations to set at startup
860 * @revision: Hardware architecture revision
866 * @mem_map_size: Memory BAR mapped size 861 * @mem_map_size: Memory BAR mapped size
867 * @txd_ptr_tbl_base: TX descriptor ring base address 862 * @txd_ptr_tbl_base: TX descriptor ring base address
868 * @rxd_ptr_tbl_base: RX descriptor ring base address 863 * @rxd_ptr_tbl_base: RX descriptor ring base address
869 * @buf_tbl_base: Buffer table base address 864 * @buf_tbl_base: Buffer table base address
870 * @evq_ptr_tbl_base: Event queue pointer table base address 865 * @evq_ptr_tbl_base: Event queue pointer table base address
871 * @evq_rptr_tbl_base: Event queue read-pointer table base address 866 * @evq_rptr_tbl_base: Event queue read-pointer table base address
872 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
873 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
874 * @evq_size: Event queue size (must be a power of two)
875 * @max_dma_mask: Maximum possible DMA mask 867 * @max_dma_mask: Maximum possible DMA mask
876 * @tx_dma_mask: TX DMA mask
877 * @bug5391_mask: Address mask for bug 5391 workaround
878 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
879 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
880 * @rx_buffer_padding: Padding added to each RX buffer 868 * @rx_buffer_padding: Padding added to each RX buffer
881 * @max_interrupt_mode: Highest capability interrupt mode supported 869 * @max_interrupt_mode: Highest capability interrupt mode supported
882 * from &enum efx_init_mode. 870 * from &enum efx_init_mode.
883 * @phys_addr_channels: Number of channels with physically addressed 871 * @phys_addr_channels: Number of channels with physically addressed
884 * descriptors 872 * descriptors
873 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
874 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
875 * @offload_features: net_device feature flags for protocol offload
876 * features implemented in hardware
877 * @reset_world_flags: Flags for additional components covered by
878 * reset method RESET_TYPE_WORLD
885 */ 879 */
886struct efx_nic_type { 880struct efx_nic_type {
887 unsigned int mem_bar; 881 int (*probe)(struct efx_nic *efx);
882 void (*remove)(struct efx_nic *efx);
883 int (*init)(struct efx_nic *efx);
884 void (*fini)(struct efx_nic *efx);
885 void (*monitor)(struct efx_nic *efx);
886 int (*reset)(struct efx_nic *efx, enum reset_type method);
887 int (*probe_port)(struct efx_nic *efx);
888 void (*remove_port)(struct efx_nic *efx);
889 void (*prepare_flush)(struct efx_nic *efx);
890 void (*update_stats)(struct efx_nic *efx);
891 void (*start_stats)(struct efx_nic *efx);
892 void (*stop_stats)(struct efx_nic *efx);
893 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
894 void (*push_irq_moderation)(struct efx_channel *channel);
895 void (*push_multicast_hash)(struct efx_nic *efx);
896 int (*reconfigure_port)(struct efx_nic *efx);
897 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
898 int (*set_wol)(struct efx_nic *efx, u32 type);
899 void (*resume_wol)(struct efx_nic *efx);
900 int (*test_registers)(struct efx_nic *efx);
901 int (*test_nvram)(struct efx_nic *efx);
902 struct efx_mac_operations *default_mac_ops;
903
904 int revision;
888 unsigned int mem_map_size; 905 unsigned int mem_map_size;
889 unsigned int txd_ptr_tbl_base; 906 unsigned int txd_ptr_tbl_base;
890 unsigned int rxd_ptr_tbl_base; 907 unsigned int rxd_ptr_tbl_base;
891 unsigned int buf_tbl_base; 908 unsigned int buf_tbl_base;
892 unsigned int evq_ptr_tbl_base; 909 unsigned int evq_ptr_tbl_base;
893 unsigned int evq_rptr_tbl_base; 910 unsigned int evq_rptr_tbl_base;
894
895 unsigned int txd_ring_mask;
896 unsigned int rxd_ring_mask;
897 unsigned int evq_size;
898 u64 max_dma_mask; 911 u64 max_dma_mask;
899 unsigned int tx_dma_mask;
900 unsigned bug5391_mask;
901
902 int rx_xoff_thresh;
903 int rx_xon_thresh;
904 unsigned int rx_buffer_padding; 912 unsigned int rx_buffer_padding;
905 unsigned int max_interrupt_mode; 913 unsigned int max_interrupt_mode;
906 unsigned int phys_addr_channels; 914 unsigned int phys_addr_channels;
915 unsigned int tx_dc_base;
916 unsigned int rx_dc_base;
917 unsigned long offload_features;
918 u32 reset_world_flags;
907}; 919};
908 920
909/************************************************************************** 921/**************************************************************************
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
new file mode 100644
index 000000000000..a577be227862
--- /dev/null
+++ b/drivers/net/sfc/nic.c
@@ -0,0 +1,1583 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/pci.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
16#include "net_driver.h"
17#include "bitfield.h"
18#include "efx.h"
19#include "nic.h"
20#include "regs.h"
21#include "io.h"
22#include "workarounds.h"
23
24/**************************************************************************
25 *
26 * Configurable values
27 *
28 **************************************************************************
29 */
30
31/* This is set to 16 for a good reason. In summary, if larger than
32 * 16, the descriptor cache holds more than a default socket
33 * buffer's worth of packets (for UDP we can only have at most one
34 * socket buffer's worth outstanding). This combined with the fact
35 * that we only get 1 TX event per descriptor cache means the NIC
36 * goes idle.
37 */
38#define TX_DC_ENTRIES 16
39#define TX_DC_ENTRIES_ORDER 1
40
41#define RX_DC_ENTRIES 64
42#define RX_DC_ENTRIES_ORDER 3
43
44/* RX FIFO XOFF watermark
45 *
46 * When the amount of the RX FIFO increases used increases past this
47 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
48 * This also has an effect on RX/TX arbitration
49 */
50int efx_nic_rx_xoff_thresh = -1;
51module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
52MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
53
54/* RX FIFO XON watermark
55 *
56 * When the amount of the RX FIFO used decreases below this
57 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
58 * This also has an effect on RX/TX arbitration
59 */
60int efx_nic_rx_xon_thresh = -1;
61module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
62MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
63
64/* If EFX_MAX_INT_ERRORS internal errors occur within
65 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
66 * disable it.
67 */
68#define EFX_INT_ERROR_EXPIRE 3600
69#define EFX_MAX_INT_ERRORS 5
70
71/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
72 */
73#define EFX_FLUSH_INTERVAL 10
74#define EFX_FLUSH_POLL_COUNT 100
75
76/* Size and alignment of special buffers (4KB) */
77#define EFX_BUF_SIZE 4096
78
79/* Depth of RX flush request fifo */
80#define EFX_RX_FLUSH_COUNT 4
81
82/**************************************************************************
83 *
84 * Solarstorm hardware access
85 *
86 **************************************************************************/
87
88static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
89 unsigned int index)
90{
91 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
92 value, index);
93}
94
95/* Read the current event from the event queue */
96static inline efx_qword_t *efx_event(struct efx_channel *channel,
97 unsigned int index)
98{
99 return (((efx_qword_t *) (channel->eventq.addr)) + index);
100}
101
102/* See if an event is present
103 *
104 * We check both the high and low dword of the event for all ones. We
105 * wrote all ones when we cleared the event, and no valid event can
106 * have all ones in either its high or low dwords. This approach is
107 * robust against reordering.
108 *
109 * Note that using a single 64-bit comparison is incorrect; even
110 * though the CPU read will be atomic, the DMA write may not be.
111 */
112static inline int efx_event_present(efx_qword_t *event)
113{
114 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
115 EFX_DWORD_IS_ALL_ONES(event->dword[1])));
116}
117
118static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
119 const efx_oword_t *mask)
120{
121 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
122 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
123}
124
125int efx_nic_test_registers(struct efx_nic *efx,
126 const struct efx_nic_register_test *regs,
127 size_t n_regs)
128{
129 unsigned address = 0, i, j;
130 efx_oword_t mask, imask, original, reg, buf;
131
132 /* Falcon should be in loopback to isolate the XMAC from the PHY */
133 WARN_ON(!LOOPBACK_INTERNAL(efx));
134
135 for (i = 0; i < n_regs; ++i) {
136 address = regs[i].address;
137 mask = imask = regs[i].mask;
138 EFX_INVERT_OWORD(imask);
139
140 efx_reado(efx, &original, address);
141
142 /* bit sweep on and off */
143 for (j = 0; j < 128; j++) {
144 if (!EFX_EXTRACT_OWORD32(mask, j, j))
145 continue;
146
147 /* Test this testable bit can be set in isolation */
148 EFX_AND_OWORD(reg, original, mask);
149 EFX_SET_OWORD32(reg, j, j, 1);
150
151 efx_writeo(efx, &reg, address);
152 efx_reado(efx, &buf, address);
153
154 if (efx_masked_compare_oword(&reg, &buf, &mask))
155 goto fail;
156
157 /* Test this testable bit can be cleared in isolation */
158 EFX_OR_OWORD(reg, original, mask);
159 EFX_SET_OWORD32(reg, j, j, 0);
160
161 efx_writeo(efx, &reg, address);
162 efx_reado(efx, &buf, address);
163
164 if (efx_masked_compare_oword(&reg, &buf, &mask))
165 goto fail;
166 }
167
168 efx_writeo(efx, &original, address);
169 }
170
171 return 0;
172
173fail:
174 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
175 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
176 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
177 return -EIO;
178}
179
180/**************************************************************************
181 *
182 * Special buffer handling
183 * Special buffers are used for event queues and the TX and RX
184 * descriptor rings.
185 *
186 *************************************************************************/
187
188/*
189 * Initialise a special buffer
190 *
191 * This will define a buffer (previously allocated via
192 * efx_alloc_special_buffer()) in the buffer table, allowing
193 * it to be used for event queues, descriptor rings etc.
194 */
195static void
196efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
197{
198 efx_qword_t buf_desc;
199 int index;
200 dma_addr_t dma_addr;
201 int i;
202
203 EFX_BUG_ON_PARANOID(!buffer->addr);
204
205 /* Write buffer descriptors to NIC */
206 for (i = 0; i < buffer->entries; i++) {
207 index = buffer->index + i;
208 dma_addr = buffer->dma_addr + (i * 4096);
209 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
210 index, (unsigned long long)dma_addr);
211 EFX_POPULATE_QWORD_3(buf_desc,
212 FRF_AZ_BUF_ADR_REGION, 0,
213 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
214 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
215 efx_write_buf_tbl(efx, &buf_desc, index);
216 }
217}
218
219/* Unmaps a buffer and clears the buffer table entries */
220static void
221efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
222{
223 efx_oword_t buf_tbl_upd;
224 unsigned int start = buffer->index;
225 unsigned int end = (buffer->index + buffer->entries - 1);
226
227 if (!buffer->entries)
228 return;
229
230 EFX_LOG(efx, "unmapping special buffers %d-%d\n",
231 buffer->index, buffer->index + buffer->entries - 1);
232
233 EFX_POPULATE_OWORD_4(buf_tbl_upd,
234 FRF_AZ_BUF_UPD_CMD, 0,
235 FRF_AZ_BUF_CLR_CMD, 1,
236 FRF_AZ_BUF_CLR_END_ID, end,
237 FRF_AZ_BUF_CLR_START_ID, start);
238 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
239}
240
241/*
242 * Allocate a new special buffer
243 *
244 * This allocates memory for a new buffer, clears it and allocates a
245 * new buffer ID range. It does not write into the buffer table.
246 *
247 * This call will allocate 4KB buffers, since 8KB buffers can't be
248 * used for event queues and descriptor rings.
249 */
250static int efx_alloc_special_buffer(struct efx_nic *efx,
251 struct efx_special_buffer *buffer,
252 unsigned int len)
253{
254 len = ALIGN(len, EFX_BUF_SIZE);
255
256 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
257 &buffer->dma_addr);
258 if (!buffer->addr)
259 return -ENOMEM;
260 buffer->len = len;
261 buffer->entries = len / EFX_BUF_SIZE;
262 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
263
264 /* All zeros is a potentially valid event so memset to 0xff */
265 memset(buffer->addr, 0xff, len);
266
267 /* Select new buffer ID */
268 buffer->index = efx->next_buffer_table;
269 efx->next_buffer_table += buffer->entries;
270
271 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
272 "(virt %p phys %llx)\n", buffer->index,
273 buffer->index + buffer->entries - 1,
274 (u64)buffer->dma_addr, len,
275 buffer->addr, (u64)virt_to_phys(buffer->addr));
276
277 return 0;
278}
279
280static void
281efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
282{
283 if (!buffer->addr)
284 return;
285
286 EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
287 "(virt %p phys %llx)\n", buffer->index,
288 buffer->index + buffer->entries - 1,
289 (u64)buffer->dma_addr, buffer->len,
290 buffer->addr, (u64)virt_to_phys(buffer->addr));
291
292 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
293 buffer->dma_addr);
294 buffer->addr = NULL;
295 buffer->entries = 0;
296}
297
298/**************************************************************************
299 *
300 * Generic buffer handling
301 * These buffers are used for interrupt status and MAC stats
302 *
303 **************************************************************************/
304
305int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
306 unsigned int len)
307{
308 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
309 &buffer->dma_addr);
310 if (!buffer->addr)
311 return -ENOMEM;
312 buffer->len = len;
313 memset(buffer->addr, 0, len);
314 return 0;
315}
316
317void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
318{
319 if (buffer->addr) {
320 pci_free_consistent(efx->pci_dev, buffer->len,
321 buffer->addr, buffer->dma_addr);
322 buffer->addr = NULL;
323 }
324}
325
326/**************************************************************************
327 *
328 * TX path
329 *
330 **************************************************************************/
331
332/* Returns a pointer to the specified transmit descriptor in the TX
333 * descriptor queue belonging to the specified channel.
334 */
335static inline efx_qword_t *
336efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
337{
338 return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
339}
340
341/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
342static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
343{
344 unsigned write_ptr;
345 efx_dword_t reg;
346
347 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
348 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
349 efx_writed_page(tx_queue->efx, &reg,
350 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
351}
352
353
354/* For each entry inserted into the software descriptor ring, create a
355 * descriptor in the hardware TX descriptor ring (in host memory), and
356 * write a doorbell.
357 */
358void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
359{
360
361 struct efx_tx_buffer *buffer;
362 efx_qword_t *txd;
363 unsigned write_ptr;
364
365 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
366
367 do {
368 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
369 buffer = &tx_queue->buffer[write_ptr];
370 txd = efx_tx_desc(tx_queue, write_ptr);
371 ++tx_queue->write_count;
372
373 /* Create TX descriptor ring entry */
374 EFX_POPULATE_QWORD_4(*txd,
375 FSF_AZ_TX_KER_CONT, buffer->continuation,
376 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
377 FSF_AZ_TX_KER_BUF_REGION, 0,
378 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
379 } while (tx_queue->write_count != tx_queue->insert_count);
380
381 wmb(); /* Ensure descriptors are written before they are fetched */
382 efx_notify_tx_desc(tx_queue);
383}
384
385/* Allocate hardware resources for a TX queue */
386int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
387{
388 struct efx_nic *efx = tx_queue->efx;
389 BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
390 EFX_TXQ_SIZE & EFX_TXQ_MASK);
391 return efx_alloc_special_buffer(efx, &tx_queue->txd,
392 EFX_TXQ_SIZE * sizeof(efx_qword_t));
393}
394
395void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
396{
397 efx_oword_t tx_desc_ptr;
398 struct efx_nic *efx = tx_queue->efx;
399
400 tx_queue->flushed = FLUSH_NONE;
401
402 /* Pin TX descriptor ring */
403 efx_init_special_buffer(efx, &tx_queue->txd);
404
405 /* Push TX descriptor ring to card */
406 EFX_POPULATE_OWORD_10(tx_desc_ptr,
407 FRF_AZ_TX_DESCQ_EN, 1,
408 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
409 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
410 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
411 FRF_AZ_TX_DESCQ_EVQ_ID,
412 tx_queue->channel->channel,
413 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
414 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
415 FRF_AZ_TX_DESCQ_SIZE,
416 __ffs(tx_queue->txd.entries),
417 FRF_AZ_TX_DESCQ_TYPE, 0,
418 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
419
420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
421 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
424 !csum);
425 }
426
427 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
428 tx_queue->queue);
429
430 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
431 efx_oword_t reg;
432
433 /* Only 128 bits in this register */
434 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
435
436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
437 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
438 clear_bit_le(tx_queue->queue, (void *)&reg);
439 else
440 set_bit_le(tx_queue->queue, (void *)&reg);
441 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
442 }
443}
444
445static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
446{
447 struct efx_nic *efx = tx_queue->efx;
448 efx_oword_t tx_flush_descq;
449
450 tx_queue->flushed = FLUSH_PENDING;
451
452 /* Post a flush command */
453 EFX_POPULATE_OWORD_2(tx_flush_descq,
454 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
455 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
456 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
457}
458
459void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
460{
461 struct efx_nic *efx = tx_queue->efx;
462 efx_oword_t tx_desc_ptr;
463
464 /* The queue should have been flushed */
465 WARN_ON(tx_queue->flushed != FLUSH_DONE);
466
467 /* Remove TX descriptor ring from card */
468 EFX_ZERO_OWORD(tx_desc_ptr);
469 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
470 tx_queue->queue);
471
472 /* Unpin TX descriptor ring */
473 efx_fini_special_buffer(efx, &tx_queue->txd);
474}
475
476/* Free buffers backing TX queue */
477void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
478{
479 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
480}
481
482/**************************************************************************
483 *
484 * RX path
485 *
486 **************************************************************************/
487
488/* Returns a pointer to the specified descriptor in the RX descriptor queue */
489static inline efx_qword_t *
490efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
491{
492 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
493}
494
495/* This creates an entry in the RX descriptor queue */
496static inline void
497efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
498{
499 struct efx_rx_buffer *rx_buf;
500 efx_qword_t *rxd;
501
502 rxd = efx_rx_desc(rx_queue, index);
503 rx_buf = efx_rx_buffer(rx_queue, index);
504 EFX_POPULATE_QWORD_3(*rxd,
505 FSF_AZ_RX_KER_BUF_SIZE,
506 rx_buf->len -
507 rx_queue->efx->type->rx_buffer_padding,
508 FSF_AZ_RX_KER_BUF_REGION, 0,
509 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
510}
511
512/* This writes to the RX_DESC_WPTR register for the specified receive
513 * descriptor ring.
514 */
515void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
516{
517 efx_dword_t reg;
518 unsigned write_ptr;
519
520 while (rx_queue->notified_count != rx_queue->added_count) {
521 efx_build_rx_desc(rx_queue,
522 rx_queue->notified_count &
523 EFX_RXQ_MASK);
524 ++rx_queue->notified_count;
525 }
526
527 wmb();
528 write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
529 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
530 efx_writed_page(rx_queue->efx, &reg,
531 FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
532}
533
534int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
535{
536 struct efx_nic *efx = rx_queue->efx;
537 BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
538 EFX_RXQ_SIZE & EFX_RXQ_MASK);
539 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
540 EFX_RXQ_SIZE * sizeof(efx_qword_t));
541}
542
543void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
544{
545 efx_oword_t rx_desc_ptr;
546 struct efx_nic *efx = rx_queue->efx;
547 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
548 bool iscsi_digest_en = is_b0;
549
550 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
551 rx_queue->queue, rx_queue->rxd.index,
552 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
553
554 rx_queue->flushed = FLUSH_NONE;
555
556 /* Pin RX descriptor ring */
557 efx_init_special_buffer(efx, &rx_queue->rxd);
558
559 /* Push RX descriptor ring to card */
560 EFX_POPULATE_OWORD_10(rx_desc_ptr,
561 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
562 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
563 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
564 FRF_AZ_RX_DESCQ_EVQ_ID,
565 rx_queue->channel->channel,
566 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
567 FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
568 FRF_AZ_RX_DESCQ_SIZE,
569 __ffs(rx_queue->rxd.entries),
570 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
571 /* For >=B0 this is scatter so disable */
572 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
573 FRF_AZ_RX_DESCQ_EN, 1);
574 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
575 rx_queue->queue);
576}
577
578static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
579{
580 struct efx_nic *efx = rx_queue->efx;
581 efx_oword_t rx_flush_descq;
582
583 rx_queue->flushed = FLUSH_PENDING;
584
585 /* Post a flush command */
586 EFX_POPULATE_OWORD_2(rx_flush_descq,
587 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
588 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
589 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
590}
591
592void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
593{
594 efx_oword_t rx_desc_ptr;
595 struct efx_nic *efx = rx_queue->efx;
596
597 /* The queue should already have been flushed */
598 WARN_ON(rx_queue->flushed != FLUSH_DONE);
599
600 /* Remove RX descriptor ring from card */
601 EFX_ZERO_OWORD(rx_desc_ptr);
602 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
603 rx_queue->queue);
604
605 /* Unpin RX descriptor ring */
606 efx_fini_special_buffer(efx, &rx_queue->rxd);
607}
608
609/* Free buffers backing RX queue */
610void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
611{
612 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
613}
614
615/**************************************************************************
616 *
617 * Event queue processing
618 * Event queues are processed by per-channel tasklets.
619 *
620 **************************************************************************/
621
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	/* One RPTR table entry per channel, indexed by channel number */
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}
640
/* Use HW to insert a SW defined event
 *
 * Writes the 64-bit event payload plus the target queue id to the
 * DRV_EV register; the hardware then delivers the event on that
 * channel's event queue.
 */
void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	/* The copy below assumes DRV_EV_DATA occupies exactly the low
	 * 64 bits of the register */
	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
655
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 *
 * Besides completions this also handles FIFO-full notifications
 * (rewrite the write pointer under the TX lock) and packet errors
 * (schedule a TX descriptor-fetch reset where workaround 10727 applies).
 */
static void
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		/* Feed the number of completed descriptors into the
		 * adaptive IRQ moderation score */
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		/* Take the TX lock only once the netdev is registered */
		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
697
/* Detect errors included in the rx_evt_pkt_ok bit.
 *
 * Decodes the individual error flags of an RX event whose PKT_OK bit
 * was clear, updates the channel's per-error counters, and reports via
 * the out-parameters:
 *   @rx_ev_pkt_ok - currently left unmodified by this function
 *   @discard      - set true if the frame must be dropped
 */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	/* The dribble-nibble flag only exists on Falcon A-series events */
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
772
/* Handle receive events that are not in-order.
 *
 * Logs how many events were dropped (computed modulo the RX queue size)
 * and schedules a recovery reset, since descriptor and event state are
 * now out of sync.
 */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
788
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	/* Scatter is not used, so every event should be a complete,
	 * start-of-packet frame on this channel's own queue */
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	/* Events must arrive in descriptor order; anything else means
	 * state is corrupt and triggers a recovery reset */
	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			likely(efx->rx_checksum_enabled) &&
			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	/* Feed the adaptive IRQ moderation score */
	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
853
/* Global events are basically PHY events
 *
 * PHY interrupt flags are acknowledged but otherwise ignored; an XGMAC
 * management interrupt (B0 and later) requests an XMAC poll; an RX
 * recovery event schedules a reset.  Anything unrecognised is logged.
 */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		/* Ignored */
		handled = true;
	}

	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	/* The RX_RECOVERY flag lives in different bits on A-series
	 * versus B-series hardware */
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}
891
/* Handle a driver event
 *
 * Dispatches on the event sub-code: most sub-codes are informational
 * (flush/init/wakeup/timer notifications, logged at trace level); RX
 * recovery and descriptor-fetch errors schedule an appropriate reset.
 */
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
953
/* Process events on a channel's event queue.
 *
 * Consumes events starting at channel->eventq_read_ptr until either
 * the queue is empty or @rx_quota RX packet events have been handled.
 * Each consumed event is overwritten with all-ones so it reads as
 * "not present" next time round the ring.
 *
 * Returns the number of RX packet events processed.
 */
int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			efx_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			/* Self-test events: record the magic value for
			 * the test to check */
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			efx_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
1017
1018
/* Allocate buffer table entries for event queue
 *
 * The queue size must be a power of two within the hardware-supported
 * range (enforced at compile time).  Returns the result of the
 * special-buffer allocation (0 or negative errno).
 */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return efx_alloc_special_buffer(efx, &channel->eventq,
					EFX_EVQ_SIZE * sizeof(efx_qword_t));
}
1028
/* Initialise an event queue on the NIC.
 *
 * Configures the per-queue timer (Siena only), pins the event queue's
 * special buffer, fills the ring with all-ones (empty events), pushes
 * the queue configuration to the hardware and programs the initial
 * IRQ moderation.
 */
void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}
1062
/* Shut down an event queue on the NIC.
 *
 * Zeroes the queue's pointer-table entry (and its timer-table entry on
 * Siena), then unpins the event queue's special buffer.  Memory is
 * freed later by efx_nic_remove_eventq().
 */
void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}
1078
/* Free the special-buffer memory backing the event queue.
 * Counterpart of efx_nic_probe_eventq(); call after efx_nic_fini_eventq().
 */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}
1084
1085
/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic;
 */
void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	/* Build a driver-generated event carrying @magic and inject it
	 * via the hardware's DRV_EV mechanism */
	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}
1099
1100/**************************************************************************
1101 *
1102 * Flush handling
1103 *
1104 **************************************************************************/
1105
1106
/* Scan channel 0's event queue for TX/RX flush-completion events.
 *
 * Updates the flushed state of the corresponding queues
 * (FLUSH_DONE, or FLUSH_FAILED for a failed RX flush) and discards
 * every scanned event.  Only called while the queues are being torn
 * down, so throwing away non-flush events is acceptable.
 */
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
1156
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
/* Flush all TX and RX queues, polling for completion.
 *
 * All TX flushes are issued at once; RX flushes are issued up to
 * EFX_RX_FLUSH_COUNT at a time and retried on failure.  Polls at
 * EFX_FLUSH_INTERVAL for up to EFX_FLUSH_POLL_COUNT iterations.
 * On timeout, queues are force-marked FLUSH_DONE and the function
 * returns -ETIMEDOUT (or 0 where workaround 7803 applies).
 */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_flush_tx_queue(tx_queue);

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		/* Count RX flushes already in flight */
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_queue->flushed == FLUSH_PENDING)
				++rx_pending;
		}
		/* Start (or retry) RX flushes, up to the concurrency limit */
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_pending == EFX_RX_FLUSH_COUNT)
				break;
			if (rx_queue->flushed == FLUSH_FAILED ||
			    rx_queue->flushed == FLUSH_NONE) {
				efx_flush_rx_queue(rx_queue);
				++rx_pending;
			}
		}
		efx_for_each_tx_queue(tx_queue, efx) {
			if (tx_queue->flushed != FLUSH_DONE)
				++tx_pending;
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (tx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = FLUSH_DONE;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (rx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = FLUSH_DONE;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}
1222
1223/**************************************************************************
1224 *
1225 * Hardware interrupts
1226 * The hardware interrupt handler does very little work; all the event
1227 * queue processing is carried out by per-channel tasklets.
1228 *
1229 **************************************************************************/
1230
/* Enable/disable/generate interrupts
 *
 * @enabled enables or disables the interrupt output; @force additionally
 * triggers a test interrupt.  Where workaround 17213 applies to legacy
 * (non-MSI) interrupts, the interrupt level select is set as well.
 */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;
	unsigned int level = 0;

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Set the level always even if we're generating a test
		 * interrupt, because our legacy interrupt handler is safe */
		level = 0x1f;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
1249
/* Enable hardware interrupts.
 *
 * Clears the interrupt status vector first, then enables interrupts and
 * schedules every channel once so the event-queue read pointers get
 * brought up to date.
 */
void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
1265
/* Disable hardware interrupts (does not free any IRQ handlers). */
void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}
1271
/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	/* enabled=true, force=true: keep interrupts on and fire one now */
	efx_nic_interrupts(efx, true, true);
}
1280
1281/* Process a fatal interrupt
1282 * Disable bus mastering ASAP and schedule a reset
1283 */
1284irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1285{
1286 struct falcon_nic_data *nic_data = efx->nic_data;
1287 efx_oword_t *int_ker = efx->irq_status.addr;
1288 efx_oword_t fatal_intr;
1289 int error, mem_perr;
1290
1291 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1292 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1293
1294 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1295 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1296 EFX_OWORD_VAL(fatal_intr),
1297 error ? "disabling bus mastering" : "no recognised error");
1298 if (error == 0)
1299 goto out;
1300
1301 /* If this is a memory parity error dump which blocks are offending */
1302 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
1303 if (mem_perr) {
1304 efx_oword_t reg;
1305 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1306 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1307 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1308 }
1309
1310 /* Disable both devices */
1311 pci_clear_master(efx->pci_dev);
1312 if (efx_nic_is_dual_func(efx))
1313 pci_clear_master(nic_data->pci_dev2);
1314 efx_nic_disable_interrupts(efx);
1315
1316 /* Count errors and reset or disable the NIC accordingly */
1317 if (efx->int_error_count == 0 ||
1318 time_after(jiffies, efx->int_error_expire)) {
1319 efx->int_error_count = 0;
1320 efx->int_error_expire =
1321 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1322 }
1323 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1324 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1325 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1326 } else {
1327 EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
1328 "NIC will be disabled\n");
1329 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1330 }
1331out:
1332 return IRQ_HANDLED;
1333}
1334
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 *
 * Reads the ISR (which ACKs the interrupt), escalates fatal errors to
 * efx_nic_fatal_interrupt(), and schedules each channel whose bit is
 * set in the ISR.  Where workaround 15783 applies, the first all-zero
 * ISR read also rearms every event queue with pending events.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_nic_fatal_interrupt(efx);

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx) &&
		   efx->irq_zero_count++ == 0) {
		efx_qword_t *event;

		/* Ensure we rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
		}

		result = IRQ_HANDLED;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
1391
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_nic_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
1420
1421
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 *
 * Only present on Falcon B0 and later.  Fills the 0x800-byte table
 * (0x800 / 0x10 = 128 entries) with RX queue numbers assigned
 * round-robin across the active RX queues.
 */
static void efx_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}
1443
1444/* Hook interrupt handler(s)
1445 * Try MSI and then legacy interrupts.
1446 */
1447int efx_nic_init_interrupt(struct efx_nic *efx)
1448{
1449 struct efx_channel *channel;
1450 int rc;
1451
1452 if (!EFX_INT_MODE_USE_MSI(efx)) {
1453 irq_handler_t handler;
1454 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1455 handler = efx_legacy_interrupt;
1456 else
1457 handler = falcon_legacy_interrupt_a1;
1458
1459 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1460 efx->name, efx);
1461 if (rc) {
1462 EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
1463 efx->pci_dev->irq);
1464 goto fail1;
1465 }
1466 return 0;
1467 }
1468
1469 /* Hook MSI or MSI-X interrupt */
1470 efx_for_each_channel(channel, efx) {
1471 rc = request_irq(channel->irq, efx_msi_interrupt,
1472 IRQF_PROBE_SHARED, /* Not shared */
1473 channel->name, channel);
1474 if (rc) {
1475 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
1476 goto fail2;
1477 }
1478 }
1479
1480 return 0;
1481
1482 fail2:
1483 efx_for_each_channel(channel, efx)
1484 free_irq(channel->irq, channel);
1485 fail1:
1486 return rc;
1487}
1488
/* Unhook all interrupt handlers.
 *
 * Frees every per-channel MSI/MSI-X IRQ, ACKs any pending legacy
 * interrupt (revision-specific mechanism), then frees the legacy IRQ
 * if one was hooked.
 */
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
1510
1511u32 efx_nic_fpga_ver(struct efx_nic *efx)
1512{
1513 efx_oword_t altera_build;
1514 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1515 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1516}
1517
/* One-time initialisation common to all NIC revisions.
 *
 * Positions and sizes the TX/RX descriptor caches in SRAM, programs the
 * interrupt vector address, unmasks the genuinely fatal interrupts,
 * sets up the RSS indirection table and configures TX-reserved options.
 */
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	/* Register is written inverted; the named bits end up enabled */
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_setup_rss_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
new file mode 100644
index 000000000000..9351c0331a47
--- /dev/null
+++ b/drivers/net/sfc/nic.h
@@ -0,0 +1,261 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_NIC_H
12#define EFX_NIC_H
13
14#include <linux/i2c-algo-bit.h>
15#include "net_driver.h"
16#include "efx.h"
17#include "mcdi.h"
18
19/*
20 * Falcon hardware control
21 */
22
23enum {
24 EFX_REV_FALCON_A0 = 0,
25 EFX_REV_FALCON_A1 = 1,
26 EFX_REV_FALCON_B0 = 2,
27 EFX_REV_SIENA_A0 = 3,
28};
29
30static inline int efx_nic_rev(struct efx_nic *efx)
31{
32 return efx->type->revision;
33}
34
35extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
36
37static inline bool efx_nic_has_mc(struct efx_nic *efx)
38{
39 return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
40}
41/* NIC has two interlinked PCI functions for the same port. */
42static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
43{
44 return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
45}
46
47enum {
48 PHY_TYPE_NONE = 0,
49 PHY_TYPE_TXC43128 = 1,
50 PHY_TYPE_88E1111 = 2,
51 PHY_TYPE_SFX7101 = 3,
52 PHY_TYPE_QT2022C2 = 4,
53 PHY_TYPE_PM8358 = 6,
54 PHY_TYPE_SFT9001A = 8,
55 PHY_TYPE_QT2025C = 9,
56 PHY_TYPE_SFT9001B = 10,
57};
58
59#define FALCON_XMAC_LOOPBACKS \
60 ((1 << LOOPBACK_XGMII) | \
61 (1 << LOOPBACK_XGXS) | \
62 (1 << LOOPBACK_XAUI))
63
64#define FALCON_GMAC_LOOPBACKS \
65 (1 << LOOPBACK_GMAC)
66
67/**
68 * struct falcon_board_type - board operations and type information
69 * @id: Board type id, as found in NVRAM
70 * @ref_model: Model number of Solarflare reference design
71 * @gen_type: Generic board type description
72 * @init: Allocate resources and initialise peripheral hardware
73 * @init_phy: Do board-specific PHY initialisation
74 * @fini: Shut down hardware and free resources
75 * @set_id_led: Set state of identifying LED or revert to automatic function
76 * @monitor: Board-specific health check function
77 */
78struct falcon_board_type {
79 u8 id;
80 const char *ref_model;
81 const char *gen_type;
82 int (*init) (struct efx_nic *nic);
83 void (*init_phy) (struct efx_nic *efx);
84 void (*fini) (struct efx_nic *nic);
85 void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
86 int (*monitor) (struct efx_nic *nic);
87};
88
89/**
90 * struct falcon_board - board information
91 * @type: Type of board
92 * @major: Major rev. ('A', 'B' ...)
93 * @minor: Minor rev. (0, 1, ...)
94 * @i2c_adap: I2C adapter for on-board peripherals
95 * @i2c_data: Data for bit-banging algorithm
96 * @hwmon_client: I2C client for hardware monitor
97 * @ioexp_client: I2C client for power/port control
98 */
99struct falcon_board {
100 const struct falcon_board_type *type;
101 int major;
102 int minor;
103 struct i2c_adapter i2c_adap;
104 struct i2c_algo_bit_data i2c_data;
105 struct i2c_client *hwmon_client, *ioexp_client;
106};
107
108/**
109 * struct falcon_nic_data - Falcon NIC state
110 * @pci_dev2: Secondary function of Falcon A
111 * @board: Board state and functions
112 * @stats_disable_count: Nest count for disabling statistics fetches
113 * @stats_pending: Is there a pending DMA of MAC statistics.
114 * @stats_timer: A timer for regularly fetching MAC statistics.
115 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
116 */
117struct falcon_nic_data {
118 struct pci_dev *pci_dev2;
119 struct falcon_board board;
120 unsigned int stats_disable_count;
121 bool stats_pending;
122 struct timer_list stats_timer;
123 u32 *stats_dma_done;
124};
125
126static inline struct falcon_board *falcon_board(struct efx_nic *efx)
127{
128 struct falcon_nic_data *data = efx->nic_data;
129 return &data->board;
130}
131
132/**
133 * struct siena_nic_data - Siena NIC state
134 * @fw_version: Management controller firmware version
135 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id
138 */
139struct siena_nic_data {
140 u64 fw_version;
141 u32 fw_build;
142 struct efx_mcdi_iface mcdi;
143 int wol_filter_id;
144};
145
146extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
147
148extern struct efx_nic_type falcon_a1_nic_type;
149extern struct efx_nic_type falcon_b0_nic_type;
150extern struct efx_nic_type siena_a0_nic_type;
151
152/**************************************************************************
153 *
154 * Externs
155 *
156 **************************************************************************
157 */
158
159extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160
161/* TX data path */
162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
163extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
164extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
165extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
166extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
167
168/* RX data path */
169extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
170extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
171extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
172extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
173extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
174
175/* Event data path */
176extern int efx_nic_probe_eventq(struct efx_channel *channel);
177extern void efx_nic_init_eventq(struct efx_channel *channel);
178extern void efx_nic_fini_eventq(struct efx_channel *channel);
179extern void efx_nic_remove_eventq(struct efx_channel *channel);
180extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
181extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
182
183/* MAC/PHY */
184extern void falcon_drain_tx_fifo(struct efx_nic *efx);
185extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
186extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
187
188/* Interrupts and test events */
189extern int efx_nic_init_interrupt(struct efx_nic *efx);
190extern void efx_nic_enable_interrupts(struct efx_nic *efx);
191extern void efx_nic_generate_test_event(struct efx_channel *channel,
192 unsigned int magic);
193extern void efx_nic_generate_interrupt(struct efx_nic *efx);
194extern void efx_nic_disable_interrupts(struct efx_nic *efx);
195extern void efx_nic_fini_interrupt(struct efx_nic *efx);
196extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
197extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
198extern void falcon_irq_ack_a1(struct efx_nic *efx);
199
200#define EFX_IRQ_MOD_RESOLUTION 5
201
202/* Global Resources */
203extern int efx_nic_flush_queues(struct efx_nic *efx);
204extern void falcon_start_nic_stats(struct efx_nic *efx);
205extern void falcon_stop_nic_stats(struct efx_nic *efx);
206extern int falcon_reset_xaui(struct efx_nic *efx);
207extern void efx_nic_init_common(struct efx_nic *efx);
208
209int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
210 unsigned int len);
211void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
212
213/* Tests */
214struct efx_nic_register_test {
215 unsigned address;
216 efx_oword_t mask;
217};
218extern int efx_nic_test_registers(struct efx_nic *efx,
219 const struct efx_nic_register_test *regs,
220 size_t n_regs);
221
222/**************************************************************************
223 *
224 * Falcon MAC stats
225 *
226 **************************************************************************
227 */
228
229#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
230#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
231
232/* Retrieve statistic from statistics block */
233#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
234 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
235 (efx)->mac_stats.efx_stat += le16_to_cpu( \
236 *((__force __le16 *) \
237 (efx->stats_buffer.addr + \
238 FALCON_STAT_OFFSET(falcon_stat)))); \
239 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
240 (efx)->mac_stats.efx_stat += le32_to_cpu( \
241 *((__force __le32 *) \
242 (efx->stats_buffer.addr + \
243 FALCON_STAT_OFFSET(falcon_stat)))); \
244 else \
245 (efx)->mac_stats.efx_stat += le64_to_cpu( \
246 *((__force __le64 *) \
247 (efx->stats_buffer.addr + \
248 FALCON_STAT_OFFSET(falcon_stat)))); \
249 } while (0)
250
251#define FALCON_MAC_STATS_SIZE 0x100
252
253#define MAC_DATA_LBN 0
254#define MAC_DATA_WIDTH 32
255
256extern void efx_nic_generate_event(struct efx_channel *channel,
257 efx_qword_t *event);
258
259extern void falcon_poll_xmac(struct efx_nic *efx);
260
261#endif /* EFX_NIC_H */
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index c1cff9c0c173..5bc26137257b 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc. 3 * Copyright 2007-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -16,16 +16,16 @@
16extern struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops; 17extern struct efx_phy_operations falcon_sft9001_phy_ops;
18 18
19extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); 19extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
20 20
21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed 21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
22 * to boot due to corrupt flash, or some other negative error code. */ 22 * to boot due to corrupt flash, or some other negative error code. */
23extern int sft9001_wait_boot(struct efx_nic *efx); 23extern int sft9001_wait_boot(struct efx_nic *efx);
24 24
25/**************************************************************************** 25/****************************************************************************
26 * AMCC/Quake QT20xx PHYs 26 * AMCC/Quake QT202x PHYs
27 */ 27 */
28extern struct efx_phy_operations falcon_xfp_phy_ops; 28extern struct efx_phy_operations falcon_qt202x_phy_ops;
29 29
30/* These PHYs provide various H/W control states for LEDs */ 30/* These PHYs provide various H/W control states for LEDs */
31#define QUAKE_LED_LINK_INVAL (0) 31#define QUAKE_LED_LINK_INVAL (0)
@@ -39,6 +39,23 @@ extern struct efx_phy_operations falcon_xfp_phy_ops;
39#define QUAKE_LED_TXLINK (0) 39#define QUAKE_LED_TXLINK (0)
40#define QUAKE_LED_RXLINK (8) 40#define QUAKE_LED_RXLINK (8)
41 41
42extern void xfp_set_led(struct efx_nic *p, int led, int state); 42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43
44/****************************************************************************
45 * Siena managed PHYs
46 */
47extern struct efx_phy_operations efx_mcdi_phy_ops;
48
49extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
50 unsigned int prtad, unsigned int devad,
51 u16 addr, u16 *value_out, u32 *status_out);
52extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
53 unsigned int prtad, unsigned int devad,
54 u16 addr, u16 value, u32 *status_out);
55extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
56 struct efx_link_state *link_state,
57 u32 speed, u32 flags, u32 fcntl);
58extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
59extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
43 60
44#endif 61#endif
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/qt202x_phy.c
index e6b3d5eaddba..3800fc791b2f 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,14 +1,13 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc. 3 * Copyright 2006-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9/* 9/*
10 * Driver for SFP+ and XFP optical PHYs plus some support specific to the 10 * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
11 * AMCC QT20xx adapters; see www.amcc.com for details
12 */ 11 */
13 12
14#include <linux/timer.h> 13#include <linux/timer.h>
@@ -16,15 +15,15 @@
16#include "efx.h" 15#include "efx.h"
17#include "mdio_10g.h" 16#include "mdio_10g.h"
18#include "phy.h" 17#include "phy.h"
19#include "falcon.h" 18#include "nic.h"
20 19
21#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS | \ 20#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
22 MDIO_DEVS_PMAPMD | \ 21 MDIO_DEVS_PMAPMD | \
23 MDIO_DEVS_PHYXS) 22 MDIO_DEVS_PHYXS)
24 23
25#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 24#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
26 (1 << LOOPBACK_PMAPMD) | \ 25 (1 << LOOPBACK_PMAPMD) | \
27 (1 << LOOPBACK_NETWORK)) 26 (1 << LOOPBACK_PHYXS_WS))
28 27
29/****************************************************************************/ 28/****************************************************************************/
30/* Quake-specific MDIO registers */ 29/* Quake-specific MDIO registers */
@@ -45,18 +44,18 @@
45#define PCS_VEND1_REG 0xc000 44#define PCS_VEND1_REG 0xc000
46#define PCS_VEND1_LBTXD_LBN 5 45#define PCS_VEND1_LBTXD_LBN 5
47 46
48void xfp_set_led(struct efx_nic *p, int led, int mode) 47void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
49{ 48{
50 int addr = MDIO_QUAKE_LED0_REG + led; 49 int addr = MDIO_QUAKE_LED0_REG + led;
51 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); 50 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
52} 51}
53 52
54struct xfp_phy_data { 53struct qt202x_phy_data {
55 enum efx_phy_mode phy_mode; 54 enum efx_phy_mode phy_mode;
56}; 55};
57 56
58#define XFP_MAX_RESET_TIME 500 57#define QT2022C2_MAX_RESET_TIME 500
59#define XFP_RESET_WAIT 10 58#define QT2022C2_RESET_WAIT 10
60 59
61static int qt2025c_wait_reset(struct efx_nic *efx) 60static int qt2025c_wait_reset(struct efx_nic *efx)
62{ 61{
@@ -97,7 +96,7 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
97 return 0; 96 return 0;
98} 97}
99 98
100static int xfp_reset_phy(struct efx_nic *efx) 99static int qt202x_reset_phy(struct efx_nic *efx)
101{ 100{
102 int rc; 101 int rc;
103 102
@@ -111,8 +110,9 @@ static int xfp_reset_phy(struct efx_nic *efx)
111 /* Reset the PHYXS MMD. This is documented as doing 110 /* Reset the PHYXS MMD. This is documented as doing
112 * a complete soft reset. */ 111 * a complete soft reset. */
113 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, 112 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
114 XFP_MAX_RESET_TIME / XFP_RESET_WAIT, 113 QT2022C2_MAX_RESET_TIME /
115 XFP_RESET_WAIT); 114 QT2022C2_RESET_WAIT,
115 QT2022C2_RESET_WAIT);
116 if (rc < 0) 116 if (rc < 0)
117 goto fail; 117 goto fail;
118 } 118 }
@@ -122,11 +122,11 @@ static int xfp_reset_phy(struct efx_nic *efx)
122 122
123 /* Check that all the MMDs we expect are present and responding. We 123 /* Check that all the MMDs we expect are present and responding. We
124 * expect faults on some if the link is down, but not on the PHY XS */ 124 * expect faults on some if the link is down, but not on the PHY XS */
125 rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS); 125 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
126 if (rc < 0) 126 if (rc < 0)
127 goto fail; 127 goto fail;
128 128
129 efx->board_info.init_leds(efx); 129 falcon_board(efx)->type->init_phy(efx);
130 130
131 return rc; 131 return rc;
132 132
@@ -135,60 +135,60 @@ static int xfp_reset_phy(struct efx_nic *efx)
135 return rc; 135 return rc;
136} 136}
137 137
138static int xfp_phy_init(struct efx_nic *efx) 138static int qt202x_phy_probe(struct efx_nic *efx)
139{ 139{
140 struct xfp_phy_data *phy_data; 140 efx->mdio.mmds = QT202X_REQUIRED_DEVS;
141 u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 141 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
142 efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
143 return 0;
144}
145
146static int qt202x_phy_init(struct efx_nic *efx)
147{
148 struct qt202x_phy_data *phy_data;
149 u32 devid;
142 int rc; 150 int rc;
143 151
144 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 152 rc = qt202x_reset_phy(efx);
153 if (rc) {
154 EFX_ERR(efx, "PHY init failed\n");
155 return rc;
156 }
157
158 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
145 if (!phy_data) 159 if (!phy_data)
146 return -ENOMEM; 160 return -ENOMEM;
147 efx->phy_data = phy_data; 161 efx->phy_data = phy_data;
148 162
163 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
149 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", 164 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
150 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), 165 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
151 efx_mdio_id_rev(devid)); 166 efx_mdio_id_rev(devid));
152 167
153 phy_data->phy_mode = efx->phy_mode; 168 phy_data->phy_mode = efx->phy_mode;
154
155 rc = xfp_reset_phy(efx);
156
157 EFX_INFO(efx, "PHY init %s.\n",
158 rc ? "failed" : "successful");
159 if (rc < 0)
160 goto fail;
161
162 return 0; 169 return 0;
163
164 fail:
165 kfree(efx->phy_data);
166 efx->phy_data = NULL;
167 return rc;
168} 170}
169 171
170static void xfp_phy_clear_interrupt(struct efx_nic *efx) 172static int qt202x_link_ok(struct efx_nic *efx)
171{ 173{
172 /* Read to clear link status alarm */ 174 return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
173 efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
174} 175}
175 176
176static int xfp_link_ok(struct efx_nic *efx) 177static bool qt202x_phy_poll(struct efx_nic *efx)
177{ 178{
178 return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS); 179 bool was_up = efx->link_state.up;
179}
180 180
181static void xfp_phy_poll(struct efx_nic *efx) 181 efx->link_state.up = qt202x_link_ok(efx);
182{ 182 efx->link_state.speed = 10000;
183 int link_up = xfp_link_ok(efx); 183 efx->link_state.fd = true;
184 /* Simulate a PHY event if link state has changed */ 184 efx->link_state.fc = efx->wanted_fc;
185 if (link_up != efx->link_up) 185
186 falcon_sim_phy_event(efx); 186 return efx->link_state.up != was_up;
187} 187}
188 188
189static void xfp_phy_reconfigure(struct efx_nic *efx) 189static int qt202x_phy_reconfigure(struct efx_nic *efx)
190{ 190{
191 struct xfp_phy_data *phy_data = efx->phy_data; 191 struct qt202x_phy_data *phy_data = efx->phy_data;
192 192
193 if (efx->phy_type == PHY_TYPE_QT2025C) { 193 if (efx->phy_type == PHY_TYPE_QT2025C) {
194 /* There are several different register bits which can 194 /* There are several different register bits which can
@@ -207,7 +207,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
207 /* Reset the PHY when moving from tx off to tx on */ 207 /* Reset the PHY when moving from tx off to tx on */
208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && 208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) 209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
210 xfp_reset_phy(efx); 210 qt202x_reset_phy(efx);
211 211
212 efx_mdio_transmit_disable(efx); 212 efx_mdio_transmit_disable(efx);
213 } 213 }
@@ -215,36 +215,28 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
215 efx_mdio_phy_reconfigure(efx); 215 efx_mdio_phy_reconfigure(efx);
216 216
217 phy_data->phy_mode = efx->phy_mode; 217 phy_data->phy_mode = efx->phy_mode;
218 efx->link_up = xfp_link_ok(efx); 218
219 efx->link_speed = 10000; 219 return 0;
220 efx->link_fd = true;
221 efx->link_fc = efx->wanted_fc;
222} 220}
223 221
224static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 222static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
225{ 223{
226 mdio45_ethtool_gset(&efx->mdio, ecmd); 224 mdio45_ethtool_gset(&efx->mdio, ecmd);
227} 225}
228 226
229static void xfp_phy_fini(struct efx_nic *efx) 227static void qt202x_phy_fini(struct efx_nic *efx)
230{ 228{
231 /* Clobber the LED if it was blinking */
232 efx->board_info.blink(efx, false);
233
234 /* Free the context block */ 229 /* Free the context block */
235 kfree(efx->phy_data); 230 kfree(efx->phy_data);
236 efx->phy_data = NULL; 231 efx->phy_data = NULL;
237} 232}
238 233
239struct efx_phy_operations falcon_xfp_phy_ops = { 234struct efx_phy_operations falcon_qt202x_phy_ops = {
240 .macs = EFX_XMAC, 235 .probe = qt202x_phy_probe,
241 .init = xfp_phy_init, 236 .init = qt202x_phy_init,
242 .reconfigure = xfp_phy_reconfigure, 237 .reconfigure = qt202x_phy_reconfigure,
243 .poll = xfp_phy_poll, 238 .poll = qt202x_phy_poll,
244 .fini = xfp_phy_fini, 239 .fini = qt202x_phy_fini,
245 .clear_interrupt = xfp_phy_clear_interrupt, 240 .get_settings = qt202x_phy_get_settings,
246 .get_settings = xfp_phy_get_settings,
247 .set_settings = efx_mdio_set_settings, 241 .set_settings = efx_mdio_set_settings,
248 .mmds = XFP_REQUIRED_DEVS,
249 .loopbacks = XFP_LOOPBACKS,
250}; 242};
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
new file mode 100644
index 000000000000..89d606fe9248
--- /dev/null
+++ b/drivers/net/sfc/regs.h
@@ -0,0 +1,3168 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_REGS_H
12#define EFX_REGS_H
13
14/*
15 * Falcon hardware architecture definitions have a name prefix following
16 * the format:
17 *
18 * F<type>_<min-rev><max-rev>_
19 *
20 * The following <type> strings are used:
21 *
22 * MMIO register MC register Host memory structure
23 * -------------------------------------------------------------
24 * Address R MCR
25 * Bitfield RF MCRF SF
26 * Enumerator FE MCFE SE
27 *
28 * <min-rev> is the first revision to which the definition applies:
29 *
30 * A: Falcon A1 (SFC4000AB)
31 * B: Falcon B0 (SFC4000BA)
32 * C: Siena A0 (SFL9021AA)
33 *
34 * If the definition has been changed or removed in later revisions
35 * then <max-rev> is the last revision to which the definition applies;
36 * otherwise it is "Z".
37 */
38
39/**************************************************************************
40 *
41 * Falcon/Siena registers and descriptors
42 *
43 **************************************************************************
44 */
45
46/* ADR_REGION_REG: Address region register */
47#define FR_AZ_ADR_REGION 0x00000000
48#define FRF_AZ_ADR_REGION3_LBN 96
49#define FRF_AZ_ADR_REGION3_WIDTH 18
50#define FRF_AZ_ADR_REGION2_LBN 64
51#define FRF_AZ_ADR_REGION2_WIDTH 18
52#define FRF_AZ_ADR_REGION1_LBN 32
53#define FRF_AZ_ADR_REGION1_WIDTH 18
54#define FRF_AZ_ADR_REGION0_LBN 0
55#define FRF_AZ_ADR_REGION0_WIDTH 18
56
57/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
58#define FR_AZ_INT_EN_KER 0x00000010
59#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
60#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
61#define FRF_AZ_KER_INT_CHAR_LBN 4
62#define FRF_AZ_KER_INT_CHAR_WIDTH 1
63#define FRF_AZ_KER_INT_KER_LBN 3
64#define FRF_AZ_KER_INT_KER_WIDTH 1
65#define FRF_AZ_DRV_INT_EN_KER_LBN 0
66#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
67
68/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
69#define FR_BZ_INT_EN_CHAR 0x00000020
70#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
71#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
72#define FRF_BZ_CHAR_INT_CHAR_LBN 4
73#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
74#define FRF_BZ_CHAR_INT_KER_LBN 3
75#define FRF_BZ_CHAR_INT_KER_WIDTH 1
76#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
77#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
78
79/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
80#define FR_AZ_INT_ADR_KER 0x00000030
81#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
82#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
83#define FRF_AZ_INT_ADR_KER_LBN 0
84#define FRF_AZ_INT_ADR_KER_WIDTH 64
85
86/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
87#define FR_BZ_INT_ADR_CHAR 0x00000040
88#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
89#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
90#define FRF_BZ_INT_ADR_CHAR_LBN 0
91#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
92
93/* INT_ACK_KER: Kernel interrupt acknowledge register */
94#define FR_AA_INT_ACK_KER 0x00000050
95#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
96#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
97
98/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */
99#define FR_BZ_INT_ISR0 0x00000090
100#define FRF_BZ_INT_ISR_REG_LBN 0
101#define FRF_BZ_INT_ISR_REG_WIDTH 64
102
103/* HW_INIT_REG: Hardware initialization register */
104#define FR_AZ_HW_INIT 0x000000c0
105#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
106#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
107#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
108#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
109#define FRF_CZ_TX_MRG_TAGS_LBN 120
110#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
111#define FRF_AB_TRGT_MASK_ALL_LBN 100
112#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
113#define FRF_AZ_DOORBELL_DROP_LBN 92
114#define FRF_AZ_DOORBELL_DROP_WIDTH 8
115#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
116#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
117#define FRF_AB_PE_EIDLE_DIS_LBN 75
118#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
119#define FRF_AA_FC_BLOCKING_EN_LBN 45
120#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
121#define FRF_BZ_B2B_REQ_EN_LBN 45
122#define FRF_BZ_B2B_REQ_EN_WIDTH 1
123#define FRF_AA_B2B_REQ_EN_LBN 44
124#define FRF_AA_B2B_REQ_EN_WIDTH 1
125#define FRF_BB_FC_BLOCKING_EN_LBN 44
126#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
127#define FRF_AZ_POST_WR_MASK_LBN 40
128#define FRF_AZ_POST_WR_MASK_WIDTH 4
129#define FRF_AZ_TLP_TC_LBN 34
130#define FRF_AZ_TLP_TC_WIDTH 3
131#define FRF_AZ_TLP_ATTR_LBN 32
132#define FRF_AZ_TLP_ATTR_WIDTH 2
133#define FRF_AB_INTB_VEC_LBN 24
134#define FRF_AB_INTB_VEC_WIDTH 5
135#define FRF_AB_INTA_VEC_LBN 16
136#define FRF_AB_INTA_VEC_WIDTH 5
137#define FRF_AZ_WD_TIMER_LBN 8
138#define FRF_AZ_WD_TIMER_WIDTH 8
139#define FRF_AZ_US_DISABLE_LBN 5
140#define FRF_AZ_US_DISABLE_WIDTH 1
141#define FRF_AZ_TLP_EP_LBN 4
142#define FRF_AZ_TLP_EP_WIDTH 1
143#define FRF_AZ_ATTR_SEL_LBN 3
144#define FRF_AZ_ATTR_SEL_WIDTH 1
145#define FRF_AZ_TD_SEL_LBN 1
146#define FRF_AZ_TD_SEL_WIDTH 1
147#define FRF_AZ_TLP_TD_LBN 0
148#define FRF_AZ_TLP_TD_WIDTH 1
149
150/* EE_SPI_HCMD_REG: SPI host command register */
151#define FR_AB_EE_SPI_HCMD 0x00000100
152#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
153#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
154#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
155#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
156#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
157#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
158#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
159#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
160#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
161#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
162#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
163#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
164#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
165#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
166#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
167#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
168
169/* USR_EV_CFG: User Level Event Configuration register */
170#define FR_CZ_USR_EV_CFG 0x00000100
171#define FRF_CZ_USREV_DIS_LBN 16
172#define FRF_CZ_USREV_DIS_WIDTH 1
173#define FRF_CZ_DFLT_EVQ_LBN 0
174#define FRF_CZ_DFLT_EVQ_WIDTH 10
175
176/* EE_SPI_HADR_REG: SPI host address register */
177#define FR_AB_EE_SPI_HADR 0x00000110
178#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
179#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
180#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
181#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
182
183/* EE_SPI_HDATA_REG: SPI host data register */
184#define FR_AB_EE_SPI_HDATA 0x00000120
185#define FRF_AB_EE_SPI_HDATA3_LBN 96
186#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
187#define FRF_AB_EE_SPI_HDATA2_LBN 64
188#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
189#define FRF_AB_EE_SPI_HDATA1_LBN 32
190#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
191#define FRF_AB_EE_SPI_HDATA0_LBN 0
192#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
193
194/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
195#define FR_AB_EE_BASE_PAGE 0x00000130
196#define FRF_AB_EE_EXPROM_MASK_LBN 16
197#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
198#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
199#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
200
201/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
202#define FR_AB_EE_VPD_CFG0 0x00000140
203#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
204#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
205#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
206#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
207#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
208#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
209#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
210#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
211#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
212#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
213#define FRF_AB_EE_VPDW_LENGTH_LBN 80
214#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
215#define FRF_AB_EE_VPDW_BASE_LBN 64
216#define FRF_AB_EE_VPDW_BASE_WIDTH 15
217#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
218#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
219#define FRF_AB_EE_VPD_BASE_LBN 32
220#define FRF_AB_EE_VPD_BASE_WIDTH 24
221#define FRF_AB_EE_VPD_LENGTH_LBN 16
222#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
223#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
224#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
225#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
226#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
227#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
228#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
229#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
230#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
231#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
232#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
233#define FRF_AB_EE_VPD_EN_LBN 0
234#define FRF_AB_EE_VPD_EN_WIDTH 1
235
236/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
237#define FR_AB_EE_VPD_SW_CNTL 0x00000150
238#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
239#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
240#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
241#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
242#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
243#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
244
245/* EE_VPD_SW_DATA_REG: VPD access SW data register */
246#define FR_AB_EE_VPD_SW_DATA 0x00000160
247#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
248#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
249
/* PBMX_DBG_IADDR_REG: Capture Module address register */
/* NOTE(review): FR_CZ_PBMX_DBG_IADDR and FR_BB_PCIE_CORE_INDIRECT share
 * address 0x1f0; the _CZ/_BB prefixes appear to select different chip
 * revisions — confirm against the header's naming convention. */
#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32

/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12

/* PBMX_DBG_IDATA_REG: Capture Module data register */
#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
268
/* NIC_STAT_REG: NIC status register */
/* *_LBN = lowest bit number of the field, *_WIDTH = width in bits. */
#define FR_AB_NIC_STAT 0x00000200
#define FRF_BB_AER_DIS_LBN 34
#define FRF_BB_AER_DIS_WIDTH 1
#define FRF_BB_EE_STRAP_EN_LBN 31
#define FRF_BB_EE_STRAP_EN_WIDTH 1
#define FRF_BB_EE_STRAP_LBN 24
#define FRF_BB_EE_STRAP_WIDTH 4
#define FRF_BB_REVISION_ID_LBN 17
#define FRF_BB_REVISION_ID_WIDTH 7
#define FRF_AB_ONCHIP_SRAM_LBN 16
#define FRF_AB_ONCHIP_SRAM_WIDTH 1
#define FRF_AB_SF_PRST_LBN 9
#define FRF_AB_SF_PRST_WIDTH 1
#define FRF_AB_EE_PRST_LBN 8
#define FRF_AB_EE_PRST_WIDTH 1
#define FRF_AB_ATE_MODE_LBN 3
#define FRF_AB_ATE_MODE_WIDTH 1
#define FRF_AB_STRAP_PINS_LBN 0
#define FRF_AB_STRAP_PINS_WIDTH 3
289
/* GPIO_CTL_REG: GPIO control register */
/* Per-pin output-enable / output / input / power-up-value bits; *_LBN is
 * the field's lowest bit number, *_WIDTH its width in bits. */
#define FR_AB_GPIO_CTL 0x00000210
#define FRF_AB_GPIO_OUT3_LBN 112
#define FRF_AB_GPIO_OUT3_WIDTH 16
#define FRF_AB_GPIO_IN3_LBN 104
#define FRF_AB_GPIO_IN3_WIDTH 8
#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
#define FRF_AB_GPIO_OUT2_LBN 80
#define FRF_AB_GPIO_OUT2_WIDTH 16
#define FRF_AB_GPIO_IN2_LBN 72
#define FRF_AB_GPIO_IN2_WIDTH 8
#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
#define FRF_AB_GPIO15_OEN_LBN 63
#define FRF_AB_GPIO15_OEN_WIDTH 1
#define FRF_AB_GPIO14_OEN_LBN 62
#define FRF_AB_GPIO14_OEN_WIDTH 1
#define FRF_AB_GPIO13_OEN_LBN 61
#define FRF_AB_GPIO13_OEN_WIDTH 1
#define FRF_AB_GPIO12_OEN_LBN 60
#define FRF_AB_GPIO12_OEN_WIDTH 1
#define FRF_AB_GPIO11_OEN_LBN 59
#define FRF_AB_GPIO11_OEN_WIDTH 1
#define FRF_AB_GPIO10_OEN_LBN 58
#define FRF_AB_GPIO10_OEN_WIDTH 1
#define FRF_AB_GPIO9_OEN_LBN 57
#define FRF_AB_GPIO9_OEN_WIDTH 1
#define FRF_AB_GPIO8_OEN_LBN 56
#define FRF_AB_GPIO8_OEN_WIDTH 1
#define FRF_AB_GPIO15_OUT_LBN 55
#define FRF_AB_GPIO15_OUT_WIDTH 1
#define FRF_AB_GPIO14_OUT_LBN 54
#define FRF_AB_GPIO14_OUT_WIDTH 1
#define FRF_AB_GPIO13_OUT_LBN 53
#define FRF_AB_GPIO13_OUT_WIDTH 1
#define FRF_AB_GPIO12_OUT_LBN 52
#define FRF_AB_GPIO12_OUT_WIDTH 1
#define FRF_AB_GPIO11_OUT_LBN 51
#define FRF_AB_GPIO11_OUT_WIDTH 1
#define FRF_AB_GPIO10_OUT_LBN 50
#define FRF_AB_GPIO10_OUT_WIDTH 1
#define FRF_AB_GPIO9_OUT_LBN 49
#define FRF_AB_GPIO9_OUT_WIDTH 1
#define FRF_AB_GPIO8_OUT_LBN 48
#define FRF_AB_GPIO8_OUT_WIDTH 1
#define FRF_AB_GPIO15_IN_LBN 47
#define FRF_AB_GPIO15_IN_WIDTH 1
#define FRF_AB_GPIO14_IN_LBN 46
#define FRF_AB_GPIO14_IN_WIDTH 1
#define FRF_AB_GPIO13_IN_LBN 45
#define FRF_AB_GPIO13_IN_WIDTH 1
#define FRF_AB_GPIO12_IN_LBN 44
#define FRF_AB_GPIO12_IN_WIDTH 1
#define FRF_AB_GPIO11_IN_LBN 43
#define FRF_AB_GPIO11_IN_WIDTH 1
#define FRF_AB_GPIO10_IN_LBN 42
#define FRF_AB_GPIO10_IN_WIDTH 1
#define FRF_AB_GPIO9_IN_LBN 41
#define FRF_AB_GPIO9_IN_WIDTH 1
#define FRF_AB_GPIO8_IN_LBN 40
#define FRF_AB_GPIO8_IN_WIDTH 1
#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
#define FRF_AB_CLK156_OUT_EN_LBN 31
#define FRF_AB_CLK156_OUT_EN_WIDTH 1
#define FRF_AB_USE_NIC_CLK_LBN 30
#define FRF_AB_USE_NIC_CLK_WIDTH 1
#define FRF_AB_GPIO5_OEN_LBN 29
#define FRF_AB_GPIO5_OEN_WIDTH 1
#define FRF_AB_GPIO4_OEN_LBN 28
#define FRF_AB_GPIO4_OEN_WIDTH 1
#define FRF_AB_GPIO3_OEN_LBN 27
#define FRF_AB_GPIO3_OEN_WIDTH 1
#define FRF_AB_GPIO2_OEN_LBN 26
#define FRF_AB_GPIO2_OEN_WIDTH 1
#define FRF_AB_GPIO1_OEN_LBN 25
#define FRF_AB_GPIO1_OEN_WIDTH 1
#define FRF_AB_GPIO0_OEN_LBN 24
#define FRF_AB_GPIO0_OEN_WIDTH 1
#define FRF_AB_GPIO7_OUT_LBN 23
#define FRF_AB_GPIO7_OUT_WIDTH 1
#define FRF_AB_GPIO6_OUT_LBN 22
#define FRF_AB_GPIO6_OUT_WIDTH 1
#define FRF_AB_GPIO5_OUT_LBN 21
#define FRF_AB_GPIO5_OUT_WIDTH 1
#define FRF_AB_GPIO4_OUT_LBN 20
#define FRF_AB_GPIO4_OUT_WIDTH 1
#define FRF_AB_GPIO3_OUT_LBN 19
#define FRF_AB_GPIO3_OUT_WIDTH 1
#define FRF_AB_GPIO2_OUT_LBN 18
#define FRF_AB_GPIO2_OUT_WIDTH 1
#define FRF_AB_GPIO1_OUT_LBN 17
#define FRF_AB_GPIO1_OUT_WIDTH 1
#define FRF_AB_GPIO0_OUT_LBN 16
#define FRF_AB_GPIO0_OUT_WIDTH 1
#define FRF_AB_GPIO7_IN_LBN 15
#define FRF_AB_GPIO7_IN_WIDTH 1
#define FRF_AB_GPIO6_IN_LBN 14
#define FRF_AB_GPIO6_IN_WIDTH 1
#define FRF_AB_GPIO5_IN_LBN 13
#define FRF_AB_GPIO5_IN_WIDTH 1
#define FRF_AB_GPIO4_IN_LBN 12
#define FRF_AB_GPIO4_IN_WIDTH 1
#define FRF_AB_GPIO3_IN_LBN 11
#define FRF_AB_GPIO3_IN_WIDTH 1
#define FRF_AB_GPIO2_IN_LBN 10
#define FRF_AB_GPIO2_IN_WIDTH 1
#define FRF_AB_GPIO1_IN_LBN 9
#define FRF_AB_GPIO1_IN_WIDTH 1
#define FRF_AB_GPIO0_IN_LBN 8
#define FRF_AB_GPIO0_IN_WIDTH 1
#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
432
/* GLB_CTL_REG: Global control register */
/* NOTE(review): FRF_AA_* and FRF_BB_* fields sharing an LBN (e.g. bit 60)
 * appear to be per-chip-revision views of the same bit — confirm against
 * the header's revision naming convention.  FFE_* values are enumerated
 * field values, not bit positions. */
#define FR_AB_GLB_CTL 0x00000220
#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
#define FRF_AA_PCIX_RST_CTL_LBN 60
#define FRF_AA_PCIX_RST_CTL_WIDTH 1
#define FRF_BB_BIU_RST_CTL_LBN 60
#define FRF_BB_BIU_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
#define FRF_AB_XGRX_RST_CTL_LBN 56
#define FRF_AB_XGRX_RST_CTL_WIDTH 1
#define FRF_AB_XGTX_RST_CTL_LBN 55
#define FRF_AB_XGTX_RST_CTL_WIDTH 1
#define FRF_AB_EM_RST_CTL_LBN 54
#define FRF_AB_EM_RST_CTL_WIDTH 1
#define FRF_AB_EV_RST_CTL_LBN 53
#define FRF_AB_EV_RST_CTL_WIDTH 1
#define FRF_AB_SR_RST_CTL_LBN 52
#define FRF_AB_SR_RST_CTL_WIDTH 1
#define FRF_AB_RX_RST_CTL_LBN 51
#define FRF_AB_RX_RST_CTL_WIDTH 1
#define FRF_AB_TX_RST_CTL_LBN 50
#define FRF_AB_TX_RST_CTL_WIDTH 1
#define FRF_AB_EE_RST_CTL_LBN 49
#define FRF_AB_EE_RST_CTL_WIDTH 1
#define FRF_AB_CS_RST_CTL_LBN 48
#define FRF_AB_CS_RST_CTL_WIDTH 1
#define FRF_AB_HOT_RST_CTL_LBN 40
#define FRF_AB_HOT_RST_CTL_WIDTH 2
#define FRF_AB_RST_EXT_PHY_LBN 31
#define FRF_AB_RST_EXT_PHY_WIDTH 1
#define FRF_AB_RST_XAUI_SD_LBN 30
#define FRF_AB_RST_XAUI_SD_WIDTH 1
#define FRF_AB_RST_PCIE_SD_LBN 29
#define FRF_AB_RST_PCIE_SD_WIDTH 1
#define FRF_AA_RST_PCIX_LBN 28
#define FRF_AA_RST_PCIX_WIDTH 1
#define FRF_BB_RST_BIU_LBN 28
#define FRF_BB_RST_BIU_WIDTH 1
#define FRF_AB_RST_PCIE_STKY_LBN 27
#define FRF_AB_RST_PCIE_STKY_WIDTH 1
#define FRF_AB_RST_PCIE_NSTKY_LBN 26
#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
#define FRF_AB_RST_PCIE_CORE_LBN 25
#define FRF_AB_RST_PCIE_CORE_WIDTH 1
#define FRF_AB_RST_XGRX_LBN 24
#define FRF_AB_RST_XGRX_WIDTH 1
#define FRF_AB_RST_XGTX_LBN 23
#define FRF_AB_RST_XGTX_WIDTH 1
#define FRF_AB_RST_EM_LBN 22
#define FRF_AB_RST_EM_WIDTH 1
#define FRF_AB_RST_EV_LBN 21
#define FRF_AB_RST_EV_WIDTH 1
#define FRF_AB_RST_SR_LBN 20
#define FRF_AB_RST_SR_WIDTH 1
#define FRF_AB_RST_RX_LBN 19
#define FRF_AB_RST_RX_WIDTH 1
#define FRF_AB_RST_TX_LBN 18
#define FRF_AB_RST_TX_WIDTH 1
#define FRF_AB_RST_SF_LBN 17
#define FRF_AB_RST_SF_WIDTH 1
#define FRF_AB_RST_CS_LBN 16
#define FRF_AB_RST_CS_WIDTH 1
#define FRF_AB_INT_RST_DUR_LBN 4
#define FRF_AB_INT_RST_DUR_WIDTH 3
#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
#define FFE_AB_EXT_PHY_RST_DUR_640US 3
#define FFE_AB_EXT_PHY_RST_DUR_320US 2
#define FFE_AB_EXT_PHY_RST_DUR_160US 1
#define FFE_AB_EXT_PHY_RST_DUR_80US 0
#define FRF_AB_SWRST_LBN 0
#define FRF_AB_SWRST_WIDTH 1
519
/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
/* Bits 32+ are the per-source enables; bits 0-12 are the corresponding
 * latched interrupt status bits.  *_LBN = lowest bit number, *_WIDTH =
 * width in bits. */
#define FR_AZ_FATAL_INTR_KER 0x00000230
#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
578
/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
/* Mirrors FATAL_INTR_REG_KER layout: bits 32+ enables, bits 0-12 status. */
#define FR_BZ_FATAL_INTR_CHAR 0x00000240
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
637
/* DP_CTRL_REG: Datapath control register */
/* *_LBN = lowest bit number of the field, *_WIDTH = width in bits. */
#define FR_BZ_DP_CTRL 0x00000250
#define FRF_BZ_FLS_EVQ_ID_LBN 0
#define FRF_BZ_FLS_EVQ_ID_WIDTH 12

/* MEM_STAT_REG: Memory status register */
#define FR_AZ_MEM_STAT 0x00000260
#define FRF_AB_MEM_PERR_VEC_LBN 53
#define FRF_AB_MEM_PERR_VEC_WIDTH 38
#define FRF_AB_MBIST_CORR_LBN 38
#define FRF_AB_MBIST_CORR_WIDTH 15
#define FRF_AB_MBIST_ERR_LBN 0
#define FRF_AB_MBIST_ERR_WIDTH 40
#define FRF_CZ_MEM_PERR_VEC_LBN 0
#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
653
/* CS_DEBUG_REG: Debug register */
/* NOTE(review): FRF_CZ_CS_PORT_FPE (LBN 1, width 35) overlaps several
 * FRF_AB_* fields — presumably a Siena (_CZ) layout of the same register;
 * confirm against the hardware reference. */
#define FR_AZ_CS_DEBUG 0x00000270
#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
#define FRF_CZ_CS_PORT_NUM_LBN 40
#define FRF_CZ_CS_PORT_NUM_WIDTH 2
#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
#define FRF_CZ_CS_PORT_FPE_LBN 1
#define FRF_CZ_CS_PORT_FPE_WIDTH 35
#define FRF_AB_EM_DEBUG_ADDR_LBN 26
#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
#define FRF_AB_SR_DEBUG_ADDR_LBN 21
#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
#define FRF_AB_EV_DEBUG_ADDR_LBN 16
#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
#define FRF_AB_RX_DEBUG_ADDR_LBN 11
#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
#define FRF_AB_TX_DEBUG_ADDR_LBN 6
#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
#define FRF_AZ_CS_DEBUG_EN_LBN 0
#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
686
/* DRIVER_REG: Driver scratch register [0-7] */
/* *_STEP/*_ROWS describe a register array: ROWS entries spaced STEP bytes
 * apart starting at the base address. */
#define FR_AZ_DRIVER 0x00000280
#define FR_AZ_DRIVER_STEP 16
#define FR_AZ_DRIVER_ROWS 8
#define FRF_AZ_DRIVER_DW0_LBN 0
#define FRF_AZ_DRIVER_DW0_WIDTH 32

/* ALTERA_BUILD_REG: Altera build register */
#define FR_AZ_ALTERA_BUILD 0x00000300
#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32

/* CSR_SPARE_REG: Spare register */
#define FR_AZ_CSR_SPARE 0x00000310
#define FRF_AB_MEM_PERR_EN_LBN 64
#define FRF_AB_MEM_PERR_EN_WIDTH 38
#define FRF_CZ_MEM_PERR_EN_LBN 64
#define FRF_CZ_MEM_PERR_EN_WIDTH 35
#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
#define FRF_AZ_CSR_SPARE_BITS_LBN 0
#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
709
/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
/* FFE_* constants are enumerated values for the preceding field, not bit
 * positions. */
#define FR_AB_PCIE_SD_CTL0123 0x00000320
#define FRF_AB_PCIE_TESTSIG_H_LBN 96
#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
#define FRF_AB_PCIE_TESTSIG_L_LBN 64
#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
#define FRF_AB_PCIE_OFFSET_LBN 56
#define FRF_AB_PCIE_OFFSET_WIDTH 8
#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
#define FRF_AB_PCIE_HIVMODE_H_LBN 53
#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
#define FRF_AB_PCIE_HIVMODE_L_LBN 52
#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
#define FRF_AB_PCIE_PARRESET_H_LBN 51
#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
#define FRF_AB_PCIE_PARRESET_L_LBN 50
#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
#define FRF_AB_PCIE_LPBK_LBN 40
#define FRF_AB_PCIE_LPBK_WIDTH 8
#define FRF_AB_PCIE_PARLPBK_LBN 32
#define FRF_AB_PCIE_PARLPBK_WIDTH 8
#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
#define FFE_AB_PCIE_RXEQCTL_OFF 2
#define FFE_AB_PCIE_RXEQCTL_MIN 1
#define FFE_AB_PCIE_RXEQCTL_MAX 0
#define FRF_AB_PCIE_HIDRV_LBN 8
#define FRF_AB_PCIE_HIDRV_WIDTH 8
#define FRF_AB_PCIE_LODRV_LBN 0
#define FRF_AB_PCIE_LODRV_WIDTH 8
766
/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
/* Eight 4-bit per-lane TX drive (DTX) and EQ (DEQ) nibbles. */
#define FR_AB_PCIE_SD_CTL45 0x00000330
#define FRF_AB_PCIE_DTX7_LBN 60
#define FRF_AB_PCIE_DTX7_WIDTH 4
#define FRF_AB_PCIE_DTX6_LBN 56
#define FRF_AB_PCIE_DTX6_WIDTH 4
#define FRF_AB_PCIE_DTX5_LBN 52
#define FRF_AB_PCIE_DTX5_WIDTH 4
#define FRF_AB_PCIE_DTX4_LBN 48
#define FRF_AB_PCIE_DTX4_WIDTH 4
#define FRF_AB_PCIE_DTX3_LBN 44
#define FRF_AB_PCIE_DTX3_WIDTH 4
#define FRF_AB_PCIE_DTX2_LBN 40
#define FRF_AB_PCIE_DTX2_WIDTH 4
#define FRF_AB_PCIE_DTX1_LBN 36
#define FRF_AB_PCIE_DTX1_WIDTH 4
#define FRF_AB_PCIE_DTX0_LBN 32
#define FRF_AB_PCIE_DTX0_WIDTH 4
#define FRF_AB_PCIE_DEQ7_LBN 28
#define FRF_AB_PCIE_DEQ7_WIDTH 4
#define FRF_AB_PCIE_DEQ6_LBN 24
#define FRF_AB_PCIE_DEQ6_WIDTH 4
#define FRF_AB_PCIE_DEQ5_LBN 20
#define FRF_AB_PCIE_DEQ5_WIDTH 4
#define FRF_AB_PCIE_DEQ4_LBN 16
#define FRF_AB_PCIE_DEQ4_WIDTH 4
#define FRF_AB_PCIE_DEQ3_LBN 12
#define FRF_AB_PCIE_DEQ3_WIDTH 4
#define FRF_AB_PCIE_DEQ2_LBN 8
#define FRF_AB_PCIE_DEQ2_WIDTH 4
#define FRF_AB_PCIE_DEQ1_LBN 4
#define FRF_AB_PCIE_DEQ1_WIDTH 4
#define FRF_AB_PCIE_DEQ0_LBN 0
#define FRF_AB_PCIE_DEQ0_WIDTH 4
801
/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
/* *_LBN = lowest bit number of the field, *_WIDTH = width in bits. */
#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
#define FRF_AB_PCIE_PRBSERR_LBN 40
#define FRF_AB_PCIE_PRBSERR_WIDTH 8
#define FRF_AB_PCIE_PRBSERRH0_LBN 32
#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
#define FRF_AB_PCIE_FASTINIT_H_LBN 15
#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
#define FRF_AB_PCIE_FASTINIT_L_LBN 14
#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
#define FRF_AB_PCIE_PRBSSEL_LBN 0
#define FRF_AB_PCIE_PRBSSEL_WIDTH 8

/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
#define FR_BB_DEBUG_DATA_OUT 0x00000350
#define FRF_BB_DEBUG2_PORT_LBN 25
#define FRF_BB_DEBUG2_PORT_WIDTH 15
#define FRF_BB_DEBUG1_PORT_LBN 0
#define FRF_BB_DEBUG1_PORT_WIDTH 25
837
/* EVQ_RPTR_REGP0: Event queue read pointer register */
/* Several address-space views of the same register array; *_STEP/*_ROWS
 * give the array stride in bytes and the number of entries. */
#define FR_BZ_EVQ_RPTR_P0 0x00000400
#define FR_BZ_EVQ_RPTR_P0_STEP 8192
#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
/* EVQ_RPTR_REG_KER: Event queue read pointer register */
#define FR_AA_EVQ_RPTR_KER 0x00011b00
#define FR_AA_EVQ_RPTR_KER_STEP 4
#define FR_AA_EVQ_RPTR_KER_ROWS 4
/* EVQ_RPTR_REG: Event queue read pointer register */
#define FR_BZ_EVQ_RPTR 0x00fa0000
#define FR_BZ_EVQ_RPTR_STEP 16
#define FR_BB_EVQ_RPTR_ROWS 4096
#define FR_CZ_EVQ_RPTR_ROWS 1024
/* EVQ_RPTR_REGP123: Event queue read pointer register */
#define FR_BB_EVQ_RPTR_P123 0x01000400
#define FR_BB_EVQ_RPTR_P123_STEP 8192
#define FR_BB_EVQ_RPTR_P123_ROWS 3072
#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
#define FRF_AZ_EVQ_RPTR_LBN 0
#define FRF_AZ_EVQ_RPTR_WIDTH 15
859
/* TIMER_COMMAND_REGP0: Timer Command Registers */
/* Per-queue timer command array; _CZ and _AB variants lay the mode/value
 * fields out at different bit positions. */
#define FR_BZ_TIMER_COMMAND_P0 0x00000420
#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
/* TIMER_COMMAND_REG_KER: Timer Command Registers */
#define FR_AA_TIMER_COMMAND_KER 0x00000420
#define FR_AA_TIMER_COMMAND_KER_STEP 8192
#define FR_AA_TIMER_COMMAND_KER_ROWS 4
/* TIMER_COMMAND_REGP123: Timer Command Registers */
#define FR_BB_TIMER_COMMAND_P123 0x01000420
#define FR_BB_TIMER_COMMAND_P123_STEP 8192
#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
#define FRF_CZ_TC_TIMER_MODE_LBN 14
#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
#define FRF_AB_TC_TIMER_MODE_LBN 12
#define FRF_AB_TC_TIMER_MODE_WIDTH 2
#define FRF_CZ_TC_TIMER_VAL_LBN 0
#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
#define FRF_AB_TC_TIMER_VAL_LBN 0
#define FRF_AB_TC_TIMER_VAL_WIDTH 12

/* DRV_EV_REG: Driver generated event register */
#define FR_AZ_DRV_EV 0x00000440
#define FRF_AZ_DRV_EV_QID_LBN 64
#define FRF_AZ_DRV_EV_QID_WIDTH 12
#define FRF_AZ_DRV_EV_DATA_LBN 0
#define FRF_AZ_DRV_EV_DATA_WIDTH 64
887
/* EVQ_CTL_REG: Event queue control register */
/* *_LBN = lowest bit number of the field, *_WIDTH = width in bits. */
#define FR_AZ_EVQ_CTL 0x00000450
#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7

/* EVQ_CNT1_REG: Event counter 1 register */
#define FR_AZ_EVQ_CNT1 0x00000460
#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20

/* EVQ_CNT2_REG: Event counter 2 register */
#define FR_AZ_EVQ_CNT2 0x00000470
#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_RDY_CNT_LBN 80
#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20

/* USR_EV_REG: Event mailbox register */
#define FR_CZ_USR_EV 0x00000540
#define FR_CZ_USR_EV_STEP 8192
#define FR_CZ_USR_EV_ROWS 1024
#define FRF_CZ_USR_EV_DATA_LBN 0
#define FRF_CZ_USR_EV_DATA_WIDTH 32
941
/* BUF_TBL_CFG_REG: Buffer table configuration register */
/* *_LBN = lowest bit number of the field, *_WIDTH = width in bits. */
#define FR_AZ_BUF_TBL_CFG 0x00000600
#define FRF_AZ_BUF_TBL_MODE_LBN 3
#define FRF_AZ_BUF_TBL_MODE_WIDTH 1

/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
#define FR_AZ_SRM_RX_DC_CFG 0x00000610
#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21

/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
#define FR_AZ_SRM_TX_DC_CFG 0x00000620
#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21

/* SRM_CFG_REG: SRAM configuration register */
#define FR_AZ_SRM_CFG 0x00000630
#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
#define FRF_AZ_SRM_INIT_EN_LBN 3
#define FRF_AZ_SRM_INIT_EN_WIDTH 1
#define FRF_AZ_SRM_NUM_BANK_LBN 2
#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
#define FRF_AZ_SRM_BANK_SIZE_LBN 0
#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2

/* BUF_TBL_UPD_REG: Buffer table update register */
#define FR_AZ_BUF_TBL_UPD 0x00000650
#define FRF_AZ_BUF_UPD_CMD_LBN 63
#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
#define FRF_AZ_BUF_CLR_CMD_LBN 62
#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
#define FRF_AZ_BUF_CLR_END_ID_LBN 32
#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
#define FRF_AZ_BUF_CLR_START_ID_LBN 0
#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20

/* SRM_UPD_EVQ_REG: Buffer table update register */
#define FR_AZ_SRM_UPD_EVQ 0x00000660
#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
987
/* SRAM_PARITY_REG: SRAM parity register. */
/* NOTE(review): bit 0 is FORCE_SRAM_PERR on Falcon (_AB) and
 * FORCE_SRAM_SINGLE_ERR on Siena (_CZ) — same bit, per-revision name. */
#define FR_AZ_SRAM_PARITY 0x00000670
#define FRF_CZ_BYPASS_ECC_LBN 3
#define FRF_CZ_BYPASS_ECC_WIDTH 1
#define FRF_CZ_SEC_INT_LBN 2
#define FRF_CZ_SEC_INT_WIDTH 1
#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
#define FRF_AB_FORCE_SRAM_PERR_LBN 0
#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
1000
/* RX_CFG_REG: Receive configuration register */
/* NOTE(review): the _AA (Falcon A) and _BZ (Falcon B onwards) fields
 * interleave because the layout shifted between revisions; overlapping
 * LBNs are per-revision views, not conflicts. */
#define FR_AZ_RX_CFG 0x00000800
#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
#define FRF_BZ_RX_TCP_SUP_LBN 48
#define FRF_BZ_RX_TCP_SUP_WIDTH 1
#define FRF_BZ_RX_INGR_EN_LBN 47
#define FRF_BZ_RX_INGR_EN_WIDTH 1
#define FRF_BZ_RX_IP_HASH_LBN 46
#define FRF_BZ_RX_IP_HASH_WIDTH 1
#define FRF_BZ_RX_HASH_ALG_LBN 45
#define FRF_BZ_RX_HASH_ALG_WIDTH 1
#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
#define FRF_BZ_RX_OWNERR_CTL_LBN 38
#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
#define FRF_BZ_RX_XON_TX_TH_LBN 33
#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
#define FRF_AA_RX_OWNERR_CTL_LBN 30
#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
#define FRF_AA_RX_XON_TX_TH_LBN 25
#define FRF_AA_RX_XON_TX_TH_WIDTH 5
#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
#define FRF_AA_RX_XOFF_TX_TH_LBN 20
#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
#define FRF_BZ_RX_XON_MAC_TH_LBN 10
#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
#define FRF_AA_RX_XON_MAC_TH_LBN 6
#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
1061
/* RX_FILTER_CTL_REG: Receive filter control registers */
/* *_LBN = lowest bit number of the field, *_WIDTH = width in bits. */
#define FR_BZ_RX_FILTER_CTL 0x00000810
#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_NUM_KER_LBN 24
#define FRF_BZ_NUM_KER_WIDTH 2
#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
1096
1097/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
1098#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
1099#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
1100#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
1101#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
1102#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
1103
/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
#define FR_BZ_RX_DESC_UPD_P0 0x00000830
#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
#define FR_AA_RX_DESC_UPD_KER 0x00000830
#define FR_AA_RX_DESC_UPD_KER_STEP 8192
#define FR_AA_RX_DESC_UPD_KER_ROWS 4
/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
#define FR_BB_RX_DESC_UPD_P123 0x01000830
#define FR_BB_RX_DESC_UPD_P123_STEP 8192
#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_RX_DESC_WPTR_LBN 96
#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_RX_DESC_LBN 0
#define FRF_AZ_RX_DESC_WIDTH 64

/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
#define FR_AZ_RX_DC_CFG 0x00000840
#define FRF_AB_RX_MAX_PF_LBN 2
#define FRF_AB_RX_MAX_PF_WIDTH 2
#define FRF_AZ_RX_DC_SIZE_LBN 0
#define FRF_AZ_RX_DC_SIZE_WIDTH 2
#define FFE_AZ_RX_DC_SIZE_64 3
#define FFE_AZ_RX_DC_SIZE_32 2
#define FFE_AZ_RX_DC_SIZE_16 1
#define FFE_AZ_RX_DC_SIZE_8 0

/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
#define FR_AZ_RX_DC_PF_WM 0x00000850
#define FRF_AZ_RX_DC_PF_HWM_LBN 6
#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
#define FRF_AZ_RX_DC_PF_LWM_LBN 0
#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6

/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
#define FR_BZ_RX_RSS_TKEY 0x00000860
#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64

/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
#define FR_AZ_RX_NODESC_DROP 0x00000880
#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16

/* RX_SELF_RST_REG: Receive self reset register */
#define FR_AA_RX_SELF_RST 0x00000890
#define FRF_AA_RX_ISCSI_DIS_LBN 17
#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
#define FRF_AA_RX_SW_RST_REG_LBN 16
#define FRF_AA_RX_SW_RST_REG_WIDTH 1
#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
#define FRF_AA_RX_SELF_RST_EN_LBN 8
#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
#define FRF_AA_RX_MAX_PF_LAT_LBN 4
#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
#define FRF_AA_RX_MAX_LU_LAT_LBN 0
#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4

/* RX_DEBUG_REG: undocumented register */
#define FR_AZ_RX_DEBUG 0x000008a0
#define FRF_AZ_RX_DEBUG_LBN 0
#define FRF_AZ_RX_DEBUG_WIDTH 64

/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
#define FR_AZ_RX_PUSH_DROP 0x000008b0
#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32

/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128

/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128

/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64

/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12

/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
#define FR_AA_TX_DESC_UPD_KER 0x00000a10
#define FR_AA_TX_DESC_UPD_KER_STEP 8192
#define FR_AA_TX_DESC_UPD_KER_ROWS 8
/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
#define FR_BB_TX_DESC_UPD_P123 0x01000a10
#define FR_BB_TX_DESC_UPD_P123_STEP 8192
#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_TX_DESC_WPTR_LBN 96
#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_TX_DESC_LBN 0
#define FRF_AZ_TX_DESC_WIDTH 95

/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
#define FR_AZ_TX_DC_CFG 0x00000a20
#define FRF_AZ_TX_DC_SIZE_LBN 0
#define FRF_AZ_TX_DC_SIZE_WIDTH 2
#define FFE_AZ_TX_DC_SIZE_32 2
#define FFE_AZ_TX_DC_SIZE_16 1
#define FFE_AZ_TX_DC_SIZE_8 0

/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
#define FR_AA_TX_CHKSM_CFG 0x00000a30
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32

/* TX_CFG_REG: Transmit configuration register */
#define FR_AZ_TX_CFG 0x00000a50
#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
#define FRF_AZ_TX_P1_PRI_EN_LBN 4
#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
#define FRF_AZ_TX_OWNERR_CTL_LBN 2
#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1

/* TX_PUSH_DROP_REG: Transmit push dropped register */
#define FR_AZ_TX_PUSH_DROP 0x00000a60
#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32

/* TX_RESERVED_REG: Transmit configuration register */
#define FR_AZ_TX_RESERVED 0x00000a80
#define FRF_AZ_TX_EVT_CNT_LBN 121
#define FRF_AZ_TX_EVT_CNT_WIDTH 7
#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
#define FRF_AZ_TX_PUSH_EN_LBN 89
#define FRF_AZ_TX_PUSH_EN_WIDTH 1
#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
#define FRF_AZ_TX_DMAQ_ST_LBN 78
#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
#define FRF_AZ_TX_RX_SPACER_LBN 64
#define FRF_AZ_TX_RX_SPACER_WIDTH 8
#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
#define FRF_AZ_TX_XP_TIMER_LBN 52
#define FRF_AZ_TX_XP_TIMER_WIDTH 5
#define FRF_AZ_TX_PREF_SPACER_LBN 44
#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
#define FRF_AZ_TX_ONLY1TAG_LBN 21
#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
#define FRF_AA_TX_DMA_FF_THR_LBN 16
#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
#define FRF_AZ_TX_DMA_SPACER_LBN 8
#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
#define FRF_AA_TX_TCP_DIS_LBN 7
#define FRF_AA_TX_TCP_DIS_WIDTH 1
#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
#define FRF_AA_TX_IP_DIS_LBN 6
#define FRF_AA_TX_IP_DIS_WIDTH 1
#define FRF_AZ_TX_MAX_CPL_LBN 2
#define FRF_AZ_TX_MAX_CPL_WIDTH 2
#define FFE_AZ_TX_MAX_CPL_16 3
#define FFE_AZ_TX_MAX_CPL_8 2
#define FFE_AZ_TX_MAX_CPL_4 1
#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
#define FRF_AZ_TX_MAX_PREF_LBN 0
#define FRF_AZ_TX_MAX_PREF_WIDTH 2
#define FFE_AZ_TX_MAX_PREF_32 3
#define FFE_AZ_TX_MAX_PREF_16 2
#define FFE_AZ_TX_MAX_PREF_8 1
#define FFE_AZ_TX_MAX_PREF_OFF 0

/* TX_PACE_REG: Transmit pace control register */
#define FR_BZ_TX_PACE 0x00000a90
#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
#define FRF_BZ_TX_PACE_SB_AF_LBN 9
#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5

/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16

/* TX_VLAN_REG: Transmit VLAN tag register */
#define FR_BB_TX_VLAN 0x00000ae0
#define FRF_BB_TX_VLAN_EN_LBN 127
#define FRF_BB_TX_VLAN_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_LBN 112
#define FRF_BB_TX_VLAN7_WIDTH 12
#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN6_LBN 96
#define FRF_BB_TX_VLAN6_WIDTH 12
#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN5_LBN 80
#define FRF_BB_TX_VLAN5_WIDTH 12
#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN4_LBN 64
#define FRF_BB_TX_VLAN4_WIDTH 12
#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN3_LBN 48
#define FRF_BB_TX_VLAN3_WIDTH 12
#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN2_LBN 32
#define FRF_BB_TX_VLAN2_WIDTH 12
#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN1_LBN 16
#define FRF_BB_TX_VLAN1_WIDTH 12
#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN0_LBN 0
#define FRF_BB_TX_VLAN0_WIDTH 12

/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1

/* TX_IPFIL_TBL: Transmit IP source address filter table */
#define FR_BB_TX_IPFIL_TBL 0x00000b00
#define FR_BB_TX_IPFIL_TBL_STEP 16
#define FR_BB_TX_IPFIL_TBL_ROWS 16
#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32

/* MD_TXD_REG: PHY management transmit data register */
#define FR_AB_MD_TXD 0x00000c00
#define FRF_AB_MD_TXD_LBN 0
#define FRF_AB_MD_TXD_WIDTH 16

/* MD_RXD_REG: PHY management receive data register */
#define FR_AB_MD_RXD 0x00000c10
#define FRF_AB_MD_RXD_LBN 0
#define FRF_AB_MD_RXD_WIDTH 16

/* MD_CS_REG: PHY management configuration & status register */
#define FR_AB_MD_CS 0x00000c20
#define FRF_AB_MD_RD_EN_CMD_LBN 15
#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
#define FRF_AB_MD_WR_EN_CMD_LBN 14
#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
#define FRF_AB_MD_ADDR_CMD_LBN 13
#define FRF_AB_MD_ADDR_CMD_WIDTH 1
#define FRF_AB_MD_PT_LBN 7
#define FRF_AB_MD_PT_WIDTH 3
#define FRF_AB_MD_PL_LBN 6
#define FRF_AB_MD_PL_WIDTH 1
#define FRF_AB_MD_INT_CLR_LBN 5
#define FRF_AB_MD_INT_CLR_WIDTH 1
#define FRF_AB_MD_GC_LBN 4
#define FRF_AB_MD_GC_WIDTH 1
#define FRF_AB_MD_PRSP_LBN 3
#define FRF_AB_MD_PRSP_WIDTH 1
#define FRF_AB_MD_RIC_LBN 2
#define FRF_AB_MD_RIC_WIDTH 1
#define FRF_AB_MD_RDC_LBN 1
#define FRF_AB_MD_RDC_WIDTH 1
#define FRF_AB_MD_WRC_LBN 0
#define FRF_AB_MD_WRC_WIDTH 1

/* MD_PHY_ADR_REG: PHY management PHY address register */
#define FR_AB_MD_PHY_ADR 0x00000c30
#define FRF_AB_MD_PHY_ADR_LBN 0
#define FRF_AB_MD_PHY_ADR_WIDTH 16

/* MD_ID_REG: PHY management ID register */
#define FR_AB_MD_ID 0x00000c40
#define FRF_AB_MD_PRT_ADR_LBN 11
#define FRF_AB_MD_PRT_ADR_WIDTH 5
#define FRF_AB_MD_DEV_ADR_LBN 6
#define FRF_AB_MD_DEV_ADR_WIDTH 5

/* MD_STAT_REG: PHY management status & mask register */
#define FR_AB_MD_STAT 0x00000c50
#define FRF_AB_MD_PINT_LBN 4
#define FRF_AB_MD_PINT_WIDTH 1
#define FRF_AB_MD_DONE_LBN 3
#define FRF_AB_MD_DONE_WIDTH 1
#define FRF_AB_MD_BSERR_LBN 2
#define FRF_AB_MD_BSERR_WIDTH 1
#define FRF_AB_MD_LNFL_LBN 1
#define FRF_AB_MD_LNFL_WIDTH 1
#define FRF_AB_MD_BSY_LBN 0
#define FRF_AB_MD_BSY_WIDTH 1

/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
#define FR_AB_MAC_STAT_DMA 0x00000c60
#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48

/* MAC_CTRL_REG: Port MAC control register */
#define FR_AB_MAC_CTRL 0x00000c80
#define FRF_AB_MAC_XOFF_VAL_LBN 16
#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
#define FRF_AB_MAC_BCAD_ACPT_LBN 4
#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
#define FRF_AB_MAC_UC_PROM_LBN 3
#define FRF_AB_MAC_UC_PROM_WIDTH 1
#define FRF_AB_MAC_LINK_STATUS_LBN 2
#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
#define FRF_AB_MAC_SPEED_LBN 0
#define FRF_AB_MAC_SPEED_WIDTH 2
#define FFE_AB_MAC_SPEED_10G 3
#define FFE_AB_MAC_SPEED_1G 2
#define FFE_AB_MAC_SPEED_100M 1
#define FFE_AB_MAC_SPEED_10M 0

/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
#define FR_BB_GEN_MODE 0x00000c90
#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
#define FRF_BB_XG_PHY_INT_MASK_LBN 0
#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1

/* MAC_MC_HASH_REG0: Multicast address hash table */
#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
#define FRF_AB_MAC_MCAST_HASH0_LBN 0
#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128

/* MAC_MC_HASH_REG1: Multicast address hash table */
#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
#define FRF_AB_MAC_MCAST_HASH1_LBN 0
#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128

/* GM_CFG1_REG: GMAC configuration register 1 */
#define FR_AB_GM_CFG1 0x00000e00
#define FRF_AB_GM_SW_RST_LBN 31
#define FRF_AB_GM_SW_RST_WIDTH 1
#define FRF_AB_GM_SIM_RST_LBN 30
#define FRF_AB_GM_SIM_RST_WIDTH 1
#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
#define FRF_AB_GM_RST_RX_FUNC_LBN 17
#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
#define FRF_AB_GM_RST_TX_FUNC_LBN 16
#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
#define FRF_AB_GM_LOOP_LBN 8
#define FRF_AB_GM_LOOP_WIDTH 1
#define FRF_AB_GM_RX_FC_EN_LBN 5
#define FRF_AB_GM_RX_FC_EN_WIDTH 1
#define FRF_AB_GM_TX_FC_EN_LBN 4
#define FRF_AB_GM_TX_FC_EN_WIDTH 1
#define FRF_AB_GM_SYNC_RXEN_LBN 3
#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
#define FRF_AB_GM_RX_EN_LBN 2
#define FRF_AB_GM_RX_EN_WIDTH 1
#define FRF_AB_GM_SYNC_TXEN_LBN 1
#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
#define FRF_AB_GM_TX_EN_LBN 0
#define FRF_AB_GM_TX_EN_WIDTH 1

/* GM_CFG2_REG: GMAC configuration register 2 */
#define FR_AB_GM_CFG2 0x00000e10
#define FRF_AB_GM_PAMBL_LEN_LBN 12
#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
#define FRF_AB_GM_IF_MODE_LBN 8
#define FRF_AB_GM_IF_MODE_WIDTH 2
#define FFE_AB_IF_MODE_BYTE_MODE 2
#define FFE_AB_IF_MODE_NIBBLE_MODE 1
#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
#define FRF_AB_GM_LEN_CHK_LBN 4
#define FRF_AB_GM_LEN_CHK_WIDTH 1
#define FRF_AB_GM_PAD_CRC_EN_LBN 2
#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
#define FRF_AB_GM_CRC_EN_LBN 1
#define FRF_AB_GM_CRC_EN_WIDTH 1
#define FRF_AB_GM_FD_LBN 0
#define FRF_AB_GM_FD_WIDTH 1

/* GM_IPG_REG: GMAC IPG register */
#define FR_AB_GM_IPG 0x00000e20
#define FRF_AB_GM_NONB2B_IPG1_LBN 24
#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
#define FRF_AB_GM_NONB2B_IPG2_LBN 16
#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
#define FRF_AB_GM_B2B_IPG_LBN 0
#define FRF_AB_GM_B2B_IPG_WIDTH 7

/* GM_HD_REG: GMAC half duplex register */
#define FR_AB_GM_HD 0x00000e30
#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
#define FRF_AB_GM_BP_NO_BOFF_LBN 18
#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
#define FRF_AB_GM_DIS_BOFF_LBN 17
#define FRF_AB_GM_DIS_BOFF_WIDTH 1
#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
#define FRF_AB_GM_RTRY_LIMIT_LBN 12
#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
#define FRF_AB_GM_COL_WIN_LBN 0
#define FRF_AB_GM_COL_WIN_WIDTH 10

/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
#define FR_AB_GM_MAX_FLEN 0x00000e40
#define FRF_AB_GM_MAX_FLEN_LBN 0
#define FRF_AB_GM_MAX_FLEN_WIDTH 16

/* GM_TEST_REG: GMAC test register */
#define FR_AB_GM_TEST 0x00000e70
#define FRF_AB_GM_MAX_BOFF_LBN 3
#define FRF_AB_GM_MAX_BOFF_WIDTH 1
#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
#define FRF_AB_GM_TEST_PAUSE_LBN 1
#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
#define FRF_AB_GM_SHORT_SLOT_LBN 0
#define FRF_AB_GM_SHORT_SLOT_WIDTH 1

/* GM_ADR1_REG: GMAC station address register 1 */
#define FR_AB_GM_ADR1 0x00000f00
#define FRF_AB_GM_ADR_B0_LBN 24
#define FRF_AB_GM_ADR_B0_WIDTH 8
#define FRF_AB_GM_ADR_B1_LBN 16
#define FRF_AB_GM_ADR_B1_WIDTH 8
#define FRF_AB_GM_ADR_B2_LBN 8
#define FRF_AB_GM_ADR_B2_WIDTH 8
#define FRF_AB_GM_ADR_B3_LBN 0
#define FRF_AB_GM_ADR_B3_WIDTH 8

/* GM_ADR2_REG: GMAC station address register 2 */
#define FR_AB_GM_ADR2 0x00000f10
#define FRF_AB_GM_ADR_B4_LBN 24
#define FRF_AB_GM_ADR_B4_WIDTH 8
#define FRF_AB_GM_ADR_B5_LBN 16
#define FRF_AB_GM_ADR_B5_WIDTH 8

/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
#define FR_AB_GMF_CFG0 0x00000f20
#define FRF_AB_GMF_FTFENRPLY_LBN 20
#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
#define FRF_AB_GMF_STFENRPLY_LBN 19
#define FRF_AB_GMF_STFENRPLY_WIDTH 1
#define FRF_AB_GMF_FRFENRPLY_LBN 18
#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
#define FRF_AB_GMF_SRFENRPLY_LBN 17
#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
#define FRF_AB_GMF_WTMENRPLY_LBN 16
#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
#define FRF_AB_GMF_FTFENREQ_LBN 12
#define FRF_AB_GMF_FTFENREQ_WIDTH 1
#define FRF_AB_GMF_STFENREQ_LBN 11
#define FRF_AB_GMF_STFENREQ_WIDTH 1
#define FRF_AB_GMF_FRFENREQ_LBN 10
#define FRF_AB_GMF_FRFENREQ_WIDTH 1
#define FRF_AB_GMF_SRFENREQ_LBN 9
#define FRF_AB_GMF_SRFENREQ_WIDTH 1
#define FRF_AB_GMF_WTMENREQ_LBN 8
#define FRF_AB_GMF_WTMENREQ_WIDTH 1
#define FRF_AB_GMF_HSTRSTFT_LBN 4
#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
#define FRF_AB_GMF_HSTRSTST_LBN 3
#define FRF_AB_GMF_HSTRSTST_WIDTH 1
#define FRF_AB_GMF_HSTRSTFR_LBN 2
#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
#define FRF_AB_GMF_HSTRSTSR_LBN 1
#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
#define FRF_AB_GMF_HSTRSTWT_LBN 0
#define FRF_AB_GMF_HSTRSTWT_WIDTH 1

/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
#define FR_AB_GMF_CFG1 0x00000f30
#define FRF_AB_GMF_CFGFRTH_LBN 16
#define FRF_AB_GMF_CFGFRTH_WIDTH 5
#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16

/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
#define FR_AB_GMF_CFG2 0x00000f40
#define FRF_AB_GMF_CFGHWM_LBN 16
#define FRF_AB_GMF_CFGHWM_WIDTH 6
#define FRF_AB_GMF_CFGLWM_LBN 0
#define FRF_AB_GMF_CFGLWM_WIDTH 6

/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
#define FR_AB_GMF_CFG3 0x00000f50
#define FRF_AB_GMF_CFGHWMFT_LBN 16
#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
#define FRF_AB_GMF_CFGFTTH_LBN 0
#define FRF_AB_GMF_CFGFTTH_WIDTH 6

/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
#define FR_AB_GMF_CFG4 0x00000f60
#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18

/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
#define FR_AB_GMF_CFG5 0x00000f70
#define FRF_AB_GMF_CFGHDPLX_LBN 22
#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
#define FRF_AB_GMF_SRFULL_LBN 21
#define FRF_AB_GMF_SRFULL_WIDTH 1
#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
#define FRF_AB_GMF_CFGBYTMODE_LBN 19
#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
#define FRF_AB_GMF_HSTDRPLT64_LBN 18
#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18

/* TX_SRC_MAC_TBL: Transmit IP source address filter table */
#define FR_BB_TX_SRC_MAC_TBL 0x00001000
#define FR_BB_TX_SRC_MAC_TBL_STEP 16
#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48

/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
#define FR_BB_TX_SRC_MAC_CTL 0x00001100
#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
#define FRF_BB_TX_MAC_QID_SEL_LBN 0
#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3

/* XM_ADR_LO_REG: XGMAC address register low */
#define FR_AB_XM_ADR_LO 0x00001200
#define FRF_AB_XM_ADR_LO_LBN 0
#define FRF_AB_XM_ADR_LO_WIDTH 32

/* XM_ADR_HI_REG: XGMAC address register high */
#define FR_AB_XM_ADR_HI 0x00001210
#define FRF_AB_XM_ADR_HI_LBN 0
#define FRF_AB_XM_ADR_HI_WIDTH 16

/* XM_GLB_CFG_REG: XGMAC global configuration */
#define FR_AB_XM_GLB_CFG 0x00001220
#define FRF_AB_XM_RMTFLT_GEN_LBN 17
#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
#define FRF_AB_XM_DEBUG_MODE_LBN 16
#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
#define FRF_AB_XM_RX_STAT_EN_LBN 11
#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
#define FRF_AB_XM_TX_STAT_EN_LBN 10
#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
#define FRF_AB_XM_WAN_MODE_LBN 5
#define FRF_AB_XM_WAN_MODE_WIDTH 1
#define FRF_AB_XM_INTCLR_MODE_LBN 3
#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
#define FRF_AB_XM_CORE_RST_LBN 0
#define FRF_AB_XM_CORE_RST_WIDTH 1

/* XM_TX_CFG_REG: XGMAC transmit configuration */
#define FR_AB_XM_TX_CFG 0x00001230
#define FRF_AB_XM_TX_PROG_LBN 24
#define FRF_AB_XM_TX_PROG_WIDTH 1
#define FRF_AB_XM_IPG_LBN 16
#define FRF_AB_XM_IPG_WIDTH 4
#define FRF_AB_XM_FCNTL_LBN 10
#define FRF_AB_XM_FCNTL_WIDTH 1
#define FRF_AB_XM_TXCRC_LBN 8
#define FRF_AB_XM_TXCRC_WIDTH 1
#define FRF_AB_XM_EDRC_LBN 6
#define FRF_AB_XM_EDRC_WIDTH 1
#define FRF_AB_XM_AUTO_PAD_LBN 5
#define FRF_AB_XM_AUTO_PAD_WIDTH 1
#define FRF_AB_XM_TX_PRMBL_LBN 2
#define FRF_AB_XM_TX_PRMBL_WIDTH 1
#define FRF_AB_XM_TXEN_LBN 1
#define FRF_AB_XM_TXEN_WIDTH 1
#define FRF_AB_XM_TX_RST_LBN 0
#define FRF_AB_XM_TX_RST_WIDTH 1

/* XM_RX_CFG_REG: XGMAC receive configuration */
#define FR_AB_XM_RX_CFG 0x00001240
#define FRF_AB_XM_PASS_LENERR_LBN 26
#define FRF_AB_XM_PASS_LENERR_WIDTH 1
#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_REJ_BCAST_LBN 20
#define FRF_AB_XM_REJ_BCAST_WIDTH 1
#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
#define FRF_AB_XM_AUTO_DEPAD_LBN 8
#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
#define FRF_AB_XM_RXCRC_LBN 3
#define FRF_AB_XM_RXCRC_WIDTH 1
#define FRF_AB_XM_RX_PRMBL_LBN 2
#define FRF_AB_XM_RX_PRMBL_WIDTH 1
#define FRF_AB_XM_RXEN_LBN 1
#define FRF_AB_XM_RXEN_WIDTH 1
#define FRF_AB_XM_RX_RST_LBN 0
#define FRF_AB_XM_RX_RST_WIDTH 1

/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */
#define FR_AB_XM_MGT_INT_MASK 0x00001250
#define FRF_AB_XM_MSK_STA_INTR_LBN 16
#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_MSK_RMTFLT_LBN 1
#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
#define FRF_AB_XM_MSK_LCLFLT_LBN 0
#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1

/* XM_FC_REG: XGMAC flow control register */
1908#define FR_AB_XM_FC 0x00001270
1909#define FRF_AB_XM_PAUSE_TIME_LBN 16
1910#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
1911#define FRF_AB_XM_RX_MAC_STAT_LBN 11
1912#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
1913#define FRF_AB_XM_TX_MAC_STAT_LBN 10
1914#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
1915#define FRF_AB_XM_MCNTL_PASS_LBN 8
1916#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
1917#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
1918#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
1919#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
1920#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
1921#define FRF_AB_XM_ZPAUSE_LBN 2
1922#define FRF_AB_XM_ZPAUSE_WIDTH 1
1923#define FRF_AB_XM_XMIT_PAUSE_LBN 1
1924#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
1925#define FRF_AB_XM_DIS_FCNTL_LBN 0
1926#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
1927
1928/* XM_PAUSE_TIME_REG: XGMAC pause time register */
1929#define FR_AB_XM_PAUSE_TIME 0x00001290
1930#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
1931#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
1932#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
1933#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
1934
1935/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
1936#define FR_AB_XM_TX_PARAM 0x000012d0
1937#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
1938#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
1939#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
1940#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
1941#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
1942#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
1943#define FRF_AB_XM_PAD_CHAR_LBN 0
1944#define FRF_AB_XM_PAD_CHAR_WIDTH 8
1945
1946/* XM_RX_PARAM_REG: XGMAC receive parameter register */
1947#define FR_AB_XM_RX_PARAM 0x000012e0
1948#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
1949#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
1950#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
1951#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
1952
1953/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
1954#define FR_AB_XM_MGT_INT_MSK 0x000012f0
1955#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
1956#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
1957#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
1958#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
1959#define FRF_AB_XM_PRMBLE_ERR_LBN 2
1960#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
1961#define FRF_AB_XM_RMTFLT_LBN 1
1962#define FRF_AB_XM_RMTFLT_WIDTH 1
1963#define FRF_AB_XM_LCLFLT_LBN 0
1964#define FRF_AB_XM_LCLFLT_WIDTH 1
1965
/*
 * XGXS/XAUI SerDes ("XX_") register definitions.  The four XAUI lanes are
 * named A-D, with per-lane copies of most control/status bits.
 * FR_ = register address, FRF_ = register field; _LBN = lowest bit number,
 * _WIDTH = field width in bits.
 */
1966/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
1967#define FR_AB_XX_PWR_RST 0x00001300
1968#define FRF_AB_XX_PWRDND_SIG_LBN 31
1969#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
1970#define FRF_AB_XX_PWRDNC_SIG_LBN 30
1971#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
1972#define FRF_AB_XX_PWRDNB_SIG_LBN 29
1973#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
1974#define FRF_AB_XX_PWRDNA_SIG_LBN 28
1975#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
1976#define FRF_AB_XX_SIM_MODE_LBN 27
1977#define FRF_AB_XX_SIM_MODE_WIDTH 1
1978#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
1979#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
1980#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
1981#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
1982#define FRF_AB_XX_RESETD_SIG_LBN 23
1983#define FRF_AB_XX_RESETD_SIG_WIDTH 1
1984#define FRF_AB_XX_RESETC_SIG_LBN 22
1985#define FRF_AB_XX_RESETC_SIG_WIDTH 1
1986#define FRF_AB_XX_RESETB_SIG_LBN 21
1987#define FRF_AB_XX_RESETB_SIG_WIDTH 1
1988#define FRF_AB_XX_RESETA_SIG_LBN 20
1989#define FRF_AB_XX_RESETA_SIG_WIDTH 1
1990#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
1991#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
1992#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
1993#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
1994#define FRF_AB_XX_SD_RST_ACT_LBN 16
1995#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
/* _EN bits (below) gate whether the corresponding _SIG bits take effect. */
1996#define FRF_AB_XX_PWRDND_EN_LBN 15
1997#define FRF_AB_XX_PWRDND_EN_WIDTH 1
1998#define FRF_AB_XX_PWRDNC_EN_LBN 14
1999#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
2000#define FRF_AB_XX_PWRDNB_EN_LBN 13
2001#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
2002#define FRF_AB_XX_PWRDNA_EN_LBN 12
2003#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
2004#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
2005#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
2006#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
2007#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
2008#define FRF_AB_XX_RESETD_EN_LBN 7
2009#define FRF_AB_XX_RESETD_EN_WIDTH 1
2010#define FRF_AB_XX_RESETC_EN_LBN 6
2011#define FRF_AB_XX_RESETC_EN_WIDTH 1
2012#define FRF_AB_XX_RESETB_EN_LBN 5
2013#define FRF_AB_XX_RESETB_EN_WIDTH 1
2014#define FRF_AB_XX_RESETA_EN_LBN 4
2015#define FRF_AB_XX_RESETA_EN_WIDTH 1
2016#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
2017#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
2018#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
2019#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
2020#define FRF_AB_XX_RST_XX_EN_LBN 0
2021#define FRF_AB_XX_RST_XX_EN_WIDTH 1
2022
2023/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
2024#define FR_AB_XX_SD_CTL 0x00001310
2025#define FRF_AB_XX_TERMADJ1_LBN 17
2026#define FRF_AB_XX_TERMADJ1_WIDTH 1
2027#define FRF_AB_XX_TERMADJ0_LBN 16
2028#define FRF_AB_XX_TERMADJ0_WIDTH 1
2029#define FRF_AB_XX_HIDRVD_LBN 15
2030#define FRF_AB_XX_HIDRVD_WIDTH 1
2031#define FRF_AB_XX_LODRVD_LBN 14
2032#define FRF_AB_XX_LODRVD_WIDTH 1
2033#define FRF_AB_XX_HIDRVC_LBN 13
2034#define FRF_AB_XX_HIDRVC_WIDTH 1
2035#define FRF_AB_XX_LODRVC_LBN 12
2036#define FRF_AB_XX_LODRVC_WIDTH 1
2037#define FRF_AB_XX_HIDRVB_LBN 11
2038#define FRF_AB_XX_HIDRVB_WIDTH 1
2039#define FRF_AB_XX_LODRVB_LBN 10
2040#define FRF_AB_XX_LODRVB_WIDTH 1
2041#define FRF_AB_XX_HIDRVA_LBN 9
2042#define FRF_AB_XX_HIDRVA_WIDTH 1
2043#define FRF_AB_XX_LODRVA_LBN 8
2044#define FRF_AB_XX_LODRVA_WIDTH 1
2045#define FRF_AB_XX_LPBKD_LBN 3
2046#define FRF_AB_XX_LPBKD_WIDTH 1
2047#define FRF_AB_XX_LPBKC_LBN 2
2048#define FRF_AB_XX_LPBKC_WIDTH 1
2049#define FRF_AB_XX_LPBKB_LBN 1
2050#define FRF_AB_XX_LPBKB_WIDTH 1
2051#define FRF_AB_XX_LPBKA_LBN 0
2052#define FRF_AB_XX_LPBKA_WIDTH 1
2053
2054/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
2055#define FR_AB_XX_TXDRV_CTL 0x00001320
2056#define FRF_AB_XX_DEQD_LBN 28
2057#define FRF_AB_XX_DEQD_WIDTH 4
2058#define FRF_AB_XX_DEQC_LBN 24
2059#define FRF_AB_XX_DEQC_WIDTH 4
2060#define FRF_AB_XX_DEQB_LBN 20
2061#define FRF_AB_XX_DEQB_WIDTH 4
2062#define FRF_AB_XX_DEQA_LBN 16
2063#define FRF_AB_XX_DEQA_WIDTH 4
2064#define FRF_AB_XX_DTXD_LBN 12
2065#define FRF_AB_XX_DTXD_WIDTH 4
2066#define FRF_AB_XX_DTXC_LBN 8
2067#define FRF_AB_XX_DTXC_WIDTH 4
2068#define FRF_AB_XX_DTXB_LBN 4
2069#define FRF_AB_XX_DTXB_WIDTH 4
2070#define FRF_AB_XX_DTXA_LBN 0
2071#define FRF_AB_XX_DTXA_WIDTH 4
2072
2073/* XX_PRBS_CTL_REG: per-lane PRBS generator/checker control (pattern select, invert, checker enable for RX and TX on channels 0-3) */
2074#define FR_AB_XX_PRBS_CTL 0x00001330
2075#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
2076#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
2077#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
2078#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
2079#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
2080#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
2081#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
2082#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
2083#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
2084#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
2085#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
2086#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
2087#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
2088#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
2089#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
2090#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
2091#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
2092#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
2093#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
2094#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
2095#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
2096#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
2097#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
2098#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
2099#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
2100#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
2101#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
2102#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
2103#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
2104#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
2105#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
2106#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
2107#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
2108#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
2109#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
2110#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
2111#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
2112#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
2113#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
2114#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
2115#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
2116#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
2117#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
2118#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
2119#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
2120#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
2121#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
2122#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
2123
2124/* XX_PRBS_CHK_REG: per-lane PRBS checker status (LFSR lock, free-run and error-check indications for channels 0-3) */
2125#define FR_AB_XX_PRBS_CHK 0x00001340
2126#define FRF_AB_XX_REV_LB_EN_LBN 16
2127#define FRF_AB_XX_REV_LB_EN_WIDTH 1
2128#define FRF_AB_XX_CH3_DEG_DET_LBN 15
2129#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
2130#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
2131#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
2132#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
2133#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
2134#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
2135#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
2136#define FRF_AB_XX_CH2_DEG_DET_LBN 11
2137#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
2138#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
2139#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
2140#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
2141#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
2142#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
2143#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
2144#define FRF_AB_XX_CH1_DEG_DET_LBN 7
2145#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
2146#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
2147#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
2148#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
2149#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
2150#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
2151#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
2152#define FRF_AB_XX_CH0_DEG_DET_LBN 3
2153#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
2154#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
2155#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
2156#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
2157#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
2158#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
2159#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
2160
2161/* XX_PRBS_ERR_REG: per-lane 8-bit PRBS error counters (channels 0-3) */
2162#define FR_AB_XX_PRBS_ERR 0x00001350
2163#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
2164#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
2165#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
2166#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
2167#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
2168#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
2169#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
2170#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
2171
2172/* XX_CORE_STAT_REG: XAUI XGXS core status register */
2173#define FR_AB_XX_CORE_STAT 0x00001360
2174#define FRF_AB_XX_FORCE_SIG3_LBN 31
2175#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
2176#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
2177#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
2178#define FRF_AB_XX_FORCE_SIG2_LBN 29
2179#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
2180#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
2181#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
2182#define FRF_AB_XX_FORCE_SIG1_LBN 27
2183#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
2184#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
2185#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
2186#define FRF_AB_XX_FORCE_SIG0_LBN 25
2187#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
2188#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
2189#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
2190#define FRF_AB_XX_XGXS_LB_EN_LBN 23
2191#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
2192#define FRF_AB_XX_XGMII_LB_EN_LBN 22
2193#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
2194#define FRF_AB_XX_MATCH_FAULT_LBN 21
2195#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
2196#define FRF_AB_XX_ALIGN_DONE_LBN 20
2197#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
2198#define FRF_AB_XX_SYNC_STAT3_LBN 19
2199#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
2200#define FRF_AB_XX_SYNC_STAT2_LBN 18
2201#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
2202#define FRF_AB_XX_SYNC_STAT1_LBN 17
2203#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
2204#define FRF_AB_XX_SYNC_STAT0_LBN 16
2205#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
2206#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
2207#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
2208#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
2209#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
2210#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
2211#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
2212#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
2213#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
2214#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
2215#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
2216#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
2217#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
2218#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
2219#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
2220#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
2221#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
2222#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
2223#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
2224#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
2225#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
2226#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
2227#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
2228#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
2229#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
2230#define FRF_AB_XX_DISPERR_CH3_LBN 3
2231#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
2232#define FRF_AB_XX_DISPERR_CH2_LBN 2
2233#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
2234#define FRF_AB_XX_DISPERR_CH1_LBN 1
2235#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
2236#define FRF_AB_XX_DISPERR_CH0_LBN 0
2237#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
2238
/*
 * Descriptor-queue and event-queue pointer tables.
 * The _KER variants are the small revision-A tables accessed directly by
 * the kernel driver; B-revision and later chips expose larger tables at
 * separate base addresses (one row per queue).  _STEP is the byte stride
 * between rows and _ROWS the row count — assumed from the driver's table
 * convention, confirm against the file header.
 * Fields with _LBN >= 64 live in the upper half of the 128-bit row.
 */
2239/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
2240#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
2241#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
2242#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
2243/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
2244#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
2245#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
2246#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
2247#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
2248#define FRF_CZ_RX_HDR_SPLIT_LBN 90
2249#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
2250#define FRF_AA_RX_RESET_LBN 89
2251#define FRF_AA_RX_RESET_WIDTH 1
2252#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
2253#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
2254#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
2255#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
2256#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
2257#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
2258#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
2259#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
2260#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
2261#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
2262#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
2263#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
2264#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
2265#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
2266#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
2267#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
2268#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
2269#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
2270#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
2271#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
2272#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
2273#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
/* FFE_ values below are the encodings of the 2-bit DESCQ_SIZE field. */
2274#define FFE_AZ_RX_DESCQ_SIZE_4K 3
2275#define FFE_AZ_RX_DESCQ_SIZE_2K 2
2276#define FFE_AZ_RX_DESCQ_SIZE_1K 1
2277#define FFE_AZ_RX_DESCQ_SIZE_512 0
2278#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
2279#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
2280#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
2281#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
2282#define FRF_AZ_RX_DESCQ_EN_LBN 0
2283#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
2284
2285/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
2286#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
2287#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
2288#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
2289/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
2290#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
2291#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
2292#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
2293#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
2294#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
2295#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
2296#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
2297#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
2298#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
2299#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
2300#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
2301#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
2302#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
2303#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
2304#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
2305#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
2306#define FRF_AZ_TX_DESCQ_EN_LBN 88
2307#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
2308#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
2309#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
2310#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
2311#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
2312#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
2313#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
2314#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
2315#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
2316#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
2317#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
2318#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
2319#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
2320#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
2321#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
2322#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
2323#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
2324#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
2325#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
2326#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
2327#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
2328#define FFE_AZ_TX_DESCQ_SIZE_4K 3
2329#define FFE_AZ_TX_DESCQ_SIZE_2K 2
2330#define FFE_AZ_TX_DESCQ_SIZE_1K 1
2331#define FFE_AZ_TX_DESCQ_SIZE_512 0
2332#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
2333#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
2334#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
2335#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
2336
2337/* EVQ_PTR_TBL_KER: Event queue pointer table */
2338#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
2339#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
2340#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
2341/* EVQ_PTR_TBL: Event queue pointer table */
2342#define FR_BZ_EVQ_PTR_TBL 0x00f60000
2343#define FR_BZ_EVQ_PTR_TBL_STEP 16
2344#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
2345#define FR_BB_EVQ_PTR_TBL_ROWS 4096
2346#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
2347#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
/* Bit 39 is revision-dependent: wakeup/interrupt enable on A/B, DoS protect on C+. */
2348#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
2349#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
2350#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
2351#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
2352#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
2353#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
2354#define FRF_AZ_EVQ_EN_LBN 23
2355#define FRF_AZ_EVQ_EN_WIDTH 1
2356#define FRF_AZ_EVQ_SIZE_LBN 20
2357#define FRF_AZ_EVQ_SIZE_WIDTH 3
2358#define FFE_AZ_EVQ_SIZE_32K 6
2359#define FFE_AZ_EVQ_SIZE_16K 5
2360#define FFE_AZ_EVQ_SIZE_8K 4
2361#define FFE_AZ_EVQ_SIZE_4K 3
2362#define FFE_AZ_EVQ_SIZE_2K 2
2363#define FFE_AZ_EVQ_SIZE_1K 1
2364#define FFE_AZ_EVQ_SIZE_512 0
2365#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
2366#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
2367
/*
 * Buffer table, receive/transmit filter tables, timer/pacing/RSS tables
 * and MC shared memory.  FR_ = table base address, FRF_ = row field,
 * FFE_ = field enum value; _STEP = byte stride between rows, _ROWS = row
 * count; _LBN/_WIDTH locate each field within a row.
 *
 * NOTE(review): BUF_HALF_TBL and BUF_FULL_TBL share base addresses
 * (0x00018000 on rev A, 0x00800000 on rev B+) — they appear to be the same
 * SRAM table viewed in the two buffer-table modes named in the comments;
 * confirm against the Falcon datasheet.
 */
2368/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */
2369#define FR_AA_BUF_HALF_TBL_KER 0x00018000
2370#define FR_AA_BUF_HALF_TBL_KER_STEP 8
2371#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
2372/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */
2373#define FR_BZ_BUF_HALF_TBL 0x00800000
2374#define FR_BZ_BUF_HALF_TBL_STEP 8
2375#define FR_CZ_BUF_HALF_TBL_ROWS 147456
2376#define FR_BB_BUF_HALF_TBL_ROWS 524288
/* Half mode packs two (address, owner) entries — "even" and "odd" — per row. */
2377#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
2378#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
2379#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
2380#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
2381#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
2382#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
2383#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
2384#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
2385
2386/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */
2387#define FR_AA_BUF_FULL_TBL_KER 0x00018000
2388#define FR_AA_BUF_FULL_TBL_KER_STEP 8
2389#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
2390/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */
2391#define FR_BZ_BUF_FULL_TBL 0x00800000
2392#define FR_BZ_BUF_FULL_TBL_STEP 8
2393#define FR_CZ_BUF_FULL_TBL_ROWS 147456
2394#define FR_BB_BUF_FULL_TBL_ROWS 917504
2395#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
2396#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
2397#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
2398#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
2399#define FRF_AZ_BUF_ADR_REGION_LBN 48
2400#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
2401#define FFE_AZ_BUF_ADR_REGN3 3
2402#define FFE_AZ_BUF_ADR_REGN2 2
2403#define FFE_AZ_BUF_ADR_REGN1 1
2404#define FFE_AZ_BUF_ADR_REGN0 0
2405#define FRF_AZ_BUF_ADR_FBUF_LBN 14
2406#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
2407#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
2408#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
2409
2410/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
2411#define FR_BZ_RX_FILTER_TBL0 0x00f00000
2412#define FR_BZ_RX_FILTER_TBL0_STEP 32
2413#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
2414/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
2415#define FR_BB_RX_FILTER_TBL1 0x00f00010
2416#define FR_BB_RX_FILTER_TBL1_STEP 32
2417#define FR_BB_RX_FILTER_TBL1_ROWS 8192
2418#define FRF_BZ_RSS_EN_LBN 110
2419#define FRF_BZ_RSS_EN_WIDTH 1
2420#define FRF_BZ_SCATTER_EN_LBN 109
2421#define FRF_BZ_SCATTER_EN_WIDTH 1
2422#define FRF_BZ_TCP_UDP_LBN 108
2423#define FRF_BZ_TCP_UDP_WIDTH 1
2424#define FRF_BZ_RXQ_ID_LBN 96
2425#define FRF_BZ_RXQ_ID_WIDTH 12
2426#define FRF_BZ_DEST_IP_LBN 64
2427#define FRF_BZ_DEST_IP_WIDTH 32
2428#define FRF_BZ_DEST_PORT_TCP_LBN 48
2429#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
2430#define FRF_BZ_SRC_IP_LBN 16
2431#define FRF_BZ_SRC_IP_WIDTH 32
/* Field is shared: TCP source port, or UDP destination port. */
2432#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
2433#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
2434
/* NOTE(review): FR_CZ_RX_MAC_FILTER_TBL0 reuses address 0x00f00010
 * (= FR_BB_RX_FILTER_TBL1) — revision-dependent overlay, confirm. */
2435/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
2436#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
2437#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
2438#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
2439#define FRF_CZ_RMFT_RSS_EN_LBN 75
2440#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
2441#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
2442#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
2443#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
2444#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
2445#define FRF_CZ_RMFT_RXQ_ID_LBN 61
2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
2449#define FRF_CZ_RMFT_DEST_MAC_LBN 16
2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44
2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0
2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
2453
2454/* TIMER_TBL: Timer table */
2455#define FR_BZ_TIMER_TBL 0x00f70000
2456#define FR_BZ_TIMER_TBL_STEP 16
2457#define FR_CZ_TIMER_TBL_ROWS 1024
2458#define FR_BB_TIMER_TBL_ROWS 4096
2459#define FRF_CZ_TIMER_Q_EN_LBN 33
2460#define FRF_CZ_TIMER_Q_EN_WIDTH 1
2461#define FRF_CZ_INT_ARMD_LBN 32
2462#define FRF_CZ_INT_ARMD_WIDTH 1
2463#define FRF_CZ_INT_PEND_LBN 31
2464#define FRF_CZ_INT_PEND_WIDTH 1
2465#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
2466#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
2467#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
2468#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
2469#define FRF_CZ_TIMER_MODE_LBN 14
2470#define FRF_CZ_TIMER_MODE_WIDTH 2
2471#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
2472#define FFE_CZ_TIMER_MODE_TRIG_START 2
2473#define FFE_CZ_TIMER_MODE_IMMED_START 1
2474#define FFE_CZ_TIMER_MODE_DIS 0
2475#define FRF_BB_TIMER_MODE_LBN 12
2476#define FRF_BB_TIMER_MODE_WIDTH 2
/* NOTE(review): INT_HLDOFF and TRIG_START both encode as 2 on rev B
 * (unlike the CZ encoding above where they differ) — confirm against the
 * Falcon B0 datasheet before relying on the distinction. */
2477#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
2478#define FFE_BB_TIMER_MODE_TRIG_START 2
2479#define FFE_BB_TIMER_MODE_IMMED_START 1
2480#define FFE_BB_TIMER_MODE_DIS 0
2481#define FRF_CZ_TIMER_VAL_LBN 0
2482#define FRF_CZ_TIMER_VAL_WIDTH 14
2483#define FRF_BB_TIMER_VAL_LBN 0
2484#define FRF_BB_TIMER_VAL_WIDTH 12
2485
2486/* TX_PACE_TBL: Transmit pacing table */
2487#define FR_BZ_TX_PACE_TBL 0x00f80000
2488#define FR_BZ_TX_PACE_TBL_STEP 16
2489#define FR_CZ_TX_PACE_TBL_ROWS 1024
2490#define FR_BB_TX_PACE_TBL_ROWS 4096
2491#define FRF_BZ_TX_PACE_LBN 0
2492#define FRF_BZ_TX_PACE_WIDTH 5
2493
2494/* RX_INDIRECTION_TBL: RX Indirection Table */
2495#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
2496#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
2497#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
2498#define FRF_BZ_IT_QUEUE_LBN 0
2499#define FRF_BZ_IT_QUEUE_WIDTH 6
2500
2501/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
2502#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
2503#define FR_CZ_TX_FILTER_TBL0_STEP 16
2504#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
2505#define FRF_CZ_TIFT_TCP_UDP_LBN 108
2506#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
2507#define FRF_CZ_TIFT_TXQ_ID_LBN 96
2508#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
2509#define FRF_CZ_TIFT_DEST_IP_LBN 64
2510#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
2511#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
2512#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
2513#define FRF_CZ_TIFT_SRC_IP_LBN 16
2514#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
2515#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
2516#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
2517
2518/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
2519#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
2520#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
2521#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
2522#define FRF_CZ_TMFT_TXQ_ID_LBN 61
2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
2526#define FRF_CZ_TMFT_SRC_MAC_LBN 16
2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44
2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0
2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
2530
2531/* MC_TREG_SMEM: MC Shared Memory */
2532#define FR_CZ_MC_TREG_SMEM 0x00ff0000
2533#define FR_CZ_MC_TREG_SMEM_STEP 4
2534#define FR_CZ_MC_TREG_SMEM_ROWS 512
2535#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
2536#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
2537
/*
 * MSI-X vector/PBA tables and SRAM debug window.  The field layout
 * (FRF_BZ_*) is shared between revisions; only the base address and row
 * count differ, so the _STEP for the later revision is shown commented
 * out rather than redefined.  FR_CZ_* MSI-X bases start at 0 — presumably
 * because they live in a separate BAR on C-revision parts; confirm.
 */
2538/* MSIX_VECTOR_TABLE: MSIX Vector Table */
2539#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
2540#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
2541#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
2542/* MSIX_VECTOR_TABLE: MSIX Vector Table */
2543#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
2544/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
2545#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
/* Standard PCI MSI-X vector entry layout: address lo/hi, data, mask bit. */
2546#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
2547#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
2548#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
2549#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
2550#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
2551#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
2552#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
2553#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
2554#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
2555#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
2556
2557/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
2558#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
2559#define FR_BZ_MSIX_PBA_TABLE_STEP 4
2560#define FR_BB_MSIX_PBA_TABLE_ROWS 2
2561/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
2562#define FR_CZ_MSIX_PBA_TABLE 0x00008000
2563/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
2564#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
2565#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
2566#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
2567
2568/* SRM_DBG_REG: SRAM debug access */
2569#define FR_BZ_SRM_DBG 0x03000000
2570#define FR_BZ_SRM_DBG_STEP 8
2571#define FR_CZ_SRM_DBG_ROWS 262144
2572#define FR_BB_SRM_DBG_ROWS 2097152
2573#define FRF_BZ_SRM_DBG_LBN 0
2574#define FRF_BZ_SRM_DBG_WIDTH 64
2575
2576/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
2577#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
2578#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
2579#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
2580#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
2581#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
2582
/*
 * Pseudo-structure definitions used to decode event-queue entries and
 * related data structures (not memory-mapped registers).
 * FSF_ = structure field (_LBN/_WIDTH locate it within the structure),
 * FSE_ = enumerated value for the preceding field.  The revision infix
 * (AA/AB/AZ/BB/BZ/CZ) follows the same convention as the FR_/FRF_
 * register names elsewhere in this file.
 */
2583/* DRIVER_EV */
/* Subcode values (FSE_*) select which driver event this entry carries. */
2584#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
2585#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
2586#define FSE_BZ_TX_DSC_ERROR_EV 15
2587#define FSE_BZ_RX_DSC_ERROR_EV 14
2588#define FSE_AA_RX_RECOVER_EV 11
2589#define FSE_AZ_TIMER_EV 10
2590#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
2591#define FSE_AZ_WAKE_UP_EV 6
2592#define FSE_AZ_SRM_UPD_DONE_EV 5
2593#define FSE_AB_EVQ_NOT_EN_EV 3
2594#define FSE_AZ_EVQ_INIT_DONE_EV 2
2595#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
2596#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
2597#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
2598#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
2599
2600/* EVENT_ENTRY */
/* Top 4 bits of every event give its type; the rest is type-specific data. */
2601#define FSF_AZ_EV_CODE_LBN 60
2602#define FSF_AZ_EV_CODE_WIDTH 4
2603#define FSE_CZ_EV_CODE_MCDI_EV 12
2604#define FSE_CZ_EV_CODE_USER_EV 8
2605#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
2606#define FSE_AZ_EV_CODE_GLOBAL_EV 6
2607#define FSE_AZ_EV_CODE_DRIVER_EV 5
2608#define FSE_AZ_EV_CODE_TX_EV 2
2609#define FSE_AZ_EV_CODE_RX_EV 0
2610#define FSF_AZ_EV_DATA_LBN 0
2611#define FSF_AZ_EV_DATA_WIDTH 60
2612
2613/* GLOBAL_EV */
2614#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
2615#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
2616#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
2617#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
2618#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
2619#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
2620#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
2621#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
2622#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
2623#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
2624#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
2625#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
2626
2627/* LEGACY_INT_VEC */
2628#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
2629#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
2630#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
2631#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
2632#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
2633#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
2634#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
2635#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
2636#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
2637#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
2638
2639/* MC_XGMAC_FLTR_RULE_DEF */
2640#define FSF_CZ_MC_XFRC_MODE_LBN 416
2641#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
2642#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
2643#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
2644#define FSF_CZ_MC_XFRC_HASH_LBN 384
2645#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
2646#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
2647#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
2648#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
2649#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
2650#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
2651#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
2652
2653/* RX_EV */
2654#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
2655#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
2656#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
2657#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
2658#define FSF_AZ_RX_EV_PKT_OK_LBN 56
2659#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
2660#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
2661#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
2662#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
2663#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
2664#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
2665#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
2666#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
2667#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
2668#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
2669#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
2670#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
2671#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
2672#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
2673#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
2674#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
2675#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
2676#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
2677#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
2678#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
2679#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
2680#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
2681#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
2682#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
2683#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
2684#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
2685#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
2686#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
2687#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
2688#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
2689#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
2690#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
2691#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
2692#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
2693#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
2694#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
2695#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
2696#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
2697#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
2698#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
2699#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
2700#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
2701#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
2702#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
2703#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
2704#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
2705#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
2706#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
2707#define FSF_AZ_RX_EV_PORT_LBN 30
2708#define FSF_AZ_RX_EV_PORT_WIDTH 1
2709#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
2710#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
2711#define FSF_AZ_RX_EV_SOP_LBN 15
2712#define FSF_AZ_RX_EV_SOP_WIDTH 1
2713#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
2714#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
2715#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
2716#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
2717#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
2718#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
2719#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
2720#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
2721
2722/* RX_KER_DESC */
2723#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
2724#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
2725#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
2726#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
2727#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
2728#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
2729
2730/* RX_USER_DESC */
2731#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
2732#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
2733#define FSF_AZ_RX_USER_BUF_ID_LBN 0
2734#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
2735
2736/* TX_EV */
2737#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
2738#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
2739#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
2740#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
2741#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
2742#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
2743#define FSF_AZ_TX_EV_PORT_LBN 16
2744#define FSF_AZ_TX_EV_PORT_WIDTH 1
2745#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
2746#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
2747#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
2748#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
2749#define FSF_AZ_TX_EV_COMP_LBN 12
2750#define FSF_AZ_TX_EV_COMP_WIDTH 1
2751#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
2752#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
2753
2754/* TX_KER_DESC */
2755#define FSF_AZ_TX_KER_CONT_LBN 62
2756#define FSF_AZ_TX_KER_CONT_WIDTH 1
2757#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
2758#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
2759#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
2760#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
2761#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
2762#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
2763
2764/* TX_USER_DESC */
2765#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
2766#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
2767#define FSF_AZ_TX_USER_CONT_LBN 46
2768#define FSF_AZ_TX_USER_CONT_WIDTH 1
2769#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
2770#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
2771#define FSF_AZ_TX_USER_BUF_ID_LBN 13
2772#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
2773#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
2774#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
2775
2776/* USER_EV */
2777#define FSF_CZ_USER_QID_LBN 32
2778#define FSF_CZ_USER_QID_WIDTH 10
2779#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
2780#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
2781
2782/**************************************************************************
2783 *
2784 * Falcon B0 PCIe core indirect registers
2785 *
2786 **************************************************************************
2787 */
2788
2789#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
2790
2791#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
2792
2793#define FPCR_BB_ACK_RPL_TIMER 0x700
2794#define FPCRF_BB_ACK_TL_LBN 0
2795#define FPCRF_BB_ACK_TL_WIDTH 16
2796#define FPCRF_BB_RPL_TL_LBN 16
2797#define FPCRF_BB_RPL_TL_WIDTH 16
2798
2799#define FPCR_BB_ACK_FREQ 0x70C
2800#define FPCRF_BB_ACK_FREQ_LBN 0
2801#define FPCRF_BB_ACK_FREQ_WIDTH 7
2802
2803/**************************************************************************
2804 *
2805 * Pseudo-registers and fields
2806 *
2807 **************************************************************************
2808 */
2809
2810/* Interrupt acknowledge work-around register (A0/A1 only) */
2811#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
2812
2813/* EE_SPI_HCMD_REG: SPI host command register */
2814/* Values for the EE_SPI_HCMD_SF_SEL register field */
2815#define FFE_AB_SPI_DEVICE_EEPROM 0
2816#define FFE_AB_SPI_DEVICE_FLASH 1
2817
2818/* NIC_STAT_REG: NIC status register */
2819#define FRF_AB_STRAP_10G_LBN 2
2820#define FRF_AB_STRAP_10G_WIDTH 1
2821#define FRF_AA_STRAP_PCIE_LBN 0
2822#define FRF_AA_STRAP_PCIE_WIDTH 1
2823
2824/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
2825#define FRF_AZ_FATAL_INTR_LBN 0
2826#define FRF_AZ_FATAL_INTR_WIDTH 12
2827
2828/* SRM_CFG_REG: SRAM configuration register */
2829/* We treat the number of SRAM banks and bank size as a single field */
2830#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
2831#define FRF_AZ_SRM_NB_SZ_WIDTH \
2832 (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
2833#define FFE_AB_SRM_NB1_SZ2M 0
2834#define FFE_AB_SRM_NB1_SZ4M 1
2835#define FFE_AB_SRM_NB1_SZ8M 2
2836#define FFE_AB_SRM_NB_SZ_DEF 3
2837#define FFE_AB_SRM_NB2_SZ4M 4
2838#define FFE_AB_SRM_NB2_SZ8M 5
2839#define FFE_AB_SRM_NB2_SZ16M 6
2840#define FFE_AB_SRM_NB_SZ_RES 7
2841
2842/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
2843/* We write just the last dword of these registers */
2844#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
2845 (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
2846 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
2847#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
2848#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
2849
2850/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
2851#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
2852 (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
2853 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
2854#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
2855#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
2856
2857/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
2858#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
2859#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
2860
2861/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
2862#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
2863#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
2864
2865/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
2866#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
2867#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
2868 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
2869
2870/* XM_RX_PARAM_REG: XGMAC receive parameter register */
2871#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
2872#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
2873 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
2874
2875/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
2876/* Default values */
2877#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
2878#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
2879#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
2880
2881/* XX_CORE_STAT_REG: XAUI XGXS core status register */
2882/* XGXS all-lanes status fields */
2883#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
2884#define FRF_AB_XX_SYNC_STAT_WIDTH 4
2885#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
2886#define FRF_AB_XX_COMMA_DET_WIDTH 4
2887#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
2888#define FRF_AB_XX_CHAR_ERR_WIDTH 4
2889#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
2890#define FRF_AB_XX_DISPERR_WIDTH 4
2891#define FFE_AB_XX_STAT_ALL_LANES 0xf
2892#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8
2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895
2896/* DRIVER_EV */
2897/* Sub-fields of an RX flush completion event */
2898#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
2899#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
2900#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
2901#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
2902
2903/* EVENT_ENTRY */
2904/* Magic number field for event test */
2905#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
2906#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
2907
2908/**************************************************************************
2909 *
2910 * Falcon MAC stats
2911 *
2912 **************************************************************************
2913 *
2914 */
2915
2916#define GRxGoodOct_offset 0x0
2917#define GRxGoodOct_WIDTH 48
2918#define GRxBadOct_offset 0x8
2919#define GRxBadOct_WIDTH 48
2920#define GRxMissPkt_offset 0x10
2921#define GRxMissPkt_WIDTH 32
2922#define GRxFalseCRS_offset 0x14
2923#define GRxFalseCRS_WIDTH 32
2924#define GRxPausePkt_offset 0x18
2925#define GRxPausePkt_WIDTH 32
2926#define GRxBadPkt_offset 0x1C
2927#define GRxBadPkt_WIDTH 32
2928#define GRxUcastPkt_offset 0x20
2929#define GRxUcastPkt_WIDTH 32
2930#define GRxMcastPkt_offset 0x24
2931#define GRxMcastPkt_WIDTH 32
2932#define GRxBcastPkt_offset 0x28
2933#define GRxBcastPkt_WIDTH 32
2934#define GRxGoodLt64Pkt_offset 0x2C
2935#define GRxGoodLt64Pkt_WIDTH 32
2936#define GRxBadLt64Pkt_offset 0x30
2937#define GRxBadLt64Pkt_WIDTH 32
2938#define GRx64Pkt_offset 0x34
2939#define GRx64Pkt_WIDTH 32
2940#define GRx65to127Pkt_offset 0x38
2941#define GRx65to127Pkt_WIDTH 32
2942#define GRx128to255Pkt_offset 0x3C
2943#define GRx128to255Pkt_WIDTH 32
2944#define GRx256to511Pkt_offset 0x40
2945#define GRx256to511Pkt_WIDTH 32
2946#define GRx512to1023Pkt_offset 0x44
2947#define GRx512to1023Pkt_WIDTH 32
2948#define GRx1024to15xxPkt_offset 0x48
2949#define GRx1024to15xxPkt_WIDTH 32
2950#define GRx15xxtoJumboPkt_offset 0x4C
2951#define GRx15xxtoJumboPkt_WIDTH 32
2952#define GRxGtJumboPkt_offset 0x50
2953#define GRxGtJumboPkt_WIDTH 32
2954#define GRxFcsErr64to15xxPkt_offset 0x54
2955#define GRxFcsErr64to15xxPkt_WIDTH 32
2956#define GRxFcsErr15xxtoJumboPkt_offset 0x58
2957#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
2958#define GRxFcsErrGtJumboPkt_offset 0x5C
2959#define GRxFcsErrGtJumboPkt_WIDTH 32
2960#define GTxGoodBadOct_offset 0x80
2961#define GTxGoodBadOct_WIDTH 48
2962#define GTxGoodOct_offset 0x88
2963#define GTxGoodOct_WIDTH 48
2964#define GTxSglColPkt_offset 0x90
2965#define GTxSglColPkt_WIDTH 32
2966#define GTxMultColPkt_offset 0x94
2967#define GTxMultColPkt_WIDTH 32
2968#define GTxExColPkt_offset 0x98
2969#define GTxExColPkt_WIDTH 32
2970#define GTxDefPkt_offset 0x9C
2971#define GTxDefPkt_WIDTH 32
2972#define GTxLateCol_offset 0xA0
2973#define GTxLateCol_WIDTH 32
2974#define GTxExDefPkt_offset 0xA4
2975#define GTxExDefPkt_WIDTH 32
2976#define GTxPausePkt_offset 0xA8
2977#define GTxPausePkt_WIDTH 32
2978#define GTxBadPkt_offset 0xAC
2979#define GTxBadPkt_WIDTH 32
2980#define GTxUcastPkt_offset 0xB0
2981#define GTxUcastPkt_WIDTH 32
2982#define GTxMcastPkt_offset 0xB4
2983#define GTxMcastPkt_WIDTH 32
2984#define GTxBcastPkt_offset 0xB8
2985#define GTxBcastPkt_WIDTH 32
2986#define GTxLt64Pkt_offset 0xBC
2987#define GTxLt64Pkt_WIDTH 32
2988#define GTx64Pkt_offset 0xC0
2989#define GTx64Pkt_WIDTH 32
2990#define GTx65to127Pkt_offset 0xC4
2991#define GTx65to127Pkt_WIDTH 32
2992#define GTx128to255Pkt_offset 0xC8
2993#define GTx128to255Pkt_WIDTH 32
2994#define GTx256to511Pkt_offset 0xCC
2995#define GTx256to511Pkt_WIDTH 32
2996#define GTx512to1023Pkt_offset 0xD0
2997#define GTx512to1023Pkt_WIDTH 32
2998#define GTx1024to15xxPkt_offset 0xD4
2999#define GTx1024to15xxPkt_WIDTH 32
3000#define GTx15xxtoJumboPkt_offset 0xD8
3001#define GTx15xxtoJumboPkt_WIDTH 32
3002#define GTxGtJumboPkt_offset 0xDC
3003#define GTxGtJumboPkt_WIDTH 32
3004#define GTxNonTcpUdpPkt_offset 0xE0
3005#define GTxNonTcpUdpPkt_WIDTH 16
3006#define GTxMacSrcErrPkt_offset 0xE4
3007#define GTxMacSrcErrPkt_WIDTH 16
3008#define GTxIpSrcErrPkt_offset 0xE8
3009#define GTxIpSrcErrPkt_WIDTH 16
3010#define GDmaDone_offset 0xEC
3011#define GDmaDone_WIDTH 32
3012
3013#define XgRxOctets_offset 0x0
3014#define XgRxOctets_WIDTH 48
3015#define XgRxOctetsOK_offset 0x8
3016#define XgRxOctetsOK_WIDTH 48
3017#define XgRxPkts_offset 0x10
3018#define XgRxPkts_WIDTH 32
3019#define XgRxPktsOK_offset 0x14
3020#define XgRxPktsOK_WIDTH 32
3021#define XgRxBroadcastPkts_offset 0x18
3022#define XgRxBroadcastPkts_WIDTH 32
3023#define XgRxMulticastPkts_offset 0x1C
3024#define XgRxMulticastPkts_WIDTH 32
3025#define XgRxUnicastPkts_offset 0x20
3026#define XgRxUnicastPkts_WIDTH 32
3027#define XgRxUndersizePkts_offset 0x24
3028#define XgRxUndersizePkts_WIDTH 32
3029#define XgRxOversizePkts_offset 0x28
3030#define XgRxOversizePkts_WIDTH 32
3031#define XgRxJabberPkts_offset 0x2C
3032#define XgRxJabberPkts_WIDTH 32
3033#define XgRxUndersizeFCSerrorPkts_offset 0x30
3034#define XgRxUndersizeFCSerrorPkts_WIDTH 32
3035#define XgRxDropEvents_offset 0x34
3036#define XgRxDropEvents_WIDTH 32
3037#define XgRxFCSerrorPkts_offset 0x38
3038#define XgRxFCSerrorPkts_WIDTH 32
3039#define XgRxAlignError_offset 0x3C
3040#define XgRxAlignError_WIDTH 32
3041#define XgRxSymbolError_offset 0x40
3042#define XgRxSymbolError_WIDTH 32
3043#define XgRxInternalMACError_offset 0x44
3044#define XgRxInternalMACError_WIDTH 32
3045#define XgRxControlPkts_offset 0x48
3046#define XgRxControlPkts_WIDTH 32
3047#define XgRxPausePkts_offset 0x4C
3048#define XgRxPausePkts_WIDTH 32
3049#define XgRxPkts64Octets_offset 0x50
3050#define XgRxPkts64Octets_WIDTH 32
3051#define XgRxPkts65to127Octets_offset 0x54
3052#define XgRxPkts65to127Octets_WIDTH 32
3053#define XgRxPkts128to255Octets_offset 0x58
3054#define XgRxPkts128to255Octets_WIDTH 32
3055#define XgRxPkts256to511Octets_offset 0x5C
3056#define XgRxPkts256to511Octets_WIDTH 32
3057#define XgRxPkts512to1023Octets_offset 0x60
3058#define XgRxPkts512to1023Octets_WIDTH 32
3059#define XgRxPkts1024to15xxOctets_offset 0x64
3060#define XgRxPkts1024to15xxOctets_WIDTH 32
3061#define XgRxPkts15xxtoMaxOctets_offset 0x68
3062#define XgRxPkts15xxtoMaxOctets_WIDTH 32
3063#define XgRxLengthError_offset 0x6C
3064#define XgRxLengthError_WIDTH 32
3065#define XgTxPkts_offset 0x80
3066#define XgTxPkts_WIDTH 32
3067#define XgTxOctets_offset 0x88
3068#define XgTxOctets_WIDTH 48
3069#define XgTxMulticastPkts_offset 0x90
3070#define XgTxMulticastPkts_WIDTH 32
3071#define XgTxBroadcastPkts_offset 0x94
3072#define XgTxBroadcastPkts_WIDTH 32
3073#define XgTxUnicastPkts_offset 0x98
3074#define XgTxUnicastPkts_WIDTH 32
3075#define XgTxControlPkts_offset 0x9C
3076#define XgTxControlPkts_WIDTH 32
3077#define XgTxPausePkts_offset 0xA0
3078#define XgTxPausePkts_WIDTH 32
3079#define XgTxPkts64Octets_offset 0xA4
3080#define XgTxPkts64Octets_WIDTH 32
3081#define XgTxPkts65to127Octets_offset 0xA8
3082#define XgTxPkts65to127Octets_WIDTH 32
3083#define XgTxPkts128to255Octets_offset 0xAC
3084#define XgTxPkts128to255Octets_WIDTH 32
3085#define XgTxPkts256to511Octets_offset 0xB0
3086#define XgTxPkts256to511Octets_WIDTH 32
3087#define XgTxPkts512to1023Octets_offset 0xB4
3088#define XgTxPkts512to1023Octets_WIDTH 32
3089#define XgTxPkts1024to15xxOctets_offset 0xB8
3090#define XgTxPkts1024to15xxOctets_WIDTH 32
3091#define XgTxPkts1519toMaxOctets_offset 0xBC
3092#define XgTxPkts1519toMaxOctets_WIDTH 32
3093#define XgTxUndersizePkts_offset 0xC0
3094#define XgTxUndersizePkts_WIDTH 32
3095#define XgTxOversizePkts_offset 0xC4
3096#define XgTxOversizePkts_WIDTH 32
3097#define XgTxNonTcpUdpPkt_offset 0xC8
3098#define XgTxNonTcpUdpPkt_WIDTH 16
3099#define XgTxMacSrcErrPkt_offset 0xCC
3100#define XgTxMacSrcErrPkt_WIDTH 16
3101#define XgTxIpSrcErrPkt_offset 0xD0
3102#define XgTxIpSrcErrPkt_WIDTH 16
3103#define XgDmaDone_offset 0xD4
3104#define XgDmaDone_WIDTH 32
3105
3106#define FALCON_STATS_NOT_DONE 0x00000000
3107#define FALCON_STATS_DONE 0xffffffff
3108
3109/**************************************************************************
3110 *
3111 * Falcon non-volatile configuration
3112 *
3113 **************************************************************************
3114 */
3115
3116/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
3117struct falcon_nvconfig_board_v2 {
3118 __le16 nports;
3119 u8 port0_phy_addr;
3120 u8 port0_phy_type;
3121 u8 port1_phy_addr;
3122 u8 port1_phy_type;
3123 __le16 asic_sub_revision;
3124 __le16 board_revision;
3125} __packed;
3126
3127/* Board configuration v3 extra information */
3128struct falcon_nvconfig_board_v3 {
3129 __le32 spi_device_type[2];
3130} __packed;
3131
3132/* Bit numbers for spi_device_type */
3133#define SPI_DEV_TYPE_SIZE_LBN 0
3134#define SPI_DEV_TYPE_SIZE_WIDTH 5
3135#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
3136#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
3137#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
3138#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
3139#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
3140#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
3141#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
3142#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
3143#define SPI_DEV_TYPE_FIELD(type, field) \
3144 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
3145
3146#define FALCON_NVCONFIG_OFFSET 0x300
3147
3148#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
3149struct falcon_nvconfig {
3150 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
3151 u8 mac_address[2][8]; /* 0x310 */
3152 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
3153 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
3154 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
3155 efx_oword_t hw_init_reg; /* 0x350 */
3156 efx_oword_t nic_stat_reg; /* 0x360 */
3157 efx_oword_t glb_ctl_reg; /* 0x370 */
3158 efx_oword_t srm_cfg_reg; /* 0x380 */
3159 efx_oword_t spare_reg; /* 0x390 */
3160 __le16 board_magic_num; /* 0x3A0 */
3161 __le16 board_struct_ver;
3162 __le16 board_checksum;
3163 struct falcon_nvconfig_board_v2 board_v2;
3164 efx_oword_t ee_base_page_reg; /* 0x3B0 */
3165 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
3166} __packed;
3167
3168#endif /* EFX_REGS_H */
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 98bff5ada09a..a97c923b560c 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc. 4 * Copyright 2005-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -16,9 +16,8 @@
16#include <net/ip.h> 16#include <net/ip.h>
17#include <net/checksum.h> 17#include <net/checksum.h>
18#include "net_driver.h" 18#include "net_driver.h"
19#include "rx.h"
20#include "efx.h" 19#include "efx.h"
21#include "falcon.h" 20#include "nic.h"
22#include "selftest.h" 21#include "selftest.h"
23#include "workarounds.h" 22#include "workarounds.h"
24 23
@@ -61,7 +60,7 @@
61 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 60 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
62 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 61 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
63 */ 62 */
64static int rx_alloc_method = RX_ALLOC_METHOD_PAGE; 63static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
65 64
66#define RX_ALLOC_LEVEL_LRO 0x2000 65#define RX_ALLOC_LEVEL_LRO 0x2000
67#define RX_ALLOC_LEVEL_MAX 0x3000 66#define RX_ALLOC_LEVEL_MAX 0x3000
@@ -293,8 +292,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
293 * fill anyway. 292 * fill anyway.
294 */ 293 */
295 fill_level = (rx_queue->added_count - rx_queue->removed_count); 294 fill_level = (rx_queue->added_count - rx_queue->removed_count);
296 EFX_BUG_ON_PARANOID(fill_level > 295 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
297 rx_queue->efx->type->rxd_ring_mask + 1);
298 296
299 /* Don't fill if we don't need to */ 297 /* Don't fill if we don't need to */
300 if (fill_level >= rx_queue->fast_fill_trigger) 298 if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +314,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
316 retry: 314 retry:
317 /* Recalculate current fill level now that we have the lock */ 315 /* Recalculate current fill level now that we have the lock */
318 fill_level = (rx_queue->added_count - rx_queue->removed_count); 316 fill_level = (rx_queue->added_count - rx_queue->removed_count);
319 EFX_BUG_ON_PARANOID(fill_level > 317 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
320 rx_queue->efx->type->rxd_ring_mask + 1);
321 space = rx_queue->fast_fill_limit - fill_level; 318 space = rx_queue->fast_fill_limit - fill_level;
322 if (space < EFX_RX_BATCH) 319 if (space < EFX_RX_BATCH)
323 goto out_unlock; 320 goto out_unlock;
@@ -329,8 +326,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
329 326
330 do { 327 do {
331 for (i = 0; i < EFX_RX_BATCH; ++i) { 328 for (i = 0; i < EFX_RX_BATCH; ++i) {
332 index = (rx_queue->added_count & 329 index = rx_queue->added_count & EFX_RXQ_MASK;
333 rx_queue->efx->type->rxd_ring_mask);
334 rx_buf = efx_rx_buffer(rx_queue, index); 330 rx_buf = efx_rx_buffer(rx_queue, index);
335 rc = efx_init_rx_buffer(rx_queue, rx_buf); 331 rc = efx_init_rx_buffer(rx_queue, rx_buf);
336 if (unlikely(rc)) 332 if (unlikely(rc))
@@ -345,7 +341,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
345 341
346 out: 342 out:
347 /* Send write pointer to card. */ 343 /* Send write pointer to card. */
348 falcon_notify_rx_desc(rx_queue); 344 efx_nic_notify_rx_desc(rx_queue);
349 345
350 /* If the fast fill is running inside from the refill tasklet, then 346 /* If the fast fill is running inside from the refill tasklet, then
351 * for SMP systems it may be running on a different CPU to 347 * for SMP systems it may be running on a different CPU to
@@ -448,17 +444,23 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
448 bool checksummed) 444 bool checksummed)
449{ 445{
450 struct napi_struct *napi = &channel->napi_str; 446 struct napi_struct *napi = &channel->napi_str;
447 gro_result_t gro_result;
451 448
452 /* Pass the skb/page into the LRO engine */ 449 /* Pass the skb/page into the LRO engine */
453 if (rx_buf->page) { 450 if (rx_buf->page) {
454 struct sk_buff *skb = napi_get_frags(napi); 451 struct page *page = rx_buf->page;
452 struct sk_buff *skb;
455 453
454 EFX_BUG_ON_PARANOID(rx_buf->skb);
455 rx_buf->page = NULL;
456
457 skb = napi_get_frags(napi);
456 if (!skb) { 458 if (!skb) {
457 put_page(rx_buf->page); 459 put_page(page);
458 goto out; 460 return;
459 } 461 }
460 462
461 skb_shinfo(skb)->frags[0].page = rx_buf->page; 463 skb_shinfo(skb)->frags[0].page = page;
462 skb_shinfo(skb)->frags[0].page_offset = 464 skb_shinfo(skb)->frags[0].page_offset =
463 efx_rx_buf_offset(rx_buf); 465 efx_rx_buf_offset(rx_buf);
464 skb_shinfo(skb)->frags[0].size = rx_buf->len; 466 skb_shinfo(skb)->frags[0].size = rx_buf->len;
@@ -470,17 +472,24 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
470 skb->ip_summed = 472 skb->ip_summed =
471 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 473 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
472 474
473 napi_gro_frags(napi); 475 skb_record_rx_queue(skb, channel->channel);
474 476
475out: 477 gro_result = napi_gro_frags(napi);
476 EFX_BUG_ON_PARANOID(rx_buf->skb);
477 rx_buf->page = NULL;
478 } else { 478 } else {
479 EFX_BUG_ON_PARANOID(!rx_buf->skb); 479 struct sk_buff *skb = rx_buf->skb;
480 EFX_BUG_ON_PARANOID(!checksummed);
481 480
482 napi_gro_receive(napi, rx_buf->skb); 481 EFX_BUG_ON_PARANOID(!skb);
482 EFX_BUG_ON_PARANOID(!checksummed);
483 rx_buf->skb = NULL; 483 rx_buf->skb = NULL;
484
485 gro_result = napi_gro_receive(napi, skb);
486 }
487
488 if (gro_result == GRO_NORMAL) {
489 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
490 } else if (gro_result != GRO_DROP) {
491 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
492 channel->irq_mod_score += 2;
484 } 493 }
485} 494}
486 495
@@ -558,7 +567,7 @@ void __efx_rx_packet(struct efx_channel *channel,
558 if (unlikely(efx->loopback_selftest)) { 567 if (unlikely(efx->loopback_selftest)) {
559 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 568 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
560 efx_free_rx_buffer(efx, rx_buf); 569 efx_free_rx_buffer(efx, rx_buf);
561 goto done; 570 return;
562 } 571 }
563 572
564 if (rx_buf->skb) { 573 if (rx_buf->skb) {
@@ -570,34 +579,28 @@ void __efx_rx_packet(struct efx_channel *channel,
570 * at the ethernet header */ 579 * at the ethernet header */
571 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, 580 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
572 efx->net_dev); 581 efx->net_dev);
582
583 skb_record_rx_queue(rx_buf->skb, channel->channel);
573 } 584 }
574 585
575 if (likely(checksummed || rx_buf->page)) { 586 if (likely(checksummed || rx_buf->page)) {
576 efx_rx_packet_lro(channel, rx_buf, checksummed); 587 efx_rx_packet_lro(channel, rx_buf, checksummed);
577 goto done; 588 return;
578 } 589 }
579 590
580 /* We now own the SKB */ 591 /* We now own the SKB */
581 skb = rx_buf->skb; 592 skb = rx_buf->skb;
582 rx_buf->skb = NULL; 593 rx_buf->skb = NULL;
583
584 EFX_BUG_ON_PARANOID(rx_buf->page);
585 EFX_BUG_ON_PARANOID(rx_buf->skb);
586 EFX_BUG_ON_PARANOID(!skb); 594 EFX_BUG_ON_PARANOID(!skb);
587 595
588 /* Set the SKB flags */ 596 /* Set the SKB flags */
589 skb->ip_summed = CHECKSUM_NONE; 597 skb->ip_summed = CHECKSUM_NONE;
590 598
591 skb_record_rx_queue(skb, channel->channel);
592
593 /* Pass the packet up */ 599 /* Pass the packet up */
594 netif_receive_skb(skb); 600 netif_receive_skb(skb);
595 601
596 /* Update allocation strategy method */ 602 /* Update allocation strategy method */
597 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 603 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
598
599done:
600 ;
601} 604}
602 605
603void efx_rx_strategy(struct efx_channel *channel) 606void efx_rx_strategy(struct efx_channel *channel)
@@ -632,12 +635,12 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
632 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); 635 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
633 636
634 /* Allocate RX buffers */ 637 /* Allocate RX buffers */
635 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); 638 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
636 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 639 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
637 if (!rx_queue->buffer) 640 if (!rx_queue->buffer)
638 return -ENOMEM; 641 return -ENOMEM;
639 642
640 rc = falcon_probe_rx(rx_queue); 643 rc = efx_nic_probe_rx(rx_queue);
641 if (rc) { 644 if (rc) {
642 kfree(rx_queue->buffer); 645 kfree(rx_queue->buffer);
643 rx_queue->buffer = NULL; 646 rx_queue->buffer = NULL;
@@ -647,7 +650,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
647 650
648void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 651void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
649{ 652{
650 struct efx_nic *efx = rx_queue->efx;
651 unsigned int max_fill, trigger, limit; 653 unsigned int max_fill, trigger, limit;
652 654
653 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); 655 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -660,7 +662,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
660 rx_queue->min_overfill = -1U; 662 rx_queue->min_overfill = -1U;
661 663
662 /* Initialise limit fields */ 664 /* Initialise limit fields */
663 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; 665 max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
664 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 666 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
665 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 667 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
666 668
@@ -669,7 +671,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
669 rx_queue->fast_fill_limit = limit; 671 rx_queue->fast_fill_limit = limit;
670 672
671 /* Set up RX descriptor ring */ 673 /* Set up RX descriptor ring */
672 falcon_init_rx(rx_queue); 674 efx_nic_init_rx(rx_queue);
673} 675}
674 676
675void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 677void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -679,11 +681,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
679 681
680 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); 682 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
681 683
682 falcon_fini_rx(rx_queue); 684 efx_nic_fini_rx(rx_queue);
683 685
684 /* Release RX buffers NB start at index 0 not current HW ptr */ 686 /* Release RX buffers NB start at index 0 not current HW ptr */
685 if (rx_queue->buffer) { 687 if (rx_queue->buffer) {
686 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { 688 for (i = 0; i <= EFX_RXQ_MASK; i++) {
687 rx_buf = efx_rx_buffer(rx_queue, i); 689 rx_buf = efx_rx_buffer(rx_queue, i);
688 efx_fini_rx_buffer(rx_queue, rx_buf); 690 efx_fini_rx_buffer(rx_queue, rx_buf);
689 } 691 }
@@ -704,7 +706,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
704{ 706{
705 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); 707 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
706 708
707 falcon_remove_rx(rx_queue); 709 efx_nic_remove_rx(rx_queue);
708 710
709 kfree(rx_queue->buffer); 711 kfree(rx_queue->buffer);
710 rx_queue->buffer = NULL; 712 rx_queue->buffer = NULL;
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
deleted file mode 100644
index 42ee7555a80b..000000000000
--- a/drivers/net/sfc/rx.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_RX_H
11#define EFX_RX_H
12
13#include "net_driver.h"
14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19
20void efx_rx_strategy(struct efx_channel *channel);
21void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
22void efx_rx_work(struct work_struct *data);
23void __efx_rx_packet(struct efx_channel *channel,
24 struct efx_rx_buffer *rx_buf, bool checksummed);
25
26#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 817c7efc11e0..14949bb303a0 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -20,14 +20,12 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include "net_driver.h" 22#include "net_driver.h"
23#include "ethtool.h"
24#include "efx.h" 23#include "efx.h"
25#include "falcon.h" 24#include "nic.h"
26#include "selftest.h" 25#include "selftest.h"
27#include "boards.h"
28#include "workarounds.h" 26#include "workarounds.h"
29#include "spi.h" 27#include "spi.h"
30#include "falcon_io.h" 28#include "io.h"
31#include "mdio_10g.h" 29#include "mdio_10g.h"
32 30
33/* 31/*
@@ -57,6 +55,7 @@ static const char *payload_msg =
57 * @flush: Drop all packets in efx_loopback_rx_packet 55 * @flush: Drop all packets in efx_loopback_rx_packet
58 * @packet_count: Number of packets being used in this test 56 * @packet_count: Number of packets being used in this test
59 * @skbs: An array of skbs transmitted 57 * @skbs: An array of skbs transmitted
58 * @offload_csum: Checksums are being offloaded
60 * @rx_good: RX good packet count 59 * @rx_good: RX good packet count
61 * @rx_bad: RX bad packet count 60 * @rx_bad: RX bad packet count
62 * @payload: Payload used in tests 61 * @payload: Payload used in tests
@@ -65,10 +64,7 @@ struct efx_loopback_state {
65 bool flush; 64 bool flush;
66 int packet_count; 65 int packet_count;
67 struct sk_buff **skbs; 66 struct sk_buff **skbs;
68
69 /* Checksums are being offloaded */
70 bool offload_csum; 67 bool offload_csum;
71
72 atomic_t rx_good; 68 atomic_t rx_good;
73 atomic_t rx_bad; 69 atomic_t rx_bad;
74 struct efx_loopback_payload payload; 70 struct efx_loopback_payload payload;
@@ -104,7 +100,7 @@ static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
104 } 100 }
105 101
106 if (EFX_IS10G(efx)) { 102 if (EFX_IS10G(efx)) {
107 rc = efx_mdio_check_mmds(efx, efx->phy_op->mmds, 0); 103 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
108 if (rc) 104 if (rc)
109 goto out; 105 goto out;
110 } 106 }
@@ -117,23 +113,26 @@ out:
117 113
118static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) 114static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
119{ 115{
120 int rc; 116 int rc = 0;
117
118 if (efx->type->test_nvram) {
119 rc = efx->type->test_nvram(efx);
120 tests->nvram = rc ? -1 : 1;
121 }
121 122
122 rc = falcon_read_nvram(efx, NULL);
123 tests->nvram = rc ? -1 : 1;
124 return rc; 123 return rc;
125} 124}
126 125
127static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 126static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
128{ 127{
129 int rc; 128 int rc = 0;
130 129
131 /* Not supported on A-series silicon */ 130 /* Test register access */
132 if (falcon_rev(efx) < FALCON_REV_B0) 131 if (efx->type->test_registers) {
133 return 0; 132 rc = efx->type->test_registers(efx);
133 tests->registers = rc ? -1 : 1;
134 }
134 135
135 rc = falcon_test_registers(efx);
136 tests->registers = rc ? -1 : 1;
137 return rc; 136 return rc;
138} 137}
139 138
@@ -165,7 +164,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
165 goto success; 164 goto success;
166 } 165 }
167 166
168 falcon_generate_interrupt(efx); 167 efx_nic_generate_interrupt(efx);
169 168
170 /* Wait for arrival of test interrupt. */ 169 /* Wait for arrival of test interrupt. */
171 EFX_LOG(efx, "waiting for test interrupt\n"); 170 EFX_LOG(efx, "waiting for test interrupt\n");
@@ -177,8 +176,8 @@ static int efx_test_interrupts(struct efx_nic *efx,
177 return -ETIMEDOUT; 176 return -ETIMEDOUT;
178 177
179 success: 178 success:
180 EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", 179 EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
181 efx->interrupt_mode, efx->last_irq_cpu); 180 efx->last_irq_cpu);
182 tests->interrupt = 1; 181 tests->interrupt = 1;
183 return 0; 182 return 0;
184} 183}
@@ -203,7 +202,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
203 channel->eventq_magic = 0; 202 channel->eventq_magic = 0;
204 smp_wmb(); 203 smp_wmb();
205 204
206 falcon_generate_test_event(channel, magic); 205 efx_nic_generate_test_event(channel, magic);
207 206
208 /* Wait for arrival of interrupt */ 207 /* Wait for arrival of interrupt */
209 count = 0; 208 count = 0;
@@ -254,9 +253,6 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
254 if (!efx->phy_op->run_tests) 253 if (!efx->phy_op->run_tests)
255 return 0; 254 return 0;
256 255
257 EFX_BUG_ON_PARANOID(efx->phy_op->num_tests == 0 ||
258 efx->phy_op->num_tests > EFX_MAX_PHY_TESTS);
259
260 mutex_lock(&efx->mac_lock); 256 mutex_lock(&efx->mac_lock);
261 rc = efx->phy_op->run_tests(efx, tests->phy, flags); 257 rc = efx->phy_op->run_tests(efx, tests->phy, flags);
262 mutex_unlock(&efx->mac_lock); 258 mutex_unlock(&efx->mac_lock);
@@ -426,7 +422,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
426 422
427 if (efx_dev_registered(efx)) 423 if (efx_dev_registered(efx))
428 netif_tx_lock_bh(efx->net_dev); 424 netif_tx_lock_bh(efx->net_dev);
429 rc = efx_xmit(efx, tx_queue, skb); 425 rc = efx_enqueue_skb(tx_queue, skb);
430 if (efx_dev_registered(efx)) 426 if (efx_dev_registered(efx))
431 netif_tx_unlock_bh(efx->net_dev); 427 netif_tx_unlock_bh(efx->net_dev);
432 428
@@ -439,7 +435,6 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
439 kfree_skb(skb); 435 kfree_skb(skb);
440 return -EPIPE; 436 return -EPIPE;
441 } 437 }
442 efx->net_dev->trans_start = jiffies;
443 } 438 }
444 439
445 return 0; 440 return 0;
@@ -527,7 +522,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
527 522
528 for (i = 0; i < 3; i++) { 523 for (i = 0; i < 3; i++) {
529 /* Determine how many packets to send */ 524 /* Determine how many packets to send */
530 state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 525 state->packet_count = EFX_TXQ_SIZE / 3;
531 state->packet_count = min(1 << (i << 2), state->packet_count); 526 state->packet_count = min(1 << (i << 2), state->packet_count);
532 state->skbs = kzalloc(sizeof(state->skbs[0]) * 527 state->skbs = kzalloc(sizeof(state->skbs[0]) *
533 state->packet_count, GFP_KERNEL); 528 state->packet_count, GFP_KERNEL);
@@ -568,14 +563,49 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
568 return 0; 563 return 0;
569} 564}
570 565
566/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
567 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
568 * to delay and retry. Therefore, it's safer to just poll directly. Wait
569 * for link up and any faults to dissipate. */
570static int efx_wait_for_link(struct efx_nic *efx)
571{
572 struct efx_link_state *link_state = &efx->link_state;
573 int count;
574 bool link_up;
575
576 for (count = 0; count < 40; count++) {
577 schedule_timeout_uninterruptible(HZ / 10);
578
579 if (efx->type->monitor != NULL) {
580 mutex_lock(&efx->mac_lock);
581 efx->type->monitor(efx);
582 mutex_unlock(&efx->mac_lock);
583 } else {
584 struct efx_channel *channel = &efx->channel[0];
585 if (channel->work_pending)
586 efx_process_channel_now(channel);
587 }
588
589 mutex_lock(&efx->mac_lock);
590 link_up = link_state->up;
591 if (link_up)
592 link_up = !efx->mac_op->check_fault(efx);
593 mutex_unlock(&efx->mac_lock);
594
595 if (link_up)
596 return 0;
597 }
598
599 return -ETIMEDOUT;
600}
601
571static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, 602static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
572 unsigned int loopback_modes) 603 unsigned int loopback_modes)
573{ 604{
574 enum efx_loopback_mode mode; 605 enum efx_loopback_mode mode;
575 struct efx_loopback_state *state; 606 struct efx_loopback_state *state;
576 struct efx_tx_queue *tx_queue; 607 struct efx_tx_queue *tx_queue;
577 bool link_up; 608 int rc = 0;
578 int count, rc = 0;
579 609
580 /* Set the port loopback_selftest member. From this point on 610 /* Set the port loopback_selftest member. From this point on
581 * all received packets will be dropped. Mark the state as 611 * all received packets will be dropped. Mark the state as
@@ -594,46 +624,23 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
594 624
595 /* Move the port into the specified loopback mode. */ 625 /* Move the port into the specified loopback mode. */
596 state->flush = true; 626 state->flush = true;
627 mutex_lock(&efx->mac_lock);
597 efx->loopback_mode = mode; 628 efx->loopback_mode = mode;
598 efx_reconfigure_port(efx); 629 rc = __efx_reconfigure_port(efx);
599 630 mutex_unlock(&efx->mac_lock);
600 /* Wait for the PHY to signal the link is up. Interrupts 631 if (rc) {
601 * are enabled for PHY's using LASI, otherwise we poll() 632 EFX_ERR(efx, "unable to move into %s loopback\n",
602 * quickly */ 633 LOOPBACK_MODE(efx));
603 count = 0; 634 goto out;
604 do { 635 }
605 struct efx_channel *channel = &efx->channel[0];
606 636
607 efx->phy_op->poll(efx); 637 rc = efx_wait_for_link(efx);
608 schedule_timeout_uninterruptible(HZ / 10); 638 if (rc) {
609 if (channel->work_pending)
610 efx_process_channel_now(channel);
611 /* Wait for PHY events to be processed */
612 flush_workqueue(efx->workqueue);
613 rmb();
614
615 /* We need both the phy and xaui links to be ok.
616 * rather than relying on the falcon_xmac irq/poll
617 * regime, just poll xaui directly */
618 link_up = efx->link_up;
619 if (link_up && EFX_IS10G(efx) &&
620 !falcon_xaui_link_ok(efx))
621 link_up = false;
622
623 } while ((++count < 20) && !link_up);
624
625 /* The link should now be up. If it isn't, there is no point
626 * in attempting a loopback test */
627 if (!link_up) {
628 EFX_ERR(efx, "loopback %s never came up\n", 639 EFX_ERR(efx, "loopback %s never came up\n",
629 LOOPBACK_MODE(efx)); 640 LOOPBACK_MODE(efx));
630 rc = -EIO;
631 goto out; 641 goto out;
632 } 642 }
633 643
634 EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
635 LOOPBACK_MODE(efx), count);
636
637 /* Test every TX queue */ 644 /* Test every TX queue */
638 efx_for_each_tx_queue(tx_queue, efx) { 645 efx_for_each_tx_queue(tx_queue, efx) {
639 state->offload_csum = (tx_queue->queue == 646 state->offload_csum = (tx_queue->queue ==
@@ -667,7 +674,6 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
667 enum efx_loopback_mode loopback_mode = efx->loopback_mode; 674 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
668 int phy_mode = efx->phy_mode; 675 int phy_mode = efx->phy_mode;
669 enum reset_type reset_method = RESET_TYPE_INVISIBLE; 676 enum reset_type reset_method = RESET_TYPE_INVISIBLE;
670 struct ethtool_cmd ecmd;
671 struct efx_channel *channel; 677 struct efx_channel *channel;
672 int rc_test = 0, rc_reset = 0, rc; 678 int rc_test = 0, rc_reset = 0, rc;
673 679
@@ -720,21 +726,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
720 mutex_unlock(&efx->mac_lock); 726 mutex_unlock(&efx->mac_lock);
721 727
722 /* free up all consumers of SRAM (including all the queues) */ 728 /* free up all consumers of SRAM (including all the queues) */
723 efx_reset_down(efx, reset_method, &ecmd); 729 efx_reset_down(efx, reset_method);
724 730
725 rc = efx_test_chip(efx, tests); 731 rc = efx_test_chip(efx, tests);
726 if (rc && !rc_test) 732 if (rc && !rc_test)
727 rc_test = rc; 733 rc_test = rc;
728 734
729 /* reset the chip to recover from the register test */ 735 /* reset the chip to recover from the register test */
730 rc_reset = falcon_reset_hw(efx, reset_method); 736 rc_reset = efx->type->reset(efx, reset_method);
731 737
732 /* Ensure that the phy is powered and out of loopback 738 /* Ensure that the phy is powered and out of loopback
733 * for the bist and loopback tests */ 739 * for the bist and loopback tests */
734 efx->phy_mode &= ~PHY_MODE_LOW_POWER; 740 efx->phy_mode &= ~PHY_MODE_LOW_POWER;
735 efx->loopback_mode = LOOPBACK_NONE; 741 efx->loopback_mode = LOOPBACK_NONE;
736 742
737 rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0); 743 rc = efx_reset_up(efx, reset_method, rc_reset == 0);
738 if (rc && !rc_reset) 744 if (rc && !rc_reset)
739 rc_reset = rc; 745 rc_reset = rc;
740 746
@@ -753,10 +759,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
753 rc_test = rc; 759 rc_test = rc;
754 760
755 /* restore the PHY to the previous state */ 761 /* restore the PHY to the previous state */
756 efx->loopback_mode = loopback_mode; 762 mutex_lock(&efx->mac_lock);
757 efx->phy_mode = phy_mode; 763 efx->phy_mode = phy_mode;
758 efx->port_inhibited = false; 764 efx->port_inhibited = false;
759 efx_ethtool_set_settings(efx->net_dev, &ecmd); 765 efx->loopback_mode = loopback_mode;
766 __efx_reconfigure_port(efx);
767 mutex_unlock(&efx->mac_lock);
760 768
761 return rc_test; 769 return rc_test;
762} 770}
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
deleted file mode 100644
index 49eb91b5f50c..000000000000
--- a/drivers/net/sfc/sfe4001.c
+++ /dev/null
@@ -1,435 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*****************************************************************************
11 * Support for the SFE4001 and SFN4111T NICs.
12 *
13 * The SFE4001 does not power-up fully at reset due to its high power
14 * consumption. We control its power via a PCA9539 I/O expander.
15 * Both boards have a MAX6647 temperature monitor which we expose to
16 * the lm90 driver.
17 *
18 * This also provides minimal support for reflashing the PHY, which is
19 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
20 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
21 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
22 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
23 * exclusive with the network device being open.
24 */
25
26#include <linux/delay.h>
27#include <linux/rtnetlink.h>
28#include "net_driver.h"
29#include "efx.h"
30#include "phy.h"
31#include "boards.h"
32#include "falcon.h"
33#include "falcon_hwdefs.h"
34#include "falcon_io.h"
35#include "mac.h"
36#include "workarounds.h"
37
38/**************************************************************************
39 *
40 * I2C IO Expander device
41 *
42 **************************************************************************/
43#define PCA9539 0x74
44
45#define P0_IN 0x00
46#define P0_OUT 0x02
47#define P0_INVERT 0x04
48#define P0_CONFIG 0x06
49
50#define P0_EN_1V0X_LBN 0
51#define P0_EN_1V0X_WIDTH 1
52#define P0_EN_1V2_LBN 1
53#define P0_EN_1V2_WIDTH 1
54#define P0_EN_2V5_LBN 2
55#define P0_EN_2V5_WIDTH 1
56#define P0_EN_3V3X_LBN 3
57#define P0_EN_3V3X_WIDTH 1
58#define P0_EN_5V_LBN 4
59#define P0_EN_5V_WIDTH 1
60#define P0_SHORTEN_JTAG_LBN 5
61#define P0_SHORTEN_JTAG_WIDTH 1
62#define P0_X_TRST_LBN 6
63#define P0_X_TRST_WIDTH 1
64#define P0_DSP_RESET_LBN 7
65#define P0_DSP_RESET_WIDTH 1
66
67#define P1_IN 0x01
68#define P1_OUT 0x03
69#define P1_INVERT 0x05
70#define P1_CONFIG 0x07
71
72#define P1_AFE_PWD_LBN 0
73#define P1_AFE_PWD_WIDTH 1
74#define P1_DSP_PWD25_LBN 1
75#define P1_DSP_PWD25_WIDTH 1
76#define P1_RESERVED_LBN 2
77#define P1_RESERVED_WIDTH 2
78#define P1_SPARE_LBN 4
79#define P1_SPARE_WIDTH 4
80
81/* Temperature Sensor */
82#define MAX664X_REG_RSL 0x02
83#define MAX664X_REG_WLHO 0x0B
84
85static void sfe4001_poweroff(struct efx_nic *efx)
86{
87 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
88 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
89
90 /* Turn off all power rails and disable outputs */
91 i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
92 i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
93 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
94
95 /* Clear any over-temperature alert */
96 i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
97}
98
99static int sfe4001_poweron(struct efx_nic *efx)
100{
101 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
102 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
103 unsigned int i, j;
104 int rc;
105 u8 out;
106
107 /* Clear any previous over-temperature alert */
108 rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
109 if (rc < 0)
110 return rc;
111
112 /* Enable port 0 and port 1 outputs on IO expander */
113 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
114 if (rc)
115 return rc;
116 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
117 0xff & ~(1 << P1_SPARE_LBN));
118 if (rc)
119 goto fail_on;
120
121 /* If PHY power is on, turn it all off and wait 1 second to
122 * ensure a full reset.
123 */
124 rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
125 if (rc < 0)
126 goto fail_on;
127 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
128 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
129 (0 << P0_EN_1V0X_LBN));
130 if (rc != out) {
131 EFX_INFO(efx, "power-cycling PHY\n");
132 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
133 if (rc)
134 goto fail_on;
135 schedule_timeout_uninterruptible(HZ);
136 }
137
138 for (i = 0; i < 20; ++i) {
139 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
140 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
141 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
142 (1 << P0_X_TRST_LBN));
143 if (efx->phy_mode & PHY_MODE_SPECIAL)
144 out |= 1 << P0_EN_3V3X_LBN;
145
146 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
147 if (rc)
148 goto fail_on;
149 msleep(10);
150
151 /* Turn on 1V power rail */
152 out &= ~(1 << P0_EN_1V0X_LBN);
153 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
154 if (rc)
155 goto fail_on;
156
157 EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
158
159 /* In flash config mode, DSP does not turn on AFE, so
160 * just wait 1 second.
161 */
162 if (efx->phy_mode & PHY_MODE_SPECIAL) {
163 schedule_timeout_uninterruptible(HZ);
164 return 0;
165 }
166
167 for (j = 0; j < 10; ++j) {
168 msleep(100);
169
170 /* Check DSP has asserted AFE power line */
171 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
172 if (rc < 0)
173 goto fail_on;
174 if (rc & (1 << P1_AFE_PWD_LBN))
175 return 0;
176 }
177 }
178
179 EFX_INFO(efx, "timed out waiting for DSP boot\n");
180 rc = -ETIMEDOUT;
181fail_on:
182 sfe4001_poweroff(efx);
183 return rc;
184}
185
186static int sfn4111t_reset(struct efx_nic *efx)
187{
188 efx_oword_t reg;
189
190 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
191 i2c_lock_adapter(&efx->i2c_adap);
192
193 /* Pull RST_N (GPIO 2) low then let it up again, setting the
194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
195 * output enables; the output levels should always be 0 (low)
196 * and we rely on external pull-ups. */
197 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
198 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
199 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
200 msleep(1000);
201 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
202 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
203 !!(efx->phy_mode & PHY_MODE_SPECIAL));
204 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
205 msleep(1);
206
207 i2c_unlock_adapter(&efx->i2c_adap);
208
209 ssleep(1);
210 return 0;
211}
212
213static ssize_t show_phy_flash_cfg(struct device *dev,
214 struct device_attribute *attr, char *buf)
215{
216 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
217 return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
218}
219
220static ssize_t set_phy_flash_cfg(struct device *dev,
221 struct device_attribute *attr,
222 const char *buf, size_t count)
223{
224 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
225 enum efx_phy_mode old_mode, new_mode;
226 int err;
227
228 rtnl_lock();
229 old_mode = efx->phy_mode;
230 if (count == 0 || *buf == '0')
231 new_mode = old_mode & ~PHY_MODE_SPECIAL;
232 else
233 new_mode = PHY_MODE_SPECIAL;
234 if (old_mode == new_mode) {
235 err = 0;
236 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
237 err = -EBUSY;
238 } else {
239 /* Reset the PHY, reconfigure the MAC and enable/disable
240 * MAC stats accordingly. */
241 efx->phy_mode = new_mode;
242 if (new_mode & PHY_MODE_SPECIAL)
243 efx_stats_disable(efx);
244 if (efx->board_info.type == EFX_BOARD_SFE4001)
245 err = sfe4001_poweron(efx);
246 else
247 err = sfn4111t_reset(efx);
248 efx_reconfigure_port(efx);
249 if (!(new_mode & PHY_MODE_SPECIAL))
250 efx_stats_enable(efx);
251 }
252 rtnl_unlock();
253
254 return err ? err : count;
255}
256
257static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
258
259static void sfe4001_fini(struct efx_nic *efx)
260{
261 EFX_INFO(efx, "%s\n", __func__);
262
263 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
264 sfe4001_poweroff(efx);
265 i2c_unregister_device(efx->board_info.ioexp_client);
266 i2c_unregister_device(efx->board_info.hwmon_client);
267}
268
269static int sfe4001_check_hw(struct efx_nic *efx)
270{
271 s32 status;
272
273 /* If XAUI link is up then do not monitor */
274 if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
275 return 0;
276
277 /* Check the powered status of the PHY. Lack of power implies that
278 * the MAX6647 has shut down power to it, probably due to a temp.
279 * alarm. Reading the power status rather than the MAX6647 status
280 * directly because the later is read-to-clear and would thus
281 * start to power up the PHY again when polled, causing us to blip
282 * the power undesirably.
283 * We know we can read from the IO expander because we did
284 * it during power-on. Assume failure now is bad news. */
285 status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN);
286 if (status >= 0 &&
287 (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
288 return 0;
289
290 /* Use board power control, not PHY power control */
291 sfe4001_poweroff(efx);
292 efx->phy_mode = PHY_MODE_OFF;
293
294 return (status < 0) ? -EIO : -ERANGE;
295}
296
297static struct i2c_board_info sfe4001_hwmon_info = {
298 I2C_BOARD_INFO("max6647", 0x4e),
299};
300
301/* This board uses an I2C expander to provider power to the PHY, which needs to
302 * be turned on before the PHY can be used.
303 * Context: Process context, rtnl lock held
304 */
305int sfe4001_init(struct efx_nic *efx)
306{
307 int rc;
308
309#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
310 efx->board_info.hwmon_client =
311 i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
312#else
313 efx->board_info.hwmon_client =
314 i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr);
315#endif
316 if (!efx->board_info.hwmon_client)
317 return -EIO;
318
319 /* Raise board/PHY high limit from 85 to 90 degrees Celsius */
320 rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client,
321 MAX664X_REG_WLHO, 90);
322 if (rc)
323 goto fail_hwmon;
324
325 efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
326 if (!efx->board_info.ioexp_client) {
327 rc = -EIO;
328 goto fail_hwmon;
329 }
330
331 /* 10Xpress has fixed-function LED pins, so there is no board-specific
332 * blink code. */
333 efx->board_info.blink = tenxpress_phy_blink;
334
335 efx->board_info.monitor = sfe4001_check_hw;
336 efx->board_info.fini = sfe4001_fini;
337
338 if (efx->phy_mode & PHY_MODE_SPECIAL) {
339 /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
340 * will fail. */
341 efx_stats_disable(efx);
342 }
343 rc = sfe4001_poweron(efx);
344 if (rc)
345 goto fail_ioexp;
346
347 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
348 if (rc)
349 goto fail_on;
350
351 EFX_INFO(efx, "PHY is powered on\n");
352 return 0;
353
354fail_on:
355 sfe4001_poweroff(efx);
356fail_ioexp:
357 i2c_unregister_device(efx->board_info.ioexp_client);
358fail_hwmon:
359 i2c_unregister_device(efx->board_info.hwmon_client);
360 return rc;
361}
362
363static int sfn4111t_check_hw(struct efx_nic *efx)
364{
365 s32 status;
366
367 /* If XAUI link is up then do not monitor */
368 if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
369 return 0;
370
371 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
372 status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client,
373 MAX664X_REG_RSL);
374 if (status < 0)
375 return -EIO;
376 if (status & 0x57)
377 return -ERANGE;
378 return 0;
379}
380
381static void sfn4111t_fini(struct efx_nic *efx)
382{
383 EFX_INFO(efx, "%s\n", __func__);
384
385 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
386 i2c_unregister_device(efx->board_info.hwmon_client);
387}
388
389static struct i2c_board_info sfn4111t_a0_hwmon_info = {
390 I2C_BOARD_INFO("max6647", 0x4e),
391};
392
393static struct i2c_board_info sfn4111t_r5_hwmon_info = {
394 I2C_BOARD_INFO("max6646", 0x4d),
395};
396
397int sfn4111t_init(struct efx_nic *efx)
398{
399 int i = 0;
400 int rc;
401
402 efx->board_info.hwmon_client =
403 i2c_new_device(&efx->i2c_adap,
404 (efx->board_info.minor < 5) ?
405 &sfn4111t_a0_hwmon_info :
406 &sfn4111t_r5_hwmon_info);
407 if (!efx->board_info.hwmon_client)
408 return -EIO;
409
410 efx->board_info.blink = tenxpress_phy_blink;
411 efx->board_info.monitor = sfn4111t_check_hw;
412 efx->board_info.fini = sfn4111t_fini;
413
414 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
415 if (rc)
416 goto fail_hwmon;
417
418 do {
419 if (efx->phy_mode & PHY_MODE_SPECIAL) {
420 /* PHY may not generate a 156.25 MHz clock and MAC
421 * stats fetch will fail. */
422 efx_stats_disable(efx);
423 sfn4111t_reset(efx);
424 }
425 rc = sft9001_wait_boot(efx);
426 if (rc == 0)
427 return 0;
428 efx->phy_mode = PHY_MODE_SPECIAL;
429 } while (rc == -EINVAL && ++i < 2);
430
431 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
432fail_hwmon:
433 i2c_unregister_device(efx->board_info.hwmon_client);
434 return rc;
435}
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
new file mode 100644
index 000000000000..de07a4f031b2
--- /dev/null
+++ b/drivers/net/sfc/siena.c
@@ -0,0 +1,604 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/pci.h>
14#include <linux/module.h>
15#include "net_driver.h"
16#include "bitfield.h"
17#include "efx.h"
18#include "nic.h"
19#include "mac.h"
20#include "spi.h"
21#include "regs.h"
22#include "io.h"
23#include "phy.h"
24#include "workarounds.h"
25#include "mcdi.h"
26#include "mcdi_pcol.h"
27
28/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
29
30static void siena_init_wol(struct efx_nic *efx);
31
32
/* Program a channel's interrupt moderation into the Siena timer
 * command register (page-mapped per channel).  A non-zero
 * irq_moderation arms the timer in interrupt hold-off mode with a
 * hardware count of (irq_moderation - 1); zero disables the timer. */
33static void siena_push_irq_moderation(struct efx_channel *channel)
34{
35	efx_dword_t timer_cmd;
36
37	if (channel->irq_moderation)
38		EFX_POPULATE_DWORD_2(timer_cmd,
39				     FRF_CZ_TC_TIMER_MODE,
40				     FFE_CZ_TIMER_MODE_INT_HLDOFF,
41				     FRF_CZ_TC_TIMER_VAL,
42				     channel->irq_moderation - 1);
43	else
44		EFX_POPULATE_DWORD_2(timer_cmd,
45				     FRF_CZ_TC_TIMER_MODE,
46				     FFE_CZ_TIMER_MODE_DIS,
47				     FRF_CZ_TC_TIMER_VAL, 0);
	/* Locked page write: serialises against other per-channel writes */
48	efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
49			       channel->channel);
50}
51
/* Push the multicast hash filter to the management controller via
 * MCDI.  Caller must hold efx->mac_lock (asserted below); no
 * response payload is expected and errors are not propagated. */
52static void siena_push_multicast_hash(struct efx_nic *efx)
53{
54	WARN_ON(!mutex_is_locked(&efx->mac_lock));
55
56	efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
57		     efx->multicast_hash.byte, sizeof(efx->multicast_hash),
58		     NULL, 0, NULL);
59}
60
/* MDIO write callback (clause-45 addressing) routed through the MC.
 * Returns 0 on success, a negative errno if the MCDI call itself
 * fails, or -EIO if the MC reports a non-GOOD MDIO status. */
61static int siena_mdio_write(struct net_device *net_dev,
62			    int prtad, int devad, u16 addr, u16 value)
63{
64	struct efx_nic *efx = netdev_priv(net_dev);
65	uint32_t status;
66	int rc;
67
68	rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
69				 addr, value, &status);
70	if (rc)
71		return rc;
	/* MCDI transport succeeded but the MDIO transaction itself failed */
72	if (status != MC_CMD_MDIO_STATUS_GOOD)
73		return -EIO;
74
75	return 0;
76}
77
/* MDIO read callback (clause-45 addressing) routed through the MC.
 * Returns the (non-negative) 16-bit register value on success, a
 * negative errno if the MCDI call fails, or -EIO if the MC reports
 * a non-GOOD MDIO status. */
78static int siena_mdio_read(struct net_device *net_dev,
79			   int prtad, int devad, u16 addr)
80{
81	struct efx_nic *efx = netdev_priv(net_dev);
82	uint16_t value;
83	uint32_t status;
84	int rc;
85
86	rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
87				addr, &value, &status);
88	if (rc)
89		return rc;
90	if (status != MC_CMD_MDIO_STATUS_GOOD)
91		return -EIO;
92
	/* u16 value widens to a non-negative int, distinct from errnos */
93	return (int)value;
94}
95
/* This call is responsible for hooking in the MAC and PHY operations */
/* Sets up the MCDI-based PHY ops and MDIO callbacks, probes the PHY,
 * seeds the initial link state, and allocates the DMA buffer that the
 * MC fills with MAC statistics.  Returns 0 or a negative errno. */
97static int siena_probe_port(struct efx_nic *efx)
98{
99	int rc;
100
101	/* Hook in PHY operations table */
102	efx->phy_op = &efx_mcdi_phy_ops;
103
104	/* Set up MDIO structure for PHY */
105	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
106	efx->mdio.mdio_read = siena_mdio_read;
107	efx->mdio.mdio_write = siena_mdio_write;
108
109	/* Fill out MDIO structure and loopback modes */
110	rc = efx->phy_op->probe(efx);
111	if (rc != 0)
112		return rc;
113
114	/* Initial assumption */
115	efx->link_state.speed = 10000;
116	efx->link_state.fd = true;
117	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
118
119	/* Allocate buffer for stats */
120	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
121				  MC_CMD_MAC_NSTATS * sizeof(u64));
122	if (rc)
123		return rc;
124	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
125		(u64)efx->stats_buffer.dma_addr,
126		efx->stats_buffer.addr,
127		(u64)virt_to_phys(efx->stats_buffer.addr));
128
	/* NOTE(review): trailing (0, 0, 1) presumably means len=0,
	 * periodic-off, clear — confirm against efx_mcdi_mac_stats() */
129	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
130
131	return 0;
132}
133
/* Release the MAC statistics DMA buffer allocated by siena_probe_port() */
134void siena_remove_port(struct efx_nic *efx)
135{
136	efx_nic_free_buffer(efx, &efx->stats_buffer);
137}
138
/* Register self-test table: each entry names a register and the mask
 * of bits the test may safely toggle (written as four 32-bit words of
 * an oword).  Consumed by efx_nic_test_registers() below. */
139static const struct efx_nic_register_test siena_register_tests[] = {
140	{ FR_AZ_ADR_REGION,
141	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
142	{ FR_CZ_USR_EV_CFG,
143	  EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
144	{ FR_AZ_RX_CFG,
145	  EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
146	{ FR_AZ_TX_CFG,
147	  EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
148	{ FR_AZ_TX_RESERVED,
149	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
150	{ FR_AZ_SRM_TX_DC_CFG,
151	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
152	{ FR_AZ_RX_DC_CFG,
153	  EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
154	{ FR_AZ_RX_DC_PF_WM,
155	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
156	{ FR_BZ_DP_CTRL,
157	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
158	{ FR_BZ_RX_RSS_TKEY,
159	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
160	{ FR_CZ_RX_RSS_IPV6_REG1,
161	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
162	{ FR_CZ_RX_RSS_IPV6_REG2,
163	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
164	{ FR_CZ_RX_RSS_IPV6_REG3,
165	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
166};
167
/* Run the generic read/write register self-test over the Siena table
 * above; returns 0 on success or a negative errno from the core test. */
168static int siena_test_registers(struct efx_nic *efx)
169{
170	return efx_nic_test_registers(efx, siena_register_tests,
171				      ARRAY_SIZE(siena_register_tests));
172}
173
174/**************************************************************************
175 *
176 * Device reset
177 *
178 **************************************************************************
179 */
180
/* Reset the NIC via MCDI: a WORLD reset reboots the management
 * controller itself, anything else resets just this port. */
181static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
182{
183
184	if (method == RESET_TYPE_WORLD)
185		return efx_mcdi_reset_mc(efx);
186	else
187		return efx_mcdi_reset_port(efx);
188}
189
/* Read the non-volatile board configuration from the MC; currently
 * only the MAC address is fetched.  Returns 0 or a negative errno
 * (-EINVAL is treated as "NVRAM invalid" by the caller). */
190static int siena_probe_nvconfig(struct efx_nic *efx)
191{
192	int rc;
193
194	rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
195	if (rc)
196		return rc;
197
198	return 0;
199}
200
/* Probe the Siena NIC: allocate per-NIC state, bring up MCDI,
 * recover any firmware assertion, read the firmware version, attach
 * the driver to the MC (so the BMC yields link/filter control),
 * reset the port, set up WoL state, allocate the interrupt status
 * block and read the NVRAM configuration.
 *
 * Error labels unwind in reverse order; note fail4 falls through to
 * fail3 (both detach from the MC) and fail2 falls through to fail1. */
201static int siena_probe_nic(struct efx_nic *efx)
202{
203	struct siena_nic_data *nic_data;
204	bool already_attached = 0;
205	int rc;
206
207	/* Allocate storage for hardware specific data */
208	nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
209	if (!nic_data)
210		return -ENOMEM;
211	efx->nic_data = nic_data;
212
213	if (efx_nic_fpga_ver(efx) != 0) {
214		EFX_ERR(efx, "Siena FPGA not supported\n");
215		rc = -ENODEV;
216		goto fail1;
217	}
218
219	efx_mcdi_init(efx);
220
221	/* Recover from a failed assertion before probing */
222	rc = efx_mcdi_handle_assertion(efx);
223	if (rc)
224		goto fail1;
225
226	rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
227	if (rc) {
228		EFX_ERR(efx, "Failed to read MCPU firmware version - "
229			"rc %d\n", rc);
230		goto fail1; /* MCPU absent? */
231	}
232
233	/* Let the BMC know that the driver is now in charge of link and
234	 * filter settings. We must do this before we reset the NIC */
235	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
236	if (rc) {
237		EFX_ERR(efx, "Unable to register driver with MCPU\n");
238		goto fail2;
239	}
240	if (already_attached)
241		/* Not a fatal error */
242		EFX_ERR(efx, "Host already registered with MCPU\n");
243
244	/* Now we can reset the NIC */
245	rc = siena_reset_hw(efx, RESET_TYPE_ALL);
246	if (rc) {
247		EFX_ERR(efx, "failed to reset NIC\n");
248		goto fail3;
249	}
250
251	siena_init_wol(efx);
252
253	/* Allocate memory for INT_KER */
254	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
255	if (rc)
256		goto fail4;
	/* Hardware requires the interrupt status block 16-byte aligned */
257	BUG_ON(efx->irq_status.dma_addr & 0x0f);
258
259	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
260		(unsigned long long)efx->irq_status.dma_addr,
261		efx->irq_status.addr,
262		(unsigned long long)virt_to_phys(efx->irq_status.addr));
263
264	/* Read in the non-volatile configuration */
265	rc = siena_probe_nvconfig(efx);
266	if (rc == -EINVAL) {
267		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
268		efx->phy_type = PHY_TYPE_NONE;
269		efx->mdio.prtad = MDIO_PRTAD_NONE;
270	} else if (rc) {
271		goto fail5;
272	}
273
274	return 0;
275
276fail5:
277	efx_nic_free_buffer(efx, &efx->irq_status);
278fail4:
279fail3:
280	efx_mcdi_drv_attach(efx, false, NULL);
281fail2:
282fail1:
	/* NOTE(review): efx->nic_data is left pointing at freed memory
	 * here — callers presumably never touch it after probe failure,
	 * but consider also setting efx->nic_data = NULL */
283	kfree(efx->nic_data);
284	return rc;
285}
286
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 * Returns 0 or a negative errno from the MCDI calls. */
291static int siena_init_nic(struct efx_nic *efx)
292{
293	efx_oword_t temp;
294	int rc;
295
296	/* Recover from a failed assertion post-reset */
297	rc = efx_mcdi_handle_assertion(efx);
298	if (rc)
299		return rc;
300
301	/* Squash TX of packets of 16 bytes or less */
302	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
303	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
304	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
305
306	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
307	 * descriptors (which is bad).
308	 */
309	efx_reado(efx, &temp, FR_AZ_TX_CFG);
310	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
311	EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
312	efx_writeo(efx, &temp, FR_AZ_TX_CFG);
313
	/* Disable descriptor push, enable RX ingress */
314	efx_reado(efx, &temp, FR_AZ_RX_CFG);
315	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
316	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
317	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
318
319	if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
320		/* No MCDI operation has been defined to set thresholds */
321		EFX_ERR(efx, "ignoring RX flow control thresholds\n");
322
323	/* Enable event logging */
324	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
325	if (rc)
326		return rc;
327
328	/* Set destination of both TX and RX Flush events */
329	EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
330	efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
331
	/* Disable user events */
332	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
333	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
334
	/* Shared Falcon-architecture initialisation (descriptor caches etc.) */
335	efx_nic_init_common(efx);
336	return 0;
337}
338
/* Undo siena_probe_nic(): free the interrupt status block, reset the
 * port, detach from the MC (returning control to the BMC), and free
 * the per-NIC private state. */
339static void siena_remove_nic(struct efx_nic *efx)
340{
341	efx_nic_free_buffer(efx, &efx->irq_status);
342
343	siena_reset_hw(efx, RESET_TYPE_ALL);
344
345	/* Relinquish the device back to the BMC */
346	if (efx_nic_has_mc(efx))
347		efx_mcdi_drv_attach(efx, false, NULL);
348
349	/* Tear down the private nic state */
350	kfree(efx->nic_data);
351	efx->nic_data = NULL;
352}
353
/* Sentinel written to GENERATION_END before stats DMA is enabled;
 * while present, the buffer holds no valid snapshot. */
354#define STATS_GENERATION_INVALID ((u64)(-1))
355
/* Copy one MC-written statistics snapshot from the DMA buffer into
 * efx->mac_stats.  The MC brackets each snapshot with matching
 * GENERATION_START/GENERATION_END counters; the rmb() barriers order
 * our reads of the counters against reads of the stats themselves.
 * Returns 0 if a consistent snapshot was copied (or none available
 * yet), -EAGAIN if the MC rewrote the buffer mid-copy. */
356static int siena_try_update_nic_stats(struct efx_nic *efx)
357{
358	u64 *dma_stats;
359	struct efx_mac_stats *mac_stats;
360	u64 generation_start;
361	u64 generation_end;
362
363	mac_stats = &efx->mac_stats;
364	dma_stats = (u64 *)efx->stats_buffer.addr;
365
366	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
367	if (generation_end == STATS_GENERATION_INVALID)
368		return 0;
	/* Read GENERATION_END before any of the statistics */
369	rmb();
370
371#define MAC_STAT(M, D)						\
372	mac_stats->M = dma_stats[MC_CMD_MAC_ ## D]
373
374	MAC_STAT(tx_bytes, TX_BYTES);
375	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
	/* Derived: good = total - bad */
376	mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
377				    mac_stats->tx_bad_bytes);
378	MAC_STAT(tx_packets, TX_PKTS);
379	MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
380	MAC_STAT(tx_pause, TX_PAUSE_PKTS);
381	MAC_STAT(tx_control, TX_CONTROL_PKTS);
382	MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
383	MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
384	MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
385	MAC_STAT(tx_lt64, TX_LT64_PKTS);
386	MAC_STAT(tx_64, TX_64_PKTS);
387	MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
388	MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
389	MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
390	MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
391	MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
392	MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
393	MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
394	mac_stats->tx_collision = 0;
395	MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
396	MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
397	MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
398	MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
399	MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
	/* Derived: total collisions is the sum of the collision classes */
400	mac_stats->tx_collision = (mac_stats->tx_single_collision +
401				   mac_stats->tx_multiple_collision +
402				   mac_stats->tx_excessive_collision +
403				   mac_stats->tx_late_collision);
404	MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
405	MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
406	MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
407	MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
408	MAC_STAT(rx_bytes, RX_BYTES);
409	MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
410	mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
411				    mac_stats->rx_bad_bytes);
412	MAC_STAT(rx_packets, RX_PKTS);
413	MAC_STAT(rx_good, RX_GOOD_PKTS);
414	mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good;
415	MAC_STAT(rx_pause, RX_PAUSE_PKTS);
416	MAC_STAT(rx_control, RX_CONTROL_PKTS);
417	MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
418	MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
419	MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
420	MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
421	MAC_STAT(rx_64, RX_64_PKTS);
422	MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
423	MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
424	MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
425	MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
426	MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
427	MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
428	MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
	/* Per-size bad-packet breakdown is not provided by the MC */
429	mac_stats->rx_bad_lt64 = 0;
430	mac_stats->rx_bad_64_to_15xx = 0;
431	mac_stats->rx_bad_15xx_to_jumbo = 0;
432	MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
433	MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
434	mac_stats->rx_missed = 0;
435	MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
436	MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
437	MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
438	MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
439	MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
440	mac_stats->rx_good_lt64 = 0;
441
442	efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS];
443
444#undef MAC_STAT
445
	/* Read all statistics before GENERATION_START */
446	rmb();
447	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	/* Mismatch means the MC updated the buffer while we were copying */
448	if (generation_end != generation_start)
449		return -EAGAIN;
450
451	return 0;
452}
453
/* Busy-wait until a consistent statistics snapshot has been copied;
 * each -EAGAIN means the MC rewrote the buffer mid-read, so retry. */
454static void siena_update_nic_stats(struct efx_nic *efx)
455{
456	while (siena_try_update_nic_stats(efx) == -EAGAIN)
457		cpu_relax();
458}
459
/* Enable MC statistics DMA into the stats buffer.  The generation-end
 * slot is first poisoned so siena_try_update_nic_stats() ignores the
 * buffer until the MC has written a complete snapshot. */
460static void siena_start_nic_stats(struct efx_nic *efx)
461{
462	u64 *dma_stats = (u64 *)efx->stats_buffer.addr;
463
464	dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
465
466	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
467			   MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
468}
469
/* Disable MC statistics DMA (zero length, periodic updates off) */
470static void siena_stop_nic_stats(struct efx_nic *efx)
471{
472	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
473}
474
/* Format the cached MC firmware version (four 16-bit fields packed
 * into a 64-bit value, most-significant first) as "a.b.c.d" into buf. */
475void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
476{
477	struct siena_nic_data *nic_data = efx->nic_data;
478	snprintf(buf, len, "%u.%u.%u.%u",
479		 (unsigned int)(nic_data->fw_version >> 48),
480		 (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
481		 (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
482		 (unsigned int)(nic_data->fw_version & 0xffff));
483}
484
485/**************************************************************************
486 *
487 * Wake on LAN
488 *
489 **************************************************************************
490 */
491
/* ethtool get_wol hook: only magic-packet wake is supported; it is
 * reported active iff an MC WoL filter is currently installed
 * (wol_filter_id != -1).  No SecureOn password support. */
492static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
493{
494	struct siena_nic_data *nic_data = efx->nic_data;
495
496	wol->supported = WAKE_MAGIC;
497	if (nic_data->wol_filter_id != -1)
498		wol->wolopts = WAKE_MAGIC;
499	else
500		wol->wolopts = 0;
501	memset(&wol->sopass, 0, sizeof(wol->sopass));
502}
503
504
/* ethtool set_wol hook.  Only WAKE_MAGIC may be requested (-EINVAL
 * otherwise).  Enabling installs a magic-packet filter on the MC and
 * arms PCI D3 wake; disabling resets all MC WoL filters and disarms
 * D3 wake.  Returns 0 or a negative errno. */
505static int siena_set_wol(struct efx_nic *efx, u32 type)
506{
507	struct siena_nic_data *nic_data = efx->nic_data;
508	int rc;
509
510	if (type & ~WAKE_MAGIC)
511		return -EINVAL;
512
513	if (type & WAKE_MAGIC) {
		/* Replace any previously-installed filter */
514		if (nic_data->wol_filter_id != -1)
515			efx_mcdi_wol_filter_remove(efx,
516						   nic_data->wol_filter_id);
517		rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address,
518						   &nic_data->wol_filter_id);
519		if (rc)
520			goto fail;
521
522		pci_wake_from_d3(efx->pci_dev, true);
523	} else {
524		rc = efx_mcdi_wol_filter_reset(efx);
525		nic_data->wol_filter_id = -1;
526		pci_wake_from_d3(efx->pci_dev, false);
527		if (rc)
528			goto fail;
529	}
530
531	return 0;
532 fail:
533	EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc);
534	return rc;
535}
536
537
/* Synchronise the driver's WoL state with the MC at probe/resume:
 * query whether a magic-packet filter already exists; on query
 * failure, reset all MC WoL filters so both sides agree on "none";
 * if one exists, arm PCI D3 wake to match. */
538static void siena_init_wol(struct efx_nic *efx)
539{
540	struct siena_nic_data *nic_data = efx->nic_data;
541	int rc;
542
543	rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
544
545	if (rc != 0) {
546		/* If it failed, attempt to get into a synchronised
547		 * state with MC by resetting any set WoL filters */
548		efx_mcdi_wol_filter_reset(efx);
549		nic_data->wol_filter_id = -1;
550	} else if (nic_data->wol_filter_id != -1) {
551		pci_wake_from_d3(efx->pci_dev, true);
552	}
553}
554
555
556/**************************************************************************
557 *
558 * Revision-dependent attributes used by efx.c and nic.c
559 *
560 **************************************************************************
561 */
562
/* Operations and revision-dependent attributes for the Siena A0
 * hardware, consumed by the generic efx.c / nic.c code.  Most port
 * operations are routed through MCDI rather than direct register
 * access. */
563struct efx_nic_type siena_a0_nic_type = {
564	.probe = siena_probe_nic,
565	.remove = siena_remove_nic,
566	.init = siena_init_nic,
567	.fini = efx_port_dummy_op_void,
	/* No periodic monitor: the MC handles hardware monitoring */
568	.monitor = NULL,
569	.reset = siena_reset_hw,
570	.probe_port = siena_probe_port,
571	.remove_port = siena_remove_port,
572	.prepare_flush = efx_port_dummy_op_void,
573	.update_stats = siena_update_nic_stats,
574	.start_stats = siena_start_nic_stats,
575	.stop_stats = siena_stop_nic_stats,
576	.set_id_led = efx_mcdi_set_id_led,
577	.push_irq_moderation = siena_push_irq_moderation,
578	.push_multicast_hash = siena_push_multicast_hash,
579	.reconfigure_port = efx_mcdi_phy_reconfigure,
580	.get_wol = siena_get_wol,
581	.set_wol = siena_set_wol,
582	.resume_wol = siena_init_wol,
583	.test_registers = siena_test_registers,
584	.default_mac_ops = &efx_mcdi_mac_operations,
585
586	.revision = EFX_REV_SIENA_A0,
	/* Map up to and including the MC shared-memory target registers */
587	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
588			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
589	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
590	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
591	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
592	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
593	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
594	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
595	.rx_buffer_padding = 0,
596	.max_interrupt_mode = EFX_INT_MODE_MSIX,
597	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
598				   * interrupt handler only supports 32
599				   * channels */
600	.tx_dc_base = 0x88000,
601	.rx_dc_base = 0x68000,
602	.offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM,
603	.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
604};
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 1b1ceb411671..8bf4fce0813a 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -36,8 +36,6 @@
36 36
37/** 37/**
38 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device 38 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
39 * @efx: The Efx controller that owns this device
40 * @mtd: MTD state
41 * @device_id: Controller's id for the device 39 * @device_id: Controller's id for the device
42 * @size: Size (in bytes) 40 * @size: Size (in bytes)
43 * @addr_len: Number of address bytes in read/write commands 41 * @addr_len: Number of address bytes in read/write commands
@@ -54,10 +52,6 @@
54 * Write commands are limited to blocks with this size and alignment. 52 * Write commands are limited to blocks with this size and alignment.
55 */ 53 */
56struct efx_spi_device { 54struct efx_spi_device {
57 struct efx_nic *efx;
58#ifdef CONFIG_SFC_MTD
59 void *mtd;
60#endif
61 int device_id; 55 int device_id;
62 unsigned int size; 56 unsigned int size;
63 unsigned int addr_len; 57 unsigned int addr_len;
@@ -67,12 +61,16 @@ struct efx_spi_device {
67 unsigned int block_size; 61 unsigned int block_size;
68}; 62};
69 63
70int falcon_spi_cmd(const struct efx_spi_device *spi, unsigned int command, 64int falcon_spi_cmd(struct efx_nic *efx,
65 const struct efx_spi_device *spi, unsigned int command,
71 int address, const void* in, void *out, size_t len); 66 int address, const void* in, void *out, size_t len);
72int falcon_spi_wait_write(const struct efx_spi_device *spi); 67int falcon_spi_wait_write(struct efx_nic *efx,
73int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, 68 const struct efx_spi_device *spi);
69int falcon_spi_read(struct efx_nic *efx,
70 const struct efx_spi_device *spi, loff_t start,
74 size_t len, size_t *retlen, u8 *buffer); 71 size_t len, size_t *retlen, u8 *buffer);
75int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, 72int falcon_spi_write(struct efx_nic *efx,
73 const struct efx_spi_device *spi, loff_t start,
76 size_t len, size_t *retlen, const u8 *buffer); 74 size_t len, size_t *retlen, const u8 *buffer);
77 75
78/* 76/*
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f4d509015f75..ca11572a49a9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc. 3 * Copyright 2007-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -12,10 +12,9 @@
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include "efx.h" 13#include "efx.h"
14#include "mdio_10g.h" 14#include "mdio_10g.h"
15#include "falcon.h" 15#include "nic.h"
16#include "phy.h" 16#include "phy.h"
17#include "falcon_hwdefs.h" 17#include "regs.h"
18#include "boards.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h" 19#include "selftest.h"
21 20
@@ -31,13 +30,13 @@
31#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ 30#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
32 (1 << LOOPBACK_PCS) | \ 31 (1 << LOOPBACK_PCS) | \
33 (1 << LOOPBACK_PMAPMD) | \ 32 (1 << LOOPBACK_PMAPMD) | \
34 (1 << LOOPBACK_NETWORK)) 33 (1 << LOOPBACK_PHYXS_WS))
35 34
36#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \ 35#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \
37 (1 << LOOPBACK_PHYXS) | \ 36 (1 << LOOPBACK_PHYXS) | \
38 (1 << LOOPBACK_PCS) | \ 37 (1 << LOOPBACK_PCS) | \
39 (1 << LOOPBACK_PMAPMD) | \ 38 (1 << LOOPBACK_PMAPMD) | \
40 (1 << LOOPBACK_NETWORK)) 39 (1 << LOOPBACK_PHYXS_WS))
41 40
42/* We complain if we fail to see the link partner as 10G capable this many 41/* We complain if we fail to see the link partner as 10G capable this many
43 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 42 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
@@ -84,9 +83,9 @@
84#define PMA_PMD_LED_FLASH (3) 83#define PMA_PMD_LED_FLASH (3)
85#define PMA_PMD_LED_MASK 3 84#define PMA_PMD_LED_MASK 3
86/* All LEDs under hardware control */ 85/* All LEDs under hardware control */
87#define PMA_PMD_LED_FULL_AUTO (0) 86#define SFT9001_PMA_PMD_LED_DEFAULT 0
88/* Green and Amber under hardware control, Red off */ 87/* Green and Amber under hardware control, Red off */
89#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 88#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
90 89
91#define PMA_PMD_SPEED_ENABLE_REG 49192 90#define PMA_PMD_SPEED_ENABLE_REG 49192
92#define PMA_PMD_100TX_ADV_LBN 1 91#define PMA_PMD_100TX_ADV_LBN 1
@@ -200,15 +199,16 @@ static ssize_t set_phy_short_reach(struct device *dev,
200 const char *buf, size_t count) 199 const char *buf, size_t count)
201{ 200{
202 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 201 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
202 int rc;
203 203
204 rtnl_lock(); 204 rtnl_lock();
205 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, 205 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
206 MDIO_PMA_10GBT_TXPWR_SHORT, 206 MDIO_PMA_10GBT_TXPWR_SHORT,
207 count != 0 && *buf != '0'); 207 count != 0 && *buf != '0');
208 efx_reconfigure_port(efx); 208 rc = efx_reconfigure_port(efx);
209 rtnl_unlock(); 209 rtnl_unlock();
210 210
211 return count; 211 return rc < 0 ? rc : (ssize_t)count;
212} 212}
213 213
214static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach, 214static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
@@ -292,17 +292,36 @@ static int tenxpress_init(struct efx_nic *efx)
292 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, 292 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
293 1 << PMA_PMA_LED_ACTIVITY_LBN, true); 293 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
294 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, 294 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
295 PMA_PMD_LED_DEFAULT); 295 SFX7101_PMA_PMD_LED_DEFAULT);
296 } 296 }
297 297
298 return 0; 298 return 0;
299} 299}
300 300
301static int sfx7101_phy_probe(struct efx_nic *efx)
302{
303 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
304 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
305 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
306 return 0;
307}
308
309static int sft9001_phy_probe(struct efx_nic *efx)
310{
311 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
312 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
313 efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
314 FALCON_GMAC_LOOPBACKS);
315 return 0;
316}
317
301static int tenxpress_phy_init(struct efx_nic *efx) 318static int tenxpress_phy_init(struct efx_nic *efx)
302{ 319{
303 struct tenxpress_phy_data *phy_data; 320 struct tenxpress_phy_data *phy_data;
304 int rc = 0; 321 int rc = 0;
305 322
323 falcon_board(efx)->type->init_phy(efx);
324
306 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 325 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
307 if (!phy_data) 326 if (!phy_data)
308 return -ENOMEM; 327 return -ENOMEM;
@@ -333,6 +352,15 @@ static int tenxpress_phy_init(struct efx_nic *efx)
333 if (rc < 0) 352 if (rc < 0)
334 goto fail; 353 goto fail;
335 354
355 /* Initialise advertising flags */
356 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
357 ADVERTISED_10000baseT_Full);
358 if (efx->phy_type != PHY_TYPE_SFX7101)
359 efx->link_advertising |= (ADVERTISED_1000baseT_Full |
360 ADVERTISED_100baseT_Full);
361 efx_link_set_wanted_fc(efx, efx->wanted_fc);
362 efx_mdio_an_reconfigure(efx);
363
336 if (efx->phy_type == PHY_TYPE_SFT9001B) { 364 if (efx->phy_type == PHY_TYPE_SFT9001B) {
337 rc = device_create_file(&efx->pci_dev->dev, 365 rc = device_create_file(&efx->pci_dev->dev,
338 &dev_attr_phy_short_reach); 366 &dev_attr_phy_short_reach);
@@ -363,7 +391,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
363 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 391 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
364 * a special software reset can glitch the XGMAC sufficiently for stats 392 * a special software reset can glitch the XGMAC sufficiently for stats
365 * requests to fail. */ 393 * requests to fail. */
366 efx_stats_disable(efx); 394 falcon_stop_nic_stats(efx);
367 395
368 /* Initiate reset */ 396 /* Initiate reset */
369 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG); 397 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
@@ -385,7 +413,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
385 /* Wait for the XGXS state machine to churn */ 413 /* Wait for the XGXS state machine to churn */
386 mdelay(10); 414 mdelay(10);
387out: 415out:
388 efx_stats_enable(efx); 416 falcon_start_nic_stats(efx);
389 return rc; 417 return rc;
390} 418}
391 419
@@ -489,95 +517,76 @@ static void tenxpress_low_power(struct efx_nic *efx)
489 !!(efx->phy_mode & PHY_MODE_LOW_POWER)); 517 !!(efx->phy_mode & PHY_MODE_LOW_POWER));
490} 518}
491 519
492static void tenxpress_phy_reconfigure(struct efx_nic *efx) 520static int tenxpress_phy_reconfigure(struct efx_nic *efx)
493{ 521{
494 struct tenxpress_phy_data *phy_data = efx->phy_data; 522 struct tenxpress_phy_data *phy_data = efx->phy_data;
495 struct ethtool_cmd ecmd;
496 bool phy_mode_change, loop_reset; 523 bool phy_mode_change, loop_reset;
497 524
498 if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { 525 if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
499 phy_data->phy_mode = efx->phy_mode; 526 phy_data->phy_mode = efx->phy_mode;
500 return; 527 return 0;
501 } 528 }
502 529
503 tenxpress_low_power(efx);
504
505 phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && 530 phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
506 phy_data->phy_mode != PHY_MODE_NORMAL); 531 phy_data->phy_mode != PHY_MODE_NORMAL);
507 loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) || 532 loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
508 LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); 533 LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
509 534
510 if (loop_reset || phy_mode_change) { 535 if (loop_reset || phy_mode_change) {
511 int rc; 536 tenxpress_special_reset(efx);
512
513 efx->phy_op->get_settings(efx, &ecmd);
514
515 if (loop_reset || phy_mode_change) {
516 tenxpress_special_reset(efx);
517
518 /* Reset XAUI if we were in 10G, and are staying
519 * in 10G. If we're moving into and out of 10G
520 * then xaui will be reset anyway */
521 if (EFX_IS10G(efx))
522 falcon_reset_xaui(efx);
523 }
524 537
525 rc = efx->phy_op->set_settings(efx, &ecmd); 538 /* Reset XAUI if we were in 10G, and are staying
526 WARN_ON(rc); 539 * in 10G. If we're moving into and out of 10G
540 * then xaui will be reset anyway */
541 if (EFX_IS10G(efx))
542 falcon_reset_xaui(efx);
527 } 543 }
528 544
545 tenxpress_low_power(efx);
529 efx_mdio_transmit_disable(efx); 546 efx_mdio_transmit_disable(efx);
530 efx_mdio_phy_reconfigure(efx); 547 efx_mdio_phy_reconfigure(efx);
531 tenxpress_ext_loopback(efx); 548 tenxpress_ext_loopback(efx);
549 efx_mdio_an_reconfigure(efx);
532 550
533 phy_data->loopback_mode = efx->loopback_mode; 551 phy_data->loopback_mode = efx->loopback_mode;
534 phy_data->phy_mode = efx->phy_mode; 552 phy_data->phy_mode = efx->phy_mode;
535 553
536 if (efx->phy_type == PHY_TYPE_SFX7101) { 554 return 0;
537 efx->link_speed = 10000;
538 efx->link_fd = true;
539 efx->link_up = sfx7101_link_ok(efx);
540 } else {
541 efx->phy_op->get_settings(efx, &ecmd);
542 efx->link_speed = ecmd.speed;
543 efx->link_fd = ecmd.duplex == DUPLEX_FULL;
544 efx->link_up = sft9001_link_ok(efx, &ecmd);
545 }
546 efx->link_fc = efx_mdio_get_pause(efx);
547} 555}
548 556
549/* Poll PHY for interrupt */ 557static void
550static void tenxpress_phy_poll(struct efx_nic *efx) 558tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
559
560/* Poll for link state changes */
561static bool tenxpress_phy_poll(struct efx_nic *efx)
551{ 562{
552 struct tenxpress_phy_data *phy_data = efx->phy_data; 563 struct efx_link_state old_state = efx->link_state;
553 bool change = false;
554 564
555 if (efx->phy_type == PHY_TYPE_SFX7101) { 565 if (efx->phy_type == PHY_TYPE_SFX7101) {
556 bool link_ok = sfx7101_link_ok(efx); 566 efx->link_state.up = sfx7101_link_ok(efx);
557 if (link_ok != efx->link_up) { 567 efx->link_state.speed = 10000;
558 change = true; 568 efx->link_state.fd = true;
559 } else { 569 efx->link_state.fc = efx_mdio_get_pause(efx);
560 unsigned int link_fc = efx_mdio_get_pause(efx); 570
561 if (link_fc != efx->link_fc) 571 sfx7101_check_bad_lp(efx, efx->link_state.up);
562 change = true;
563 }
564 sfx7101_check_bad_lp(efx, link_ok);
565 } else if (efx->loopback_mode) {
566 bool link_ok = sft9001_link_ok(efx, NULL);
567 if (link_ok != efx->link_up)
568 change = true;
569 } else { 572 } else {
570 int status = efx_mdio_read(efx, MDIO_MMD_PMAPMD, 573 struct ethtool_cmd ecmd;
571 MDIO_PMA_LASI_STAT);
572 if (status & MDIO_PMA_LASI_LSALARM)
573 change = true;
574 }
575 574
576 if (change) 575 /* Check the LASI alarm first */
577 falcon_sim_phy_event(efx); 576 if (efx->loopback_mode == LOOPBACK_NONE &&
577 !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
578 MDIO_PMA_LASI_LSALARM))
579 return false;
578 580
579 if (phy_data->phy_mode != PHY_MODE_NORMAL) 581 tenxpress_get_settings(efx, &ecmd);
580 return; 582
583 efx->link_state.up = sft9001_link_ok(efx, &ecmd);
584 efx->link_state.speed = ecmd.speed;
585 efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
586 efx->link_state.fc = efx_mdio_get_pause(efx);
587 }
588
589 return !efx_link_state_equal(&efx->link_state, &old_state);
581} 590}
582 591
583static void tenxpress_phy_fini(struct efx_nic *efx) 592static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -604,18 +613,29 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
604} 613}
605 614
606 615
607/* Set the RX and TX LEDs and Link LED flashing. The other LEDs 616/* Override the RX, TX and link LEDs */
608 * (which probably aren't wired anyway) are left in AUTO mode */ 617void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
609void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
610{ 618{
611 int reg; 619 int reg;
612 620
613 if (blink) 621 switch (mode) {
614 reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) | 622 case EFX_LED_OFF:
615 (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) | 623 reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
616 (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN); 624 (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
617 else 625 (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
618 reg = PMA_PMD_LED_DEFAULT; 626 break;
627 case EFX_LED_ON:
628 reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
629 (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
630 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
631 break;
632 default:
633 if (efx->phy_type == PHY_TYPE_SFX7101)
634 reg = SFX7101_PMA_PMD_LED_DEFAULT;
635 else
636 reg = SFT9001_PMA_PMD_LED_DEFAULT;
637 break;
638 }
619 639
620 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); 640 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
621} 641}
@@ -624,6 +644,13 @@ static const char *const sfx7101_test_names[] = {
624 "bist" 644 "bist"
625}; 645};
626 646
647static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
648{
649 if (index < ARRAY_SIZE(sfx7101_test_names))
650 return sfx7101_test_names[index];
651 return NULL;
652}
653
627static int 654static int
628sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags) 655sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
629{ 656{
@@ -635,6 +662,9 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
635 /* BIST is automatically run after a special software reset */ 662 /* BIST is automatically run after a special software reset */
636 rc = tenxpress_special_reset(efx); 663 rc = tenxpress_special_reset(efx);
637 results[0] = rc ? -1 : 1; 664 results[0] = rc ? -1 : 1;
665
666 efx_mdio_an_reconfigure(efx);
667
638 return rc; 668 return rc;
639} 669}
640 670
@@ -650,14 +680,17 @@ static const char *const sft9001_test_names[] = {
650 "cable.pairD.length", 680 "cable.pairD.length",
651}; 681};
652 682
683static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index)
684{
685 if (index < ARRAY_SIZE(sft9001_test_names))
686 return sft9001_test_names[index];
687 return NULL;
688}
689
653static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags) 690static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
654{ 691{
655 struct ethtool_cmd ecmd;
656 int rc = 0, rc2, i, ctrl_reg, res_reg; 692 int rc = 0, rc2, i, ctrl_reg, res_reg;
657 693
658 if (flags & ETH_TEST_FL_OFFLINE)
659 efx->phy_op->get_settings(efx, &ecmd);
660
661 /* Initialise cable diagnostic results to unknown failure */ 694 /* Initialise cable diagnostic results to unknown failure */
662 for (i = 1; i < 9; ++i) 695 for (i = 1; i < 9; ++i)
663 results[i] = -1; 696 results[i] = -1;
@@ -709,9 +742,7 @@ out:
709 if (!rc) 742 if (!rc)
710 rc = rc2; 743 rc = rc2;
711 744
712 rc2 = efx->phy_op->set_settings(efx, &ecmd); 745 efx_mdio_an_reconfigure(efx);
713 if (!rc)
714 rc = rc2;
715 } 746 }
716 747
717 return rc; 748 return rc;
@@ -758,7 +789,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
758 * but doesn't advertise the correct speed. So override it */ 789 * but doesn't advertise the correct speed. So override it */
759 if (efx->loopback_mode == LOOPBACK_GPHY) 790 if (efx->loopback_mode == LOOPBACK_GPHY)
760 ecmd->speed = SPEED_1000; 791 ecmd->speed = SPEED_1000;
761 else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks) 792 else if (LOOPBACK_EXTERNAL(efx))
762 ecmd->speed = SPEED_10000; 793 ecmd->speed = SPEED_10000;
763} 794}
764 795
@@ -788,35 +819,27 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
788} 819}
789 820
790struct efx_phy_operations falcon_sfx7101_phy_ops = { 821struct efx_phy_operations falcon_sfx7101_phy_ops = {
791 .macs = EFX_XMAC, 822 .probe = sfx7101_phy_probe,
792 .init = tenxpress_phy_init, 823 .init = tenxpress_phy_init,
793 .reconfigure = tenxpress_phy_reconfigure, 824 .reconfigure = tenxpress_phy_reconfigure,
794 .poll = tenxpress_phy_poll, 825 .poll = tenxpress_phy_poll,
795 .fini = tenxpress_phy_fini, 826 .fini = tenxpress_phy_fini,
796 .clear_interrupt = efx_port_dummy_op_void,
797 .get_settings = tenxpress_get_settings, 827 .get_settings = tenxpress_get_settings,
798 .set_settings = tenxpress_set_settings, 828 .set_settings = tenxpress_set_settings,
799 .set_npage_adv = sfx7101_set_npage_adv, 829 .set_npage_adv = sfx7101_set_npage_adv,
800 .num_tests = ARRAY_SIZE(sfx7101_test_names), 830 .test_name = sfx7101_test_name,
801 .test_names = sfx7101_test_names,
802 .run_tests = sfx7101_run_tests, 831 .run_tests = sfx7101_run_tests,
803 .mmds = TENXPRESS_REQUIRED_DEVS,
804 .loopbacks = SFX7101_LOOPBACKS,
805}; 832};
806 833
807struct efx_phy_operations falcon_sft9001_phy_ops = { 834struct efx_phy_operations falcon_sft9001_phy_ops = {
808 .macs = EFX_GMAC | EFX_XMAC, 835 .probe = sft9001_phy_probe,
809 .init = tenxpress_phy_init, 836 .init = tenxpress_phy_init,
810 .reconfigure = tenxpress_phy_reconfigure, 837 .reconfigure = tenxpress_phy_reconfigure,
811 .poll = tenxpress_phy_poll, 838 .poll = tenxpress_phy_poll,
812 .fini = tenxpress_phy_fini, 839 .fini = tenxpress_phy_fini,
813 .clear_interrupt = efx_port_dummy_op_void,
814 .get_settings = tenxpress_get_settings, 840 .get_settings = tenxpress_get_settings,
815 .set_settings = tenxpress_set_settings, 841 .set_settings = tenxpress_set_settings,
816 .set_npage_adv = sft9001_set_npage_adv, 842 .set_npage_adv = sft9001_set_npage_adv,
817 .num_tests = ARRAY_SIZE(sft9001_test_names), 843 .test_name = sft9001_test_name,
818 .test_names = sft9001_test_names,
819 .run_tests = sft9001_run_tests, 844 .run_tests = sft9001_run_tests,
820 .mmds = TENXPRESS_REQUIRED_DEVS,
821 .loopbacks = SFT9001_LOOPBACKS,
822}; 845};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..e669f94e821b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc. 4 * Copyright 2005-2009 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -12,12 +12,13 @@
12#include <linux/tcp.h> 12#include <linux/tcp.h>
13#include <linux/ip.h> 13#include <linux/ip.h>
14#include <linux/in.h> 14#include <linux/in.h>
15#include <linux/ipv6.h>
16#include <net/ipv6.h>
15#include <linux/if_ether.h> 17#include <linux/if_ether.h>
16#include <linux/highmem.h> 18#include <linux/highmem.h>
17#include "net_driver.h" 19#include "net_driver.h"
18#include "tx.h"
19#include "efx.h" 20#include "efx.h"
20#include "falcon.h" 21#include "nic.h"
21#include "workarounds.h" 22#include "workarounds.h"
22 23
23/* 24/*
@@ -26,8 +27,7 @@
26 * The tx_queue descriptor ring fill-level must fall below this value 27 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue 28 * before we restart the netif queue
28 */ 29 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \ 30#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
30 (_tx_queue->efx->type->txd_ring_mask / 2u)
31 31
32/* We want to be able to nest calls to netif_stop_queue(), since each 32/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue. 33 * channel can have an individual stop on the queue.
@@ -125,6 +125,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
125} 125}
126 126
127 127
128static inline unsigned
129efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
130{
131 /* Depending on the NIC revision, we can use descriptor
132 * lengths up to 8K or 8K-1. However, since PCI Express
133 * devices must split read requests at 4K boundaries, there is
134 * little benefit from using descriptors that cross those
135 * boundaries and we keep things simple by not doing so.
136 */
137 unsigned len = (~dma_addr & 0xfff) + 1;
138
139 /* Work around hardware bug for unaligned buffers. */
140 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
141 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
142
143 return len;
144}
145
128/* 146/*
129 * Add a socket buffer to a TX queue 147 * Add a socket buffer to a TX queue
130 * 148 *
@@ -135,11 +153,13 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
135 * If any DMA mapping fails, any mapped fragments will be unmapped, 153 * If any DMA mapping fails, any mapped fragments will be unmapped,
136 * the queue's insert pointer will be restored to its original value. 154 * the queue's insert pointer will be restored to its original value.
137 * 155 *
156 * This function is split out from efx_hard_start_xmit to allow the
157 * loopback test to direct packets via specific TX queues.
158 *
138 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 159 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
139 * You must hold netif_tx_lock() to call this function. 160 * You must hold netif_tx_lock() to call this function.
140 */ 161 */
141static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, 162netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
142 struct sk_buff *skb)
143{ 163{
144 struct efx_nic *efx = tx_queue->efx; 164 struct efx_nic *efx = tx_queue->efx;
145 struct pci_dev *pci_dev = efx->pci_dev; 165 struct pci_dev *pci_dev = efx->pci_dev;
@@ -147,7 +167,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
147 skb_frag_t *fragment; 167 skb_frag_t *fragment;
148 struct page *page; 168 struct page *page;
149 int page_offset; 169 int page_offset;
150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 170 unsigned int len, unmap_len = 0, fill_level, insert_ptr;
151 dma_addr_t dma_addr, unmap_addr = 0; 171 dma_addr_t dma_addr, unmap_addr = 0;
152 unsigned int dma_len; 172 unsigned int dma_len;
153 bool unmap_single; 173 bool unmap_single;
@@ -156,7 +176,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
156 176
157 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 177 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
158 178
159 if (skb_shinfo((struct sk_buff *)skb)->gso_size) 179 if (skb_shinfo(skb)->gso_size)
160 return efx_enqueue_skb_tso(tx_queue, skb); 180 return efx_enqueue_skb_tso(tx_queue, skb);
161 181
162 /* Get size of the initial fragment */ 182 /* Get size of the initial fragment */
@@ -171,7 +191,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
171 } 191 }
172 192
173 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 193 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
174 q_space = efx->type->txd_ring_mask - 1 - fill_level; 194 q_space = EFX_TXQ_MASK - 1 - fill_level;
175 195
176 /* Map for DMA. Use pci_map_single rather than pci_map_page 196 /* Map for DMA. Use pci_map_single rather than pci_map_page
177 * since this is more efficient on machines with sparse 197 * since this is more efficient on machines with sparse
@@ -208,16 +228,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
208 &tx_queue->read_count; 228 &tx_queue->read_count;
209 fill_level = (tx_queue->insert_count 229 fill_level = (tx_queue->insert_count
210 - tx_queue->old_read_count); 230 - tx_queue->old_read_count);
211 q_space = (efx->type->txd_ring_mask - 1 - 231 q_space = EFX_TXQ_MASK - 1 - fill_level;
212 fill_level);
213 if (unlikely(q_space-- <= 0)) 232 if (unlikely(q_space-- <= 0))
214 goto stop; 233 goto stop;
215 smp_mb(); 234 smp_mb();
216 --tx_queue->stopped; 235 --tx_queue->stopped;
217 } 236 }
218 237
219 insert_ptr = (tx_queue->insert_count & 238 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
220 efx->type->txd_ring_mask);
221 buffer = &tx_queue->buffer[insert_ptr]; 239 buffer = &tx_queue->buffer[insert_ptr];
222 efx_tsoh_free(tx_queue, buffer); 240 efx_tsoh_free(tx_queue, buffer);
223 EFX_BUG_ON_PARANOID(buffer->tsoh); 241 EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -226,14 +244,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
226 EFX_BUG_ON_PARANOID(!buffer->continuation); 244 EFX_BUG_ON_PARANOID(!buffer->continuation);
227 EFX_BUG_ON_PARANOID(buffer->unmap_len); 245 EFX_BUG_ON_PARANOID(buffer->unmap_len);
228 246
229 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 247 dma_len = efx_max_tx_len(efx, dma_addr);
230 if (likely(dma_len > len)) 248 if (likely(dma_len >= len))
231 dma_len = len; 249 dma_len = len;
232 250
233 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
234 if (misalign && dma_len + misalign > 512)
235 dma_len = 512 - misalign;
236
237 /* Fill out per descriptor fields */ 251 /* Fill out per descriptor fields */
238 buffer->len = dma_len; 252 buffer->len = dma_len;
239 buffer->dma_addr = dma_addr; 253 buffer->dma_addr = dma_addr;
@@ -266,7 +280,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
266 buffer->continuation = false; 280 buffer->continuation = false;
267 281
268 /* Pass off to hardware */ 282 /* Pass off to hardware */
269 falcon_push_buffers(tx_queue); 283 efx_nic_push_buffers(tx_queue);
270 284
271 return NETDEV_TX_OK; 285 return NETDEV_TX_OK;
272 286
@@ -276,7 +290,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
276 skb_shinfo(skb)->nr_frags + 1); 290 skb_shinfo(skb)->nr_frags + 1);
277 291
278 /* Mark the packet as transmitted, and free the SKB ourselves */ 292 /* Mark the packet as transmitted, and free the SKB ourselves */
279 dev_kfree_skb_any((struct sk_buff *)skb); 293 dev_kfree_skb_any(skb);
280 goto unwind; 294 goto unwind;
281 295
282 stop: 296 stop:
@@ -289,7 +303,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
289 /* Work backwards until we hit the original insert pointer value */ 303 /* Work backwards until we hit the original insert pointer value */
290 while (tx_queue->insert_count != tx_queue->write_count) { 304 while (tx_queue->insert_count != tx_queue->write_count) {
291 --tx_queue->insert_count; 305 --tx_queue->insert_count;
292 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 306 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
293 buffer = &tx_queue->buffer[insert_ptr]; 307 buffer = &tx_queue->buffer[insert_ptr];
294 efx_dequeue_buffer(tx_queue, buffer); 308 efx_dequeue_buffer(tx_queue, buffer);
295 buffer->len = 0; 309 buffer->len = 0;
@@ -318,10 +332,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
318{ 332{
319 struct efx_nic *efx = tx_queue->efx; 333 struct efx_nic *efx = tx_queue->efx;
320 unsigned int stop_index, read_ptr; 334 unsigned int stop_index, read_ptr;
321 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
322 335
323 stop_index = (index + 1) & mask; 336 stop_index = (index + 1) & EFX_TXQ_MASK;
324 read_ptr = tx_queue->read_count & mask; 337 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
325 338
326 while (read_ptr != stop_index) { 339 while (read_ptr != stop_index) {
327 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 340 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,28 +351,10 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
338 buffer->len = 0; 351 buffer->len = 0;
339 352
340 ++tx_queue->read_count; 353 ++tx_queue->read_count;
341 read_ptr = tx_queue->read_count & mask; 354 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
342 } 355 }
343} 356}
344 357
345/* Initiate a packet transmission on the specified TX queue.
346 * Note that returning anything other than NETDEV_TX_OK will cause the
347 * OS to free the skb.
348 *
349 * This function is split out from efx_hard_start_xmit to allow the
350 * loopback test to direct packets via specific TX queues. It is
351 * therefore a non-static inline, so as not to penalise performance
352 * for non-loopback transmissions.
353 *
354 * Context: netif_tx_lock held
355 */
356inline netdev_tx_t efx_xmit(struct efx_nic *efx,
357 struct efx_tx_queue *tx_queue, struct sk_buff *skb)
358{
359 /* Map fragments for DMA and add to TX queue */
360 return efx_enqueue_skb(tx_queue, skb);
361}
362
363/* Initiate a packet transmission. We use one channel per CPU 358/* Initiate a packet transmission. We use one channel per CPU
364 * (sharing when we have more CPUs than channels). On Falcon, the TX 359 * (sharing when we have more CPUs than channels). On Falcon, the TX
365 * completion events will be directed back to the CPU that transmitted 360 * completion events will be directed back to the CPU that transmitted
@@ -383,7 +378,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
383 else 378 else
384 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; 379 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
385 380
386 return efx_xmit(efx, tx_queue, skb); 381 return efx_enqueue_skb(tx_queue, skb);
387} 382}
388 383
389void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 384void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -391,7 +386,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
391 unsigned fill_level; 386 unsigned fill_level;
392 struct efx_nic *efx = tx_queue->efx; 387 struct efx_nic *efx = tx_queue->efx;
393 388
394 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask); 389 EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
395 390
396 efx_dequeue_buffers(tx_queue, index); 391 efx_dequeue_buffers(tx_queue, index);
397 392
@@ -401,7 +396,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401 smp_mb(); 396 smp_mb();
402 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 397 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
403 fill_level = tx_queue->insert_count - tx_queue->read_count; 398 fill_level = tx_queue->insert_count - tx_queue->read_count;
404 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 399 if (fill_level < EFX_TXQ_THRESHOLD) {
405 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 400 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
406 401
407 /* Do this under netif_tx_lock(), to avoid racing 402 /* Do this under netif_tx_lock(), to avoid racing
@@ -425,15 +420,15 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
425 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); 420 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
426 421
427 /* Allocate software ring */ 422 /* Allocate software ring */
428 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); 423 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
429 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 424 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
430 if (!tx_queue->buffer) 425 if (!tx_queue->buffer)
431 return -ENOMEM; 426 return -ENOMEM;
432 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 427 for (i = 0; i <= EFX_TXQ_MASK; ++i)
433 tx_queue->buffer[i].continuation = true; 428 tx_queue->buffer[i].continuation = true;
434 429
435 /* Allocate hardware ring */ 430 /* Allocate hardware ring */
436 rc = falcon_probe_tx(tx_queue); 431 rc = efx_nic_probe_tx(tx_queue);
437 if (rc) 432 if (rc)
438 goto fail; 433 goto fail;
439 434
@@ -456,7 +451,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
456 BUG_ON(tx_queue->stopped); 451 BUG_ON(tx_queue->stopped);
457 452
458 /* Set up TX descriptor ring */ 453 /* Set up TX descriptor ring */
459 falcon_init_tx(tx_queue); 454 efx_nic_init_tx(tx_queue);
460} 455}
461 456
462void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 457void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -468,8 +463,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
468 463
469 /* Free any buffers left in the ring */ 464 /* Free any buffers left in the ring */
470 while (tx_queue->read_count != tx_queue->write_count) { 465 while (tx_queue->read_count != tx_queue->write_count) {
471 buffer = &tx_queue->buffer[tx_queue->read_count & 466 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
472 tx_queue->efx->type->txd_ring_mask];
473 efx_dequeue_buffer(tx_queue, buffer); 467 efx_dequeue_buffer(tx_queue, buffer);
474 buffer->continuation = true; 468 buffer->continuation = true;
475 buffer->len = 0; 469 buffer->len = 0;
@@ -483,7 +477,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
483 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); 477 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
484 478
485 /* Flush TX queue, remove descriptor ring */ 479 /* Flush TX queue, remove descriptor ring */
486 falcon_fini_tx(tx_queue); 480 efx_nic_fini_tx(tx_queue);
487 481
488 efx_release_tx_buffers(tx_queue); 482 efx_release_tx_buffers(tx_queue);
489 483
@@ -500,7 +494,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
500void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 494void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
501{ 495{
502 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); 496 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
503 falcon_remove_tx(tx_queue); 497 efx_nic_remove_tx(tx_queue);
504 498
505 kfree(tx_queue->buffer); 499 kfree(tx_queue->buffer);
506 tx_queue->buffer = NULL; 500 tx_queue->buffer = NULL;
@@ -539,6 +533,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
539#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) 533#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
540#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) 534#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
541#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) 535#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
536#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
542 537
543/** 538/**
544 * struct tso_state - TSO state for an SKB 539 * struct tso_state - TSO state for an SKB
@@ -551,6 +546,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
551 * @unmap_len: Length of SKB fragment 546 * @unmap_len: Length of SKB fragment
552 * @unmap_addr: DMA address of SKB fragment 547 * @unmap_addr: DMA address of SKB fragment
553 * @unmap_single: DMA single vs page mapping flag 548 * @unmap_single: DMA single vs page mapping flag
549 * @protocol: Network protocol (after any VLAN header)
554 * @header_len: Number of bytes of header 550 * @header_len: Number of bytes of header
555 * @full_packet_size: Number of bytes to put in each outgoing segment 551 * @full_packet_size: Number of bytes to put in each outgoing segment
556 * 552 *
@@ -571,6 +567,7 @@ struct tso_state {
571 dma_addr_t unmap_addr; 567 dma_addr_t unmap_addr;
572 bool unmap_single; 568 bool unmap_single;
573 569
570 __be16 protocol;
574 unsigned header_len; 571 unsigned header_len;
575 int full_packet_size; 572 int full_packet_size;
576}; 573};
@@ -578,9 +575,9 @@ struct tso_state {
578 575
579/* 576/*
580 * Verify that our various assumptions about sk_buffs and the conditions 577 * Verify that our various assumptions about sk_buffs and the conditions
581 * under which TSO will be attempted hold true. 578 * under which TSO will be attempted hold true. Return the protocol number.
582 */ 579 */
583static void efx_tso_check_safe(struct sk_buff *skb) 580static __be16 efx_tso_check_protocol(struct sk_buff *skb)
584{ 581{
585 __be16 protocol = skb->protocol; 582 __be16 protocol = skb->protocol;
586 583
@@ -595,13 +592,22 @@ static void efx_tso_check_safe(struct sk_buff *skb)
595 if (protocol == htons(ETH_P_IP)) 592 if (protocol == htons(ETH_P_IP))
596 skb_set_transport_header(skb, sizeof(*veh) + 593 skb_set_transport_header(skb, sizeof(*veh) +
597 4 * ip_hdr(skb)->ihl); 594 4 * ip_hdr(skb)->ihl);
595 else if (protocol == htons(ETH_P_IPV6))
596 skb_set_transport_header(skb, sizeof(*veh) +
597 sizeof(struct ipv6hdr));
598 } 598 }
599 599
600 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP)); 600 if (protocol == htons(ETH_P_IP)) {
601 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 601 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
602 } else {
603 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
604 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
605 }
602 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 606 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
603 + (tcp_hdr(skb)->doff << 2u)) > 607 + (tcp_hdr(skb)->doff << 2u)) >
604 skb_headlen(skb)); 608 skb_headlen(skb));
609
610 return protocol;
605} 611}
606 612
607 613
@@ -708,14 +714,14 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
708{ 714{
709 struct efx_tx_buffer *buffer; 715 struct efx_tx_buffer *buffer;
710 struct efx_nic *efx = tx_queue->efx; 716 struct efx_nic *efx = tx_queue->efx;
711 unsigned dma_len, fill_level, insert_ptr, misalign; 717 unsigned dma_len, fill_level, insert_ptr;
712 int q_space; 718 int q_space;
713 719
714 EFX_BUG_ON_PARANOID(len <= 0); 720 EFX_BUG_ON_PARANOID(len <= 0);
715 721
716 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 722 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
717 /* -1 as there is no way to represent all descriptors used */ 723 /* -1 as there is no way to represent all descriptors used */
718 q_space = efx->type->txd_ring_mask - 1 - fill_level; 724 q_space = EFX_TXQ_MASK - 1 - fill_level;
719 725
720 while (1) { 726 while (1) {
721 if (unlikely(q_space-- <= 0)) { 727 if (unlikely(q_space-- <= 0)) {
@@ -731,7 +737,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
731 *(volatile unsigned *)&tx_queue->read_count; 737 *(volatile unsigned *)&tx_queue->read_count;
732 fill_level = (tx_queue->insert_count 738 fill_level = (tx_queue->insert_count
733 - tx_queue->old_read_count); 739 - tx_queue->old_read_count);
734 q_space = efx->type->txd_ring_mask - 1 - fill_level; 740 q_space = EFX_TXQ_MASK - 1 - fill_level;
735 if (unlikely(q_space-- <= 0)) { 741 if (unlikely(q_space-- <= 0)) {
736 *final_buffer = NULL; 742 *final_buffer = NULL;
737 return 1; 743 return 1;
@@ -740,13 +746,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
740 --tx_queue->stopped; 746 --tx_queue->stopped;
741 } 747 }
742 748
743 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 749 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
744 buffer = &tx_queue->buffer[insert_ptr]; 750 buffer = &tx_queue->buffer[insert_ptr];
745 ++tx_queue->insert_count; 751 ++tx_queue->insert_count;
746 752
747 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 753 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
748 tx_queue->read_count > 754 tx_queue->read_count >
749 efx->type->txd_ring_mask); 755 EFX_TXQ_MASK);
750 756
751 efx_tsoh_free(tx_queue, buffer); 757 efx_tsoh_free(tx_queue, buffer);
752 EFX_BUG_ON_PARANOID(buffer->len); 758 EFX_BUG_ON_PARANOID(buffer->len);
@@ -757,12 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
757 763
758 buffer->dma_addr = dma_addr; 764 buffer->dma_addr = dma_addr;
759 765
760 /* Ensure we do not cross a boundary unsupported by H/W */ 766 dma_len = efx_max_tx_len(efx, dma_addr);
761 dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
762
763 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
764 if (misalign && dma_len + misalign > 512)
765 dma_len = 512 - misalign;
766 767
767 /* If there is enough space to send then do so */ 768 /* If there is enough space to send then do so */
768 if (dma_len >= len) 769 if (dma_len >= len)
@@ -792,8 +793,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
792{ 793{
793 struct efx_tx_buffer *buffer; 794 struct efx_tx_buffer *buffer;
794 795
795 buffer = &tx_queue->buffer[tx_queue->insert_count & 796 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
796 tx_queue->efx->type->txd_ring_mask];
797 efx_tsoh_free(tx_queue, buffer); 797 efx_tsoh_free(tx_queue, buffer);
798 EFX_BUG_ON_PARANOID(buffer->len); 798 EFX_BUG_ON_PARANOID(buffer->len);
799 EFX_BUG_ON_PARANOID(buffer->unmap_len); 799 EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +818,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
818 while (tx_queue->insert_count != tx_queue->write_count) { 818 while (tx_queue->insert_count != tx_queue->write_count) {
819 --tx_queue->insert_count; 819 --tx_queue->insert_count;
820 buffer = &tx_queue->buffer[tx_queue->insert_count & 820 buffer = &tx_queue->buffer[tx_queue->insert_count &
821 tx_queue->efx->type->txd_ring_mask]; 821 EFX_TXQ_MASK];
822 efx_tsoh_free(tx_queue, buffer); 822 efx_tsoh_free(tx_queue, buffer);
823 EFX_BUG_ON_PARANOID(buffer->skb); 823 EFX_BUG_ON_PARANOID(buffer->skb);
824 buffer->len = 0; 824 buffer->len = 0;
@@ -850,7 +850,10 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
850 + PTR_DIFF(tcp_hdr(skb), skb->data)); 850 + PTR_DIFF(tcp_hdr(skb), skb->data));
851 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; 851 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
852 852
853 st->ipv4_id = ntohs(ip_hdr(skb)->id); 853 if (st->protocol == htons(ETH_P_IP))
854 st->ipv4_id = ntohs(ip_hdr(skb)->id);
855 else
856 st->ipv4_id = 0;
854 st->seqnum = ntohl(tcp_hdr(skb)->seq); 857 st->seqnum = ntohl(tcp_hdr(skb)->seq);
855 858
856 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 859 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -965,7 +968,6 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
965 struct tso_state *st) 968 struct tso_state *st)
966{ 969{
967 struct efx_tso_header *tsoh; 970 struct efx_tso_header *tsoh;
968 struct iphdr *tsoh_iph;
969 struct tcphdr *tsoh_th; 971 struct tcphdr *tsoh_th;
970 unsigned ip_length; 972 unsigned ip_length;
971 u8 *header; 973 u8 *header;
@@ -989,7 +991,6 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
989 991
990 header = TSOH_BUFFER(tsoh); 992 header = TSOH_BUFFER(tsoh);
991 tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); 993 tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
992 tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
993 994
994 /* Copy and update the headers. */ 995 /* Copy and update the headers. */
995 memcpy(header, skb->data, st->header_len); 996 memcpy(header, skb->data, st->header_len);
@@ -1007,11 +1008,22 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1007 tsoh_th->fin = tcp_hdr(skb)->fin; 1008 tsoh_th->fin = tcp_hdr(skb)->fin;
1008 tsoh_th->psh = tcp_hdr(skb)->psh; 1009 tsoh_th->psh = tcp_hdr(skb)->psh;
1009 } 1010 }
1010 tsoh_iph->tot_len = htons(ip_length);
1011 1011
1012 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1012 if (st->protocol == htons(ETH_P_IP)) {
1013 tsoh_iph->id = htons(st->ipv4_id); 1013 struct iphdr *tsoh_iph =
1014 st->ipv4_id++; 1014 (struct iphdr *)(header + SKB_IPV4_OFF(skb));
1015
1016 tsoh_iph->tot_len = htons(ip_length);
1017
1018 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1019 tsoh_iph->id = htons(st->ipv4_id);
1020 st->ipv4_id++;
1021 } else {
1022 struct ipv6hdr *tsoh_iph =
1023 (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
1024
1025 tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
1026 }
1015 1027
1016 st->packet_space = skb_shinfo(skb)->gso_size; 1028 st->packet_space = skb_shinfo(skb)->gso_size;
1017 ++tx_queue->tso_packets; 1029 ++tx_queue->tso_packets;
@@ -1041,8 +1053,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1041 int frag_i, rc, rc2 = NETDEV_TX_OK; 1053 int frag_i, rc, rc2 = NETDEV_TX_OK;
1042 struct tso_state state; 1054 struct tso_state state;
1043 1055
1044 /* Verify TSO is safe - these checks should never fail. */ 1056 /* Find the packet protocol and sanity-check it */
1045 efx_tso_check_safe(skb); 1057 state.protocol = efx_tso_check_protocol(skb);
1046 1058
1047 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 1059 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1048 1060
@@ -1092,14 +1104,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1092 } 1104 }
1093 1105
1094 /* Pass off to hardware */ 1106 /* Pass off to hardware */
1095 falcon_push_buffers(tx_queue); 1107 efx_nic_push_buffers(tx_queue);
1096 1108
1097 tx_queue->tso_bursts++; 1109 tx_queue->tso_bursts++;
1098 return NETDEV_TX_OK; 1110 return NETDEV_TX_OK;
1099 1111
1100 mem_err: 1112 mem_err:
1101 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); 1113 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
1102 dev_kfree_skb_any((struct sk_buff *)skb); 1114 dev_kfree_skb_any(skb);
1103 goto unwind; 1115 goto unwind;
1104 1116
1105 stop: 1117 stop:
@@ -1135,7 +1147,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1135 unsigned i; 1147 unsigned i;
1136 1148
1137 if (tx_queue->buffer) { 1149 if (tx_queue->buffer) {
1138 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1150 for (i = 0; i <= EFX_TXQ_MASK; ++i)
1139 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1151 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1140 } 1152 }
1141 1153
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
deleted file mode 100644
index e3678962a5b4..000000000000
--- a/drivers/net/sfc/tx.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_TX_H
12#define EFX_TX_H
13
14#include "net_driver.h"
15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20
21netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
22 struct net_device *net_dev);
23void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
24
25#endif /* EFX_TX_H */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index c821c15445a0..acd9c734e483 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc. 3 * Copyright 2006-2009 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,9 @@
16 */ 16 */
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
20#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) 22#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
21#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \ 23#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
22 (efx)->phy_type == PHY_TYPE_SFT9001B) 24 (efx)->phy_type == PHY_TYPE_SFT9001B)
@@ -27,20 +29,22 @@
27#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 29#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
28/* Bit-bashed I2C reads cause performance drop */ 30/* Bit-bashed I2C reads cause performance drop */
29#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G 31#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
30/* TX pkt parser problem with <= 16 byte TXes */
31#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
32/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 32/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
33 * or a PCIe error (bug 11028) */ 33 * or a PCIe error (bug 11028) */
34#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS 34#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
35/* Transmit flow control may get disabled */ 35/* Transmit flow control may get disabled */
36#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS 36#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
37/* Flush events can take a very long time to appear */
38#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
39/* Truncated IPv4 packets can confuse the TX packet parser */ 37/* Truncated IPv4 packets can confuse the TX packet parser */
40#define EFX_WORKAROUND_15592 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
39/* Legacy ISR read can return zero once */
40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA
41/* Legacy interrupt storm when interrupt fifo fills */
42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41 43
42/* Spurious parity errors in TSORT buffers */ 44/* Spurious parity errors in TSORT buffers */
43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
46/* Unaligned read request >512 bytes after aligning may break TSORT */
47#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
44/* iSCSI parsing errors */ 48/* iSCSI parsing errors */
45#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A 49#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
46/* RX events go missing */ 50/* RX events go missing */