Diffstat (limited to 'drivers/net/sfc')
46 files changed, 13843 insertions, 6383 deletions
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index 260aafaac235..a65c98638398 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -1,5 +1,5 @@ | |||
1 | config SFC | 1 | config SFC |
2 | tristate "Solarflare Solarstorm SFC4000 support" | 2 | tristate "Solarflare Solarstorm SFC4000/SFC9000-family support" |
3 | depends on PCI && INET | 3 | depends on PCI && INET |
4 | select MDIO | 4 | select MDIO |
5 | select CRC32 | 5 | select CRC32 |
@@ -7,15 +7,16 @@ config SFC | |||
7 | select I2C_ALGOBIT | 7 | select I2C_ALGOBIT |
8 | help | 8 | help |
9 | This driver supports 10-gigabit Ethernet cards based on | 9 | This driver supports 10-gigabit Ethernet cards based on |
10 | the Solarflare Communications Solarstorm SFC4000 controller. | 10 | the Solarflare Communications Solarstorm SFC4000 and |
11 | SFC9000-family controllers. | ||
11 | 12 | ||
12 | To compile this driver as a module, choose M here. The module | 13 | To compile this driver as a module, choose M here. The module |
13 | will be called sfc. | 14 | will be called sfc. |
14 | config SFC_MTD | 15 | config SFC_MTD |
15 | bool "Solarflare Solarstorm SFC4000 flash MTD support" | 16 | bool "Solarflare Solarstorm SFC4000/SFC9000-family MTD support" |
16 | depends on SFC && MTD && !(SFC=y && MTD=m) | 17 | depends on SFC && MTD && !(SFC=y && MTD=m) |
17 | default y | 18 | default y |
18 | help | 19 | help |
19 | This exposes the on-board flash memory as an MTD device (e.g. | 20 | This exposes the on-board flash memory as MTD devices (e.g. |
20 | /dev/mtd1). This makes it possible to upload new boot code | 21 | /dev/mtd1). This makes it possible to upload new firmware |
21 | to the NIC. | 22 | to the NIC. |
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be3cb13..1047b19c60a5 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,7 @@ | |||
1 | sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ | 1 | sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o \ |
2 | falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ | 2 | falcon_gmac.o falcon_xmac.o mcdi_mac.o \ |
3 | mdio_10g.o tenxpress.o boards.o sfe4001.o | 3 | selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ |
4 | tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o | ||
4 | sfc-$(CONFIG_SFC_MTD) += mtd.o | 5 | sfc-$(CONFIG_SFC_MTD) += mtd.o |
5 | 6 | ||
6 | obj-$(CONFIG_SFC) += sfc.o | 7 | obj-$(CONFIG_SFC) += sfc.o |
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c267b9..098ac2ad757d 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -37,6 +37,8 @@ | |||
37 | #define EFX_DWORD_2_WIDTH 32 | 37 | #define EFX_DWORD_2_WIDTH 32 |
38 | #define EFX_DWORD_3_LBN 96 | 38 | #define EFX_DWORD_3_LBN 96 |
39 | #define EFX_DWORD_3_WIDTH 32 | 39 | #define EFX_DWORD_3_WIDTH 32 |
40 | #define EFX_QWORD_0_LBN 0 | ||
41 | #define EFX_QWORD_0_WIDTH 64 | ||
40 | 42 | ||
41 | /* Specified attribute (e.g. LBN) of the specified field */ | 43 | /* Specified attribute (e.g. LBN) of the specified field */ |
42 | #define EFX_VAL(field, attribute) field ## _ ## attribute | 44 | #define EFX_VAL(field, attribute) field ## _ ## attribute |
@@ -520,19 +522,6 @@ typedef union efx_oword { | |||
520 | #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 | 522 | #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 |
521 | #endif | 523 | #endif |
522 | 524 | ||
523 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ | ||
524 | if (falcon_rev(efx) >= FALCON_REV_B0) { \ | ||
525 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ | ||
526 | } else { \ | ||
527 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ | ||
528 | } \ | ||
529 | } while (0) | ||
530 | |||
531 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ | ||
532 | (falcon_rev(efx) >= FALCON_REV_B0 ? \ | ||
533 | EFX_QWORD_FIELD((qword), field##_B0) : \ | ||
534 | EFX_QWORD_FIELD((qword), field##_A1)) | ||
535 | |||
536 | /* Used to avoid compiler warnings about shift range exceeding width | 525 | /* Used to avoid compiler warnings about shift range exceeding width |
537 | * of the data types when dma_addr_t is only 32 bits wide. | 526 | * of the data types when dma_addr_t is only 32 bits wide. |
538 | */ | 527 | */ |
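The EFX_SET_OWORD_FIELD_VER()/EFX_QWORD_FIELD_VER() helpers removed above chose between the _A1 and _B0 variants of a register field at run time using falcon_rev(). With SFC9000-family support the generic bitfield header can no longer assume every NIC is a Falcon, so that choice presumably moves to code that already knows it is dealing with a Falcon. A minimal sketch of the explicit form, using hypothetical field names (FOO_A1/FOO_B0), not real register definitions:

	/* Hedged sketch, hypothetical names: select the revision-specific
	 * field variant explicitly in Falcon-only code. */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, FOO_B0, 1);
	else
		EFX_SET_OWORD_FIELD(reg, FOO_A1, 1);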
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c891b7..000000000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2008 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include "net_driver.h" | ||
11 | #include "phy.h" | ||
12 | #include "boards.h" | ||
13 | #include "efx.h" | ||
14 | #include "workarounds.h" | ||
15 | |||
16 | /* Macros for unpacking the board revision */ | ||
17 | /* The revision info is in host byte order. */ | ||
18 | #define BOARD_TYPE(_rev) (_rev >> 8) | ||
19 | #define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) | ||
20 | #define BOARD_MINOR(_rev) (_rev & 0xf) | ||
21 | |||
22 | /* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */ | ||
23 | #define BLINK_INTERVAL (HZ/2) | ||
24 | |||
25 | static void blink_led_timer(unsigned long context) | ||
26 | { | ||
27 | struct efx_nic *efx = (struct efx_nic *)context; | ||
28 | struct efx_blinker *bl = &efx->board_info.blinker; | ||
29 | efx->board_info.set_id_led(efx, bl->state); | ||
30 | bl->state = !bl->state; | ||
31 | if (bl->resubmit) | ||
32 | mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); | ||
33 | } | ||
34 | |||
35 | static void board_blink(struct efx_nic *efx, bool blink) | ||
36 | { | ||
37 | struct efx_blinker *blinker = &efx->board_info.blinker; | ||
38 | |||
39 | /* The rtnl mutex serialises all ethtool ioctls, so | ||
40 | * nothing special needs doing here. */ | ||
41 | if (blink) { | ||
42 | blinker->resubmit = true; | ||
43 | blinker->state = false; | ||
44 | setup_timer(&blinker->timer, blink_led_timer, | ||
45 | (unsigned long)efx); | ||
46 | mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); | ||
47 | } else { | ||
48 | blinker->resubmit = false; | ||
49 | if (blinker->timer.function) | ||
50 | del_timer_sync(&blinker->timer); | ||
51 | efx->board_info.init_leds(efx); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | /***************************************************************************** | ||
56 | * Support for LM87 sensor chip used on several boards | ||
57 | */ | ||
58 | #define LM87_REG_ALARMS1 0x41 | ||
59 | #define LM87_REG_ALARMS2 0x42 | ||
60 | #define LM87_IN_LIMITS(nr, _min, _max) \ | ||
61 | 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min | ||
62 | #define LM87_AIN_LIMITS(nr, _min, _max) \ | ||
63 | 0x3B + (nr), _max, 0x1A + (nr), _min | ||
64 | #define LM87_TEMP_INT_LIMITS(_min, _max) \ | ||
65 | 0x39, _max, 0x3A, _min | ||
66 | #define LM87_TEMP_EXT1_LIMITS(_min, _max) \ | ||
67 | 0x37, _max, 0x38, _min | ||
68 | |||
69 | #define LM87_ALARM_TEMP_INT 0x10 | ||
70 | #define LM87_ALARM_TEMP_EXT1 0x20 | ||
71 | |||
72 | #if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) | ||
73 | |||
74 | static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, | ||
75 | const u8 *reg_values) | ||
76 | { | ||
77 | struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info); | ||
78 | int rc; | ||
79 | |||
80 | if (!client) | ||
81 | return -EIO; | ||
82 | |||
83 | while (*reg_values) { | ||
84 | u8 reg = *reg_values++; | ||
85 | u8 value = *reg_values++; | ||
86 | rc = i2c_smbus_write_byte_data(client, reg, value); | ||
87 | if (rc) | ||
88 | goto err; | ||
89 | } | ||
90 | |||
91 | efx->board_info.hwmon_client = client; | ||
92 | return 0; | ||
93 | |||
94 | err: | ||
95 | i2c_unregister_device(client); | ||
96 | return rc; | ||
97 | } | ||
98 | |||
99 | static void efx_fini_lm87(struct efx_nic *efx) | ||
100 | { | ||
101 | i2c_unregister_device(efx->board_info.hwmon_client); | ||
102 | } | ||
103 | |||
104 | static int efx_check_lm87(struct efx_nic *efx, unsigned mask) | ||
105 | { | ||
106 | struct i2c_client *client = efx->board_info.hwmon_client; | ||
107 | s32 alarms1, alarms2; | ||
108 | |||
109 | /* If link is up then do not monitor temperature */ | ||
110 | if (EFX_WORKAROUND_7884(efx) && efx->link_up) | ||
111 | return 0; | ||
112 | |||
113 | alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); | ||
114 | alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); | ||
115 | if (alarms1 < 0) | ||
116 | return alarms1; | ||
117 | if (alarms2 < 0) | ||
118 | return alarms2; | ||
119 | alarms1 &= mask; | ||
120 | alarms2 &= mask >> 8; | ||
121 | if (alarms1 || alarms2) { | ||
122 | EFX_ERR(efx, | ||
123 | "LM87 detected a hardware failure (status %02x:%02x)" | ||
124 | "%s%s\n", | ||
125 | alarms1, alarms2, | ||
126 | (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "", | ||
127 | (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : ""); | ||
128 | return -ERANGE; | ||
129 | } | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | #else /* !CONFIG_SENSORS_LM87 */ | ||
135 | |||
136 | static inline int | ||
137 | efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, | ||
138 | const u8 *reg_values) | ||
139 | { | ||
140 | return 0; | ||
141 | } | ||
142 | static inline void efx_fini_lm87(struct efx_nic *efx) | ||
143 | { | ||
144 | } | ||
145 | static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask) | ||
146 | { | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | #endif /* CONFIG_SENSORS_LM87 */ | ||
151 | |||
152 | /***************************************************************************** | ||
153 | * Support for the SFE4002 | ||
154 | * | ||
155 | */ | ||
156 | static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ | ||
157 | |||
158 | static const u8 sfe4002_lm87_regs[] = { | ||
159 | LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ | ||
160 | LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ | ||
161 | LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ | ||
162 | LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */ | ||
163 | LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ | ||
164 | LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ | ||
165 | LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */ | ||
166 | LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ | ||
167 | LM87_TEMP_INT_LIMITS(10, 60), /* board */ | ||
168 | LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ | ||
169 | 0 | ||
170 | }; | ||
171 | |||
172 | static struct i2c_board_info sfe4002_hwmon_info = { | ||
173 | I2C_BOARD_INFO("lm87", 0x2e), | ||
174 | .platform_data = &sfe4002_lm87_channel, | ||
175 | }; | ||
176 | |||
177 | /****************************************************************************/ | ||
178 | /* LED allocations. Note that on rev A0 boards the schematic and the reality | ||
179 | * differ: red and green are swapped. Below is the fixed (A1) layout (there | ||
180 | * are only 3 A0 boards in existence, so no real reason to make this | ||
181 | * conditional). | ||
182 | */ | ||
183 | #define SFE4002_FAULT_LED (2) /* Red */ | ||
184 | #define SFE4002_RX_LED (0) /* Green */ | ||
185 | #define SFE4002_TX_LED (1) /* Amber */ | ||
186 | |||
187 | static void sfe4002_init_leds(struct efx_nic *efx) | ||
188 | { | ||
189 | /* Set the TX and RX LEDs to reflect status and activity, and the | ||
190 | * fault LED off */ | ||
191 | xfp_set_led(efx, SFE4002_TX_LED, | ||
192 | QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT); | ||
193 | xfp_set_led(efx, SFE4002_RX_LED, | ||
194 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT); | ||
195 | xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF); | ||
196 | } | ||
197 | |||
198 | static void sfe4002_set_id_led(struct efx_nic *efx, bool state) | ||
199 | { | ||
200 | xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON : | ||
201 | QUAKE_LED_OFF); | ||
202 | } | ||
203 | |||
204 | static int sfe4002_check_hw(struct efx_nic *efx) | ||
205 | { | ||
206 | /* A0 board rev. 4002s report a temperature fault the whole time | ||
207 | * (bad sensor) so we mask it out. */ | ||
208 | unsigned alarm_mask = | ||
209 | (efx->board_info.major == 0 && efx->board_info.minor == 0) ? | ||
210 | ~LM87_ALARM_TEMP_EXT1 : ~0; | ||
211 | |||
212 | return efx_check_lm87(efx, alarm_mask); | ||
213 | } | ||
214 | |||
215 | static int sfe4002_init(struct efx_nic *efx) | ||
216 | { | ||
217 | int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs); | ||
218 | if (rc) | ||
219 | return rc; | ||
220 | efx->board_info.monitor = sfe4002_check_hw; | ||
221 | efx->board_info.init_leds = sfe4002_init_leds; | ||
222 | efx->board_info.set_id_led = sfe4002_set_id_led; | ||
223 | efx->board_info.blink = board_blink; | ||
224 | efx->board_info.fini = efx_fini_lm87; | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | /***************************************************************************** | ||
229 | * Support for the SFN4112F | ||
230 | * | ||
231 | */ | ||
232 | static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ | ||
233 | |||
234 | static const u8 sfn4112f_lm87_regs[] = { | ||
235 | LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ | ||
236 | LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ | ||
237 | LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ | ||
238 | LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ | ||
239 | LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ | ||
240 | LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ | ||
241 | LM87_TEMP_INT_LIMITS(10, 60), /* board */ | ||
242 | LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ | ||
243 | 0 | ||
244 | }; | ||
245 | |||
246 | static struct i2c_board_info sfn4112f_hwmon_info = { | ||
247 | I2C_BOARD_INFO("lm87", 0x2e), | ||
248 | .platform_data = &sfn4112f_lm87_channel, | ||
249 | }; | ||
250 | |||
251 | #define SFN4112F_ACT_LED 0 | ||
252 | #define SFN4112F_LINK_LED 1 | ||
253 | |||
254 | static void sfn4112f_init_leds(struct efx_nic *efx) | ||
255 | { | ||
256 | xfp_set_led(efx, SFN4112F_ACT_LED, | ||
257 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT); | ||
258 | xfp_set_led(efx, SFN4112F_LINK_LED, | ||
259 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT); | ||
260 | } | ||
261 | |||
262 | static void sfn4112f_set_id_led(struct efx_nic *efx, bool state) | ||
263 | { | ||
264 | xfp_set_led(efx, SFN4112F_LINK_LED, | ||
265 | state ? QUAKE_LED_ON : QUAKE_LED_OFF); | ||
266 | } | ||
267 | |||
268 | static int sfn4112f_check_hw(struct efx_nic *efx) | ||
269 | { | ||
270 | /* Mask out unused sensors */ | ||
271 | return efx_check_lm87(efx, ~0x48); | ||
272 | } | ||
273 | |||
274 | static int sfn4112f_init(struct efx_nic *efx) | ||
275 | { | ||
276 | int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); | ||
277 | if (rc) | ||
278 | return rc; | ||
279 | efx->board_info.monitor = sfn4112f_check_hw; | ||
280 | efx->board_info.init_leds = sfn4112f_init_leds; | ||
281 | efx->board_info.set_id_led = sfn4112f_set_id_led; | ||
282 | efx->board_info.blink = board_blink; | ||
283 | efx->board_info.fini = efx_fini_lm87; | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /* This will get expanded as board-specific details get moved out of the | ||
288 | * PHY drivers. */ | ||
289 | struct efx_board_data { | ||
290 | enum efx_board_type type; | ||
291 | const char *ref_model; | ||
292 | const char *gen_type; | ||
293 | int (*init) (struct efx_nic *nic); | ||
294 | }; | ||
295 | |||
296 | |||
297 | static struct efx_board_data board_data[] = { | ||
298 | { EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init }, | ||
299 | { EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init }, | ||
300 | { EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter", | ||
301 | sfn4111t_init }, | ||
302 | { EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter", | ||
303 | sfn4112f_init }, | ||
304 | }; | ||
305 | |||
306 | void efx_set_board_info(struct efx_nic *efx, u16 revision_info) | ||
307 | { | ||
308 | struct efx_board_data *data = NULL; | ||
309 | int i; | ||
310 | |||
311 | efx->board_info.type = BOARD_TYPE(revision_info); | ||
312 | efx->board_info.major = BOARD_MAJOR(revision_info); | ||
313 | efx->board_info.minor = BOARD_MINOR(revision_info); | ||
314 | |||
315 | for (i = 0; i < ARRAY_SIZE(board_data); i++) | ||
316 | if (board_data[i].type == efx->board_info.type) | ||
317 | data = &board_data[i]; | ||
318 | |||
319 | if (data) { | ||
320 | EFX_INFO(efx, "board is %s rev %c%d\n", | ||
321 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | ||
322 | ? data->ref_model : data->gen_type, | ||
323 | 'A' + efx->board_info.major, efx->board_info.minor); | ||
324 | efx->board_info.init = data->init; | ||
325 | } else { | ||
326 | EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type); | ||
327 | } | ||
328 | } | ||
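The board support deleted here reappears as falcon_boards.c (see the Makefile change above). One detail worth keeping in mind when reading the LM87 code: the LM87_*_LIMITS macros build a flat table of (register, value) byte pairs, high limit first, terminated by a single 0, which efx_init_lm87() then walks two bytes at a time. A small illustration of how the table expands, using values from the SFE4002 table above:

	/* Illustration only: expansion of the limit macros used above. */
	static const u8 example_lm87_regs[] = {
		LM87_IN_LIMITS(0, 0x83, 0x91),	/* -> 0x2B, 0x91, 0x2C, 0x83 */
		LM87_TEMP_INT_LIMITS(10, 60),	/* -> 0x39, 60,   0x3A, 10   */
		0				/* terminator for efx_init_lm87() */
	};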
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de0e080..000000000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2008 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_BOARDS_H | ||
11 | #define EFX_BOARDS_H | ||
12 | |||
13 | /* Board IDs (must fit in 8 bits) */ | ||
14 | enum efx_board_type { | ||
15 | EFX_BOARD_SFE4001 = 1, | ||
16 | EFX_BOARD_SFE4002 = 2, | ||
17 | EFX_BOARD_SFN4111T = 0x51, | ||
18 | EFX_BOARD_SFN4112F = 0x52, | ||
19 | }; | ||
20 | |||
21 | extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info); | ||
22 | |||
23 | /* SFE4001 (10GBASE-T) */ | ||
24 | extern int sfe4001_init(struct efx_nic *efx); | ||
25 | /* SFN4111T (100/1000/10GBASE-T) */ | ||
26 | extern int sfn4111t_init(struct efx_nic *efx); | ||
27 | |||
28 | #endif | ||
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..649a264d6a81 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2008 Solarflare Communications Inc. | 4 | * Copyright 2005-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -20,13 +20,75 @@ | |||
20 | #include <linux/crc32.h> | 20 | #include <linux/crc32.h> |
21 | #include <linux/ethtool.h> | 21 | #include <linux/ethtool.h> |
22 | #include <linux/topology.h> | 22 | #include <linux/topology.h> |
23 | #include <linux/gfp.h> | ||
23 | #include "net_driver.h" | 24 | #include "net_driver.h" |
24 | #include "ethtool.h" | ||
25 | #include "tx.h" | ||
26 | #include "rx.h" | ||
27 | #include "efx.h" | 25 | #include "efx.h" |
28 | #include "mdio_10g.h" | 26 | #include "mdio_10g.h" |
29 | #include "falcon.h" | 27 | #include "nic.h" |
28 | |||
29 | #include "mcdi.h" | ||
30 | |||
31 | /************************************************************************** | ||
32 | * | ||
33 | * Type name strings | ||
34 | * | ||
35 | ************************************************************************** | ||
36 | */ | ||
37 | |||
38 | /* Loopback mode names (see LOOPBACK_MODE()) */ | ||
39 | const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; | ||
40 | const char *efx_loopback_mode_names[] = { | ||
41 | [LOOPBACK_NONE] = "NONE", | ||
42 | [LOOPBACK_DATA] = "DATAPATH", | ||
43 | [LOOPBACK_GMAC] = "GMAC", | ||
44 | [LOOPBACK_XGMII] = "XGMII", | ||
45 | [LOOPBACK_XGXS] = "XGXS", | ||
46 | [LOOPBACK_XAUI] = "XAUI", | ||
47 | [LOOPBACK_GMII] = "GMII", | ||
48 | [LOOPBACK_SGMII] = "SGMII", | ||
49 | [LOOPBACK_XGBR] = "XGBR", | ||
50 | [LOOPBACK_XFI] = "XFI", | ||
51 | [LOOPBACK_XAUI_FAR] = "XAUI_FAR", | ||
52 | [LOOPBACK_GMII_FAR] = "GMII_FAR", | ||
53 | [LOOPBACK_SGMII_FAR] = "SGMII_FAR", | ||
54 | [LOOPBACK_XFI_FAR] = "XFI_FAR", | ||
55 | [LOOPBACK_GPHY] = "GPHY", | ||
56 | [LOOPBACK_PHYXS] = "PHYXS", | ||
57 | [LOOPBACK_PCS] = "PCS", | ||
58 | [LOOPBACK_PMAPMD] = "PMA/PMD", | ||
59 | [LOOPBACK_XPORT] = "XPORT", | ||
60 | [LOOPBACK_XGMII_WS] = "XGMII_WS", | ||
61 | [LOOPBACK_XAUI_WS] = "XAUI_WS", | ||
62 | [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", | ||
63 | [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", | ||
64 | [LOOPBACK_GMII_WS] = "GMII_WS", | ||
65 | [LOOPBACK_XFI_WS] = "XFI_WS", | ||
66 | [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", | ||
67 | [LOOPBACK_PHYXS_WS] = "PHYXS_WS", | ||
68 | }; | ||
69 | |||
70 | /* Interrupt mode names (see INT_MODE())) */ | ||
71 | const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; | ||
72 | const char *efx_interrupt_mode_names[] = { | ||
73 | [EFX_INT_MODE_MSIX] = "MSI-X", | ||
74 | [EFX_INT_MODE_MSI] = "MSI", | ||
75 | [EFX_INT_MODE_LEGACY] = "legacy", | ||
76 | }; | ||
77 | |||
78 | const unsigned int efx_reset_type_max = RESET_TYPE_MAX; | ||
79 | const char *efx_reset_type_names[] = { | ||
80 | [RESET_TYPE_INVISIBLE] = "INVISIBLE", | ||
81 | [RESET_TYPE_ALL] = "ALL", | ||
82 | [RESET_TYPE_WORLD] = "WORLD", | ||
83 | [RESET_TYPE_DISABLE] = "DISABLE", | ||
84 | [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", | ||
85 | [RESET_TYPE_INT_ERROR] = "INT_ERROR", | ||
86 | [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", | ||
87 | [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", | ||
88 | [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", | ||
89 | [RESET_TYPE_TX_SKIP] = "TX_SKIP", | ||
90 | [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", | ||
91 | }; | ||
30 | 92 | ||
31 | #define EFX_MAX_MTU (9 * 1024) | 93 | #define EFX_MAX_MTU (9 * 1024) |
32 | 94 | ||
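The name tables added above (efx_loopback_mode_names[], efx_interrupt_mode_names[], efx_reset_type_names[]) are indexed directly by the corresponding enum values, with the *_max constants giving the array bounds. A minimal sketch of the intended lookup pattern; lookup_name() is illustrative, not a driver function:

	static const char *lookup_name(const char **names, unsigned int max,
				       unsigned int val)
	{
		/* Guard against out-of-range values and holes in the table */
		return (val < max && names[val]) ? names[val] : "(unknown)";
	}

	/* e.g. lookup_name(efx_loopback_mode_names, efx_loopback_mode_max,
	 *		    efx->loopback_mode) */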
@@ -145,7 +207,8 @@ static void efx_fini_channels(struct efx_nic *efx); | |||
145 | 207 | ||
146 | #define EFX_ASSERT_RESET_SERIALISED(efx) \ | 208 | #define EFX_ASSERT_RESET_SERIALISED(efx) \ |
147 | do { \ | 209 | do { \ |
148 | if (efx->state == STATE_RUNNING) \ | 210 | if ((efx->state == STATE_RUNNING) || \ |
211 | (efx->state == STATE_DISABLED)) \ | ||
149 | ASSERT_RTNL(); \ | 212 | ASSERT_RTNL(); \ |
150 | } while (0) | 213 | } while (0) |
151 | 214 | ||
@@ -171,7 +234,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota) | |||
171 | !channel->enabled)) | 234 | !channel->enabled)) |
172 | return 0; | 235 | return 0; |
173 | 236 | ||
174 | rx_packets = falcon_process_eventq(channel, rx_quota); | 237 | rx_packets = efx_nic_process_eventq(channel, rx_quota); |
175 | if (rx_packets == 0) | 238 | if (rx_packets == 0) |
176 | return 0; | 239 | return 0; |
177 | 240 | ||
@@ -203,7 +266,7 @@ static inline void efx_channel_processed(struct efx_channel *channel) | |||
203 | channel->work_pending = false; | 266 | channel->work_pending = false; |
204 | smp_wmb(); | 267 | smp_wmb(); |
205 | 268 | ||
206 | falcon_eventq_read_ack(channel); | 269 | efx_nic_eventq_read_ack(channel); |
207 | } | 270 | } |
208 | 271 | ||
209 | /* NAPI poll handler | 272 | /* NAPI poll handler |
@@ -228,26 +291,20 @@ static int efx_poll(struct napi_struct *napi, int budget) | |||
228 | if (channel->used_flags & EFX_USED_BY_RX && | 291 | if (channel->used_flags & EFX_USED_BY_RX && |
229 | efx->irq_rx_adaptive && | 292 | efx->irq_rx_adaptive && |
230 | unlikely(++channel->irq_count == 1000)) { | 293 | unlikely(++channel->irq_count == 1000)) { |
231 | unsigned old_irq_moderation = channel->irq_moderation; | ||
232 | |||
233 | if (unlikely(channel->irq_mod_score < | 294 | if (unlikely(channel->irq_mod_score < |
234 | irq_adapt_low_thresh)) { | 295 | irq_adapt_low_thresh)) { |
235 | channel->irq_moderation = | 296 | if (channel->irq_moderation > 1) { |
236 | max_t(int, | 297 | channel->irq_moderation -= 1; |
237 | channel->irq_moderation - | 298 | efx->type->push_irq_moderation(channel); |
238 | FALCON_IRQ_MOD_RESOLUTION, | 299 | } |
239 | FALCON_IRQ_MOD_RESOLUTION); | ||
240 | } else if (unlikely(channel->irq_mod_score > | 300 | } else if (unlikely(channel->irq_mod_score > |
241 | irq_adapt_high_thresh)) { | 301 | irq_adapt_high_thresh)) { |
242 | channel->irq_moderation = | 302 | if (channel->irq_moderation < |
243 | min(channel->irq_moderation + | 303 | efx->irq_rx_moderation) { |
244 | FALCON_IRQ_MOD_RESOLUTION, | 304 | channel->irq_moderation += 1; |
245 | efx->irq_rx_moderation); | 305 | efx->type->push_irq_moderation(channel); |
306 | } | ||
246 | } | 307 | } |
247 | |||
248 | if (channel->irq_moderation != old_irq_moderation) | ||
249 | falcon_set_int_moderation(channel); | ||
250 | |||
251 | channel->irq_count = 0; | 308 | channel->irq_count = 0; |
252 | channel->irq_mod_score = 0; | 309 | channel->irq_mod_score = 0; |
253 | } | 310 | } |
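The adaptive moderation rework above is easier to follow pulled out of the diff: every 1000 interrupts the score is compared against the two thresholds, the moderation value moves one hardware tick at a time within [1, efx->irq_rx_moderation], and any change is pushed through the per-NIC-type hook instead of being written to Falcon registers directly. A condensed restatement (adjust_moderation() is illustrative, not a driver function):

	static void adjust_moderation(struct efx_channel *channel, unsigned score)
	{
		struct efx_nic *efx = channel->efx;

		if (score < irq_adapt_low_thresh && channel->irq_moderation > 1) {
			channel->irq_moderation -= 1;	/* less delay, lower latency */
			efx->type->push_irq_moderation(channel);
		} else if (score > irq_adapt_high_thresh &&
			   channel->irq_moderation < efx->irq_rx_moderation) {
			channel->irq_moderation += 1;	/* more batching per interrupt */
			efx->type->push_irq_moderation(channel);
		}
	}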
@@ -280,7 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
280 | BUG_ON(!channel->enabled); | 337 | BUG_ON(!channel->enabled); |
281 | 338 | ||
282 | /* Disable interrupts and wait for ISRs to complete */ | 339 | /* Disable interrupts and wait for ISRs to complete */ |
283 | falcon_disable_interrupts(efx); | 340 | efx_nic_disable_interrupts(efx); |
284 | if (efx->legacy_irq) | 341 | if (efx->legacy_irq) |
285 | synchronize_irq(efx->legacy_irq); | 342 | synchronize_irq(efx->legacy_irq); |
286 | if (channel->irq) | 343 | if (channel->irq) |
@@ -290,14 +347,14 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
290 | napi_disable(&channel->napi_str); | 347 | napi_disable(&channel->napi_str); |
291 | 348 | ||
292 | /* Poll the channel */ | 349 | /* Poll the channel */ |
293 | efx_process_channel(channel, efx->type->evq_size); | 350 | efx_process_channel(channel, EFX_EVQ_SIZE); |
294 | 351 | ||
295 | /* Ack the eventq. This may cause an interrupt to be generated | 352 | /* Ack the eventq. This may cause an interrupt to be generated |
296 | * when they are reenabled */ | 353 | * when they are reenabled */ |
297 | efx_channel_processed(channel); | 354 | efx_channel_processed(channel); |
298 | 355 | ||
299 | napi_enable(&channel->napi_str); | 356 | napi_enable(&channel->napi_str); |
300 | falcon_enable_interrupts(efx); | 357 | efx_nic_enable_interrupts(efx); |
301 | } | 358 | } |
302 | 359 | ||
303 | /* Create event queue | 360 | /* Create event queue |
@@ -309,7 +366,7 @@ static int efx_probe_eventq(struct efx_channel *channel) | |||
309 | { | 366 | { |
310 | EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); | 367 | EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); |
311 | 368 | ||
312 | return falcon_probe_eventq(channel); | 369 | return efx_nic_probe_eventq(channel); |
313 | } | 370 | } |
314 | 371 | ||
315 | /* Prepare channel's event queue */ | 372 | /* Prepare channel's event queue */ |
@@ -319,21 +376,21 @@ static void efx_init_eventq(struct efx_channel *channel) | |||
319 | 376 | ||
320 | channel->eventq_read_ptr = 0; | 377 | channel->eventq_read_ptr = 0; |
321 | 378 | ||
322 | falcon_init_eventq(channel); | 379 | efx_nic_init_eventq(channel); |
323 | } | 380 | } |
324 | 381 | ||
325 | static void efx_fini_eventq(struct efx_channel *channel) | 382 | static void efx_fini_eventq(struct efx_channel *channel) |
326 | { | 383 | { |
327 | EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); | 384 | EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); |
328 | 385 | ||
329 | falcon_fini_eventq(channel); | 386 | efx_nic_fini_eventq(channel); |
330 | } | 387 | } |
331 | 388 | ||
332 | static void efx_remove_eventq(struct efx_channel *channel) | 389 | static void efx_remove_eventq(struct efx_channel *channel) |
333 | { | 390 | { |
334 | EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); | 391 | EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); |
335 | 392 | ||
336 | falcon_remove_eventq(channel); | 393 | efx_nic_remove_eventq(channel); |
337 | } | 394 | } |
338 | 395 | ||
339 | /************************************************************************** | 396 | /************************************************************************** |
@@ -499,7 +556,7 @@ static void efx_fini_channels(struct efx_nic *efx) | |||
499 | EFX_ASSERT_RESET_SERIALISED(efx); | 556 | EFX_ASSERT_RESET_SERIALISED(efx); |
500 | BUG_ON(efx->port_enabled); | 557 | BUG_ON(efx->port_enabled); |
501 | 558 | ||
502 | rc = falcon_flush_queues(efx); | 559 | rc = efx_nic_flush_queues(efx); |
503 | if (rc) | 560 | if (rc) |
504 | EFX_ERR(efx, "failed to flush queues\n"); | 561 | EFX_ERR(efx, "failed to flush queues\n"); |
505 | else | 562 | else |
@@ -547,8 +604,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) | |||
547 | * netif_carrier_on/off) of the link status, and also maintains the | 604 | * netif_carrier_on/off) of the link status, and also maintains the |
548 | * link status's stop on the port's TX queue. | 605 | * link status's stop on the port's TX queue. |
549 | */ | 606 | */ |
550 | static void efx_link_status_changed(struct efx_nic *efx) | 607 | void efx_link_status_changed(struct efx_nic *efx) |
551 | { | 608 | { |
609 | struct efx_link_state *link_state = &efx->link_state; | ||
610 | |||
552 | /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure | 611 | /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure |
553 | * that no events are triggered between unregister_netdev() and the | 612 | * that no events are triggered between unregister_netdev() and the |
554 | * driver unloading. A more general condition is that NETDEV_CHANGE | 613 | * driver unloading. A more general condition is that NETDEV_CHANGE |
@@ -561,19 +620,19 @@ static void efx_link_status_changed(struct efx_nic *efx) | |||
561 | return; | 620 | return; |
562 | } | 621 | } |
563 | 622 | ||
564 | if (efx->link_up != netif_carrier_ok(efx->net_dev)) { | 623 | if (link_state->up != netif_carrier_ok(efx->net_dev)) { |
565 | efx->n_link_state_changes++; | 624 | efx->n_link_state_changes++; |
566 | 625 | ||
567 | if (efx->link_up) | 626 | if (link_state->up) |
568 | netif_carrier_on(efx->net_dev); | 627 | netif_carrier_on(efx->net_dev); |
569 | else | 628 | else |
570 | netif_carrier_off(efx->net_dev); | 629 | netif_carrier_off(efx->net_dev); |
571 | } | 630 | } |
572 | 631 | ||
573 | /* Status message for kernel log */ | 632 | /* Status message for kernel log */ |
574 | if (efx->link_up) { | 633 | if (link_state->up) { |
575 | EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", | 634 | EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", |
576 | efx->link_speed, efx->link_fd ? "full" : "half", | 635 | link_state->speed, link_state->fd ? "full" : "half", |
577 | efx->net_dev->mtu, | 636 | efx->net_dev->mtu, |
578 | (efx->promiscuous ? " [PROMISC]" : "")); | 637 | (efx->promiscuous ? " [PROMISC]" : "")); |
579 | } else { | 638 | } else { |
@@ -582,16 +641,49 @@ static void efx_link_status_changed(struct efx_nic *efx) | |||
582 | 641 | ||
583 | } | 642 | } |
584 | 643 | ||
644 | void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) | ||
645 | { | ||
646 | efx->link_advertising = advertising; | ||
647 | if (advertising) { | ||
648 | if (advertising & ADVERTISED_Pause) | ||
649 | efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); | ||
650 | else | ||
651 | efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); | ||
652 | if (advertising & ADVERTISED_Asym_Pause) | ||
653 | efx->wanted_fc ^= EFX_FC_TX; | ||
654 | } | ||
655 | } | ||
656 | |||
657 | void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc) | ||
658 | { | ||
659 | efx->wanted_fc = wanted_fc; | ||
660 | if (efx->link_advertising) { | ||
661 | if (wanted_fc & EFX_FC_RX) | ||
662 | efx->link_advertising |= (ADVERTISED_Pause | | ||
663 | ADVERTISED_Asym_Pause); | ||
664 | else | ||
665 | efx->link_advertising &= ~(ADVERTISED_Pause | | ||
666 | ADVERTISED_Asym_Pause); | ||
667 | if (wanted_fc & EFX_FC_TX) | ||
668 | efx->link_advertising ^= ADVERTISED_Asym_Pause; | ||
669 | } | ||
670 | } | ||
671 | |||
585 | static void efx_fini_port(struct efx_nic *efx); | 672 | static void efx_fini_port(struct efx_nic *efx); |
586 | 673 | ||
587 | /* This call reinitialises the MAC to pick up new PHY settings. The | 674 | /* Push loopback/power/transmit disable settings to the PHY, and reconfigure |
588 | * caller must hold the mac_lock */ | 675 | * the MAC appropriately. All other PHY configuration changes are pushed |
589 | void __efx_reconfigure_port(struct efx_nic *efx) | 676 | * through phy_op->set_settings(), and pushed asynchronously to the MAC |
677 | * through efx_monitor(). | ||
678 | * | ||
679 | * Callers must hold the mac_lock | ||
680 | */ | ||
681 | int __efx_reconfigure_port(struct efx_nic *efx) | ||
590 | { | 682 | { |
591 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 683 | enum efx_phy_mode phy_mode; |
684 | int rc; | ||
592 | 685 | ||
593 | EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n", | 686 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
594 | raw_smp_processor_id()); | ||
595 | 687 | ||
596 | /* Serialise the promiscuous flag with efx_set_multicast_list. */ | 688 | /* Serialise the promiscuous flag with efx_set_multicast_list. */ |
597 | if (efx_dev_registered(efx)) { | 689 | if (efx_dev_registered(efx)) { |
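Working the bit operations in efx_link_set_wanted_fc() through by hand gives the standard IEEE 802.3 pause-advertisement mapping, which is handy when reviewing the XOR on ADVERTISED_Asym_Pause:

	wanted_fc			advertised pause bits
	---------			---------------------
	EFX_FC_RX | EFX_FC_TX		Pause
	EFX_FC_RX only			Pause | Asym_Pause
	EFX_FC_TX only			Asym_Pause
	(none)				(none)

efx_link_set_advertising() applies the same mapping in the opposite direction, so the two fields stay consistent whichever one ethtool changes.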
@@ -599,61 +691,48 @@ void __efx_reconfigure_port(struct efx_nic *efx) | |||
599 | netif_addr_unlock_bh(efx->net_dev); | 691 | netif_addr_unlock_bh(efx->net_dev); |
600 | } | 692 | } |
601 | 693 | ||
602 | falcon_deconfigure_mac_wrapper(efx); | 694 | /* Disable PHY transmit in mac level loopbacks */ |
603 | 695 | phy_mode = efx->phy_mode; | |
604 | /* Reconfigure the PHY, disabling transmit in mac level loopback. */ | ||
605 | if (LOOPBACK_INTERNAL(efx)) | 696 | if (LOOPBACK_INTERNAL(efx)) |
606 | efx->phy_mode |= PHY_MODE_TX_DISABLED; | 697 | efx->phy_mode |= PHY_MODE_TX_DISABLED; |
607 | else | 698 | else |
608 | efx->phy_mode &= ~PHY_MODE_TX_DISABLED; | 699 | efx->phy_mode &= ~PHY_MODE_TX_DISABLED; |
609 | efx->phy_op->reconfigure(efx); | ||
610 | |||
611 | if (falcon_switch_mac(efx)) | ||
612 | goto fail; | ||
613 | 700 | ||
614 | efx->mac_op->reconfigure(efx); | 701 | rc = efx->type->reconfigure_port(efx); |
615 | 702 | ||
616 | /* Inform kernel of loss/gain of carrier */ | 703 | if (rc) |
617 | efx_link_status_changed(efx); | 704 | efx->phy_mode = phy_mode; |
618 | return; | ||
619 | 705 | ||
620 | fail: | 706 | return rc; |
621 | EFX_ERR(efx, "failed to reconfigure MAC\n"); | ||
622 | efx->port_enabled = false; | ||
623 | efx_fini_port(efx); | ||
624 | } | 707 | } |
625 | 708 | ||
626 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is | 709 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is |
627 | * disabled. */ | 710 | * disabled. */ |
628 | void efx_reconfigure_port(struct efx_nic *efx) | 711 | int efx_reconfigure_port(struct efx_nic *efx) |
629 | { | 712 | { |
713 | int rc; | ||
714 | |||
630 | EFX_ASSERT_RESET_SERIALISED(efx); | 715 | EFX_ASSERT_RESET_SERIALISED(efx); |
631 | 716 | ||
632 | mutex_lock(&efx->mac_lock); | 717 | mutex_lock(&efx->mac_lock); |
633 | __efx_reconfigure_port(efx); | 718 | rc = __efx_reconfigure_port(efx); |
634 | mutex_unlock(&efx->mac_lock); | 719 | mutex_unlock(&efx->mac_lock); |
635 | } | ||
636 | |||
637 | /* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all() | ||
638 | * we don't efx_reconfigure_port() if the port is disabled. Care is taken | ||
639 | * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */ | ||
640 | static void efx_phy_work(struct work_struct *data) | ||
641 | { | ||
642 | struct efx_nic *efx = container_of(data, struct efx_nic, phy_work); | ||
643 | 720 | ||
644 | mutex_lock(&efx->mac_lock); | 721 | return rc; |
645 | if (efx->port_enabled) | ||
646 | __efx_reconfigure_port(efx); | ||
647 | mutex_unlock(&efx->mac_lock); | ||
648 | } | 722 | } |
649 | 723 | ||
724 | /* Asynchronous work item for changing MAC promiscuity and multicast | ||
725 | * hash. Avoid a drain/rx_ingress enable by reconfiguring the current | ||
726 | * MAC directly. */ | ||
650 | static void efx_mac_work(struct work_struct *data) | 727 | static void efx_mac_work(struct work_struct *data) |
651 | { | 728 | { |
652 | struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); | 729 | struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); |
653 | 730 | ||
654 | mutex_lock(&efx->mac_lock); | 731 | mutex_lock(&efx->mac_lock); |
655 | if (efx->port_enabled) | 732 | if (efx->port_enabled) { |
656 | efx->mac_op->irq(efx); | 733 | efx->type->push_multicast_hash(efx); |
734 | efx->mac_op->reconfigure(efx); | ||
735 | } | ||
657 | mutex_unlock(&efx->mac_lock); | 736 | mutex_unlock(&efx->mac_lock); |
658 | } | 737 | } |
659 | 738 | ||
@@ -663,14 +742,14 @@ static int efx_probe_port(struct efx_nic *efx) | |||
663 | 742 | ||
664 | EFX_LOG(efx, "create port\n"); | 743 | EFX_LOG(efx, "create port\n"); |
665 | 744 | ||
666 | /* Connect up MAC/PHY operations table and read MAC address */ | ||
667 | rc = falcon_probe_port(efx); | ||
668 | if (rc) | ||
669 | goto err; | ||
670 | |||
671 | if (phy_flash_cfg) | 745 | if (phy_flash_cfg) |
672 | efx->phy_mode = PHY_MODE_SPECIAL; | 746 | efx->phy_mode = PHY_MODE_SPECIAL; |
673 | 747 | ||
748 | /* Connect up MAC/PHY operations table */ | ||
749 | rc = efx->type->probe_port(efx); | ||
750 | if (rc) | ||
751 | goto err; | ||
752 | |||
674 | /* Sanity check MAC address */ | 753 | /* Sanity check MAC address */ |
675 | if (is_valid_ether_addr(efx->mac_address)) { | 754 | if (is_valid_ether_addr(efx->mac_address)) { |
676 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); | 755 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); |
@@ -699,29 +778,33 @@ static int efx_init_port(struct efx_nic *efx) | |||
699 | 778 | ||
700 | EFX_LOG(efx, "init port\n"); | 779 | EFX_LOG(efx, "init port\n"); |
701 | 780 | ||
702 | rc = efx->phy_op->init(efx); | ||
703 | if (rc) | ||
704 | return rc; | ||
705 | mutex_lock(&efx->mac_lock); | 781 | mutex_lock(&efx->mac_lock); |
706 | efx->phy_op->reconfigure(efx); | 782 | |
707 | rc = falcon_switch_mac(efx); | 783 | rc = efx->phy_op->init(efx); |
708 | mutex_unlock(&efx->mac_lock); | ||
709 | if (rc) | 784 | if (rc) |
710 | goto fail; | 785 | goto fail1; |
711 | efx->mac_op->reconfigure(efx); | ||
712 | 786 | ||
713 | efx->port_initialized = true; | 787 | efx->port_initialized = true; |
714 | efx_stats_enable(efx); | 788 | |
789 | /* Reconfigure the MAC before creating dma queues (required for | ||
790 | * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ | ||
791 | efx->mac_op->reconfigure(efx); | ||
792 | |||
793 | /* Ensure the PHY advertises the correct flow control settings */ | ||
794 | rc = efx->phy_op->reconfigure(efx); | ||
795 | if (rc) | ||
796 | goto fail2; | ||
797 | |||
798 | mutex_unlock(&efx->mac_lock); | ||
715 | return 0; | 799 | return 0; |
716 | 800 | ||
717 | fail: | 801 | fail2: |
718 | efx->phy_op->fini(efx); | 802 | efx->phy_op->fini(efx); |
803 | fail1: | ||
804 | mutex_unlock(&efx->mac_lock); | ||
719 | return rc; | 805 | return rc; |
720 | } | 806 | } |
721 | 807 | ||
722 | /* Allow efx_reconfigure_port() to be scheduled, and close the window | ||
723 | * between efx_stop_port and efx_flush_all whereby a previously scheduled | ||
724 | * efx_phy_work()/efx_mac_work() may have been cancelled */ | ||
725 | static void efx_start_port(struct efx_nic *efx) | 808 | static void efx_start_port(struct efx_nic *efx) |
726 | { | 809 | { |
727 | EFX_LOG(efx, "start port\n"); | 810 | EFX_LOG(efx, "start port\n"); |
@@ -729,15 +812,16 @@ static void efx_start_port(struct efx_nic *efx) | |||
729 | 812 | ||
730 | mutex_lock(&efx->mac_lock); | 813 | mutex_lock(&efx->mac_lock); |
731 | efx->port_enabled = true; | 814 | efx->port_enabled = true; |
732 | __efx_reconfigure_port(efx); | 815 | |
733 | efx->mac_op->irq(efx); | 816 | /* efx_mac_work() might have been scheduled after efx_stop_port(), |
817 | * and then cancelled by efx_flush_all() */ | ||
818 | efx->type->push_multicast_hash(efx); | ||
819 | efx->mac_op->reconfigure(efx); | ||
820 | |||
734 | mutex_unlock(&efx->mac_lock); | 821 | mutex_unlock(&efx->mac_lock); |
735 | } | 822 | } |
736 | 823 | ||
737 | /* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing, | 824 | /* Prevent efx_mac_work() and efx_monitor() from working */ |
738 | * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work | ||
739 | * and efx_mac_work may still be scheduled via NAPI processing until | ||
740 | * efx_flush_all() is called */ | ||
741 | static void efx_stop_port(struct efx_nic *efx) | 825 | static void efx_stop_port(struct efx_nic *efx) |
742 | { | 826 | { |
743 | EFX_LOG(efx, "stop port\n"); | 827 | EFX_LOG(efx, "stop port\n"); |
@@ -760,11 +844,10 @@ static void efx_fini_port(struct efx_nic *efx) | |||
760 | if (!efx->port_initialized) | 844 | if (!efx->port_initialized) |
761 | return; | 845 | return; |
762 | 846 | ||
763 | efx_stats_disable(efx); | ||
764 | efx->phy_op->fini(efx); | 847 | efx->phy_op->fini(efx); |
765 | efx->port_initialized = false; | 848 | efx->port_initialized = false; |
766 | 849 | ||
767 | efx->link_up = false; | 850 | efx->link_state.up = false; |
768 | efx_link_status_changed(efx); | 851 | efx_link_status_changed(efx); |
769 | } | 852 | } |
770 | 853 | ||
@@ -772,7 +855,7 @@ static void efx_remove_port(struct efx_nic *efx) | |||
772 | { | 855 | { |
773 | EFX_LOG(efx, "destroying port\n"); | 856 | EFX_LOG(efx, "destroying port\n"); |
774 | 857 | ||
775 | falcon_remove_port(efx); | 858 | efx->type->remove_port(efx); |
776 | } | 859 | } |
777 | 860 | ||
778 | /************************************************************************** | 861 | /************************************************************************** |
@@ -824,9 +907,8 @@ static int efx_init_io(struct efx_nic *efx) | |||
824 | goto fail2; | 907 | goto fail2; |
825 | } | 908 | } |
826 | 909 | ||
827 | efx->membase_phys = pci_resource_start(efx->pci_dev, | 910 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); |
828 | efx->type->mem_bar); | 911 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); |
829 | rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc"); | ||
830 | if (rc) { | 912 | if (rc) { |
831 | EFX_ERR(efx, "request for memory BAR failed\n"); | 913 | EFX_ERR(efx, "request for memory BAR failed\n"); |
832 | rc = -EIO; | 914 | rc = -EIO; |
@@ -835,21 +917,20 @@ static int efx_init_io(struct efx_nic *efx) | |||
835 | efx->membase = ioremap_nocache(efx->membase_phys, | 917 | efx->membase = ioremap_nocache(efx->membase_phys, |
836 | efx->type->mem_map_size); | 918 | efx->type->mem_map_size); |
837 | if (!efx->membase) { | 919 | if (!efx->membase) { |
838 | EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", | 920 | EFX_ERR(efx, "could not map memory BAR at %llx+%x\n", |
839 | efx->type->mem_bar, | ||
840 | (unsigned long long)efx->membase_phys, | 921 | (unsigned long long)efx->membase_phys, |
841 | efx->type->mem_map_size); | 922 | efx->type->mem_map_size); |
842 | rc = -ENOMEM; | 923 | rc = -ENOMEM; |
843 | goto fail4; | 924 | goto fail4; |
844 | } | 925 | } |
845 | EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", | 926 | EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n", |
846 | efx->type->mem_bar, (unsigned long long)efx->membase_phys, | 927 | (unsigned long long)efx->membase_phys, |
847 | efx->type->mem_map_size, efx->membase); | 928 | efx->type->mem_map_size, efx->membase); |
848 | 929 | ||
849 | return 0; | 930 | return 0; |
850 | 931 | ||
851 | fail4: | 932 | fail4: |
852 | pci_release_region(efx->pci_dev, efx->type->mem_bar); | 933 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); |
853 | fail3: | 934 | fail3: |
854 | efx->membase_phys = 0; | 935 | efx->membase_phys = 0; |
855 | fail2: | 936 | fail2: |
@@ -868,7 +949,7 @@ static void efx_fini_io(struct efx_nic *efx) | |||
868 | } | 949 | } |
869 | 950 | ||
870 | if (efx->membase_phys) { | 951 | if (efx->membase_phys) { |
871 | pci_release_region(efx->pci_dev, efx->type->mem_bar); | 952 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); |
872 | efx->membase_phys = 0; | 953 | efx->membase_phys = 0; |
873 | } | 954 | } |
874 | 955 | ||
@@ -1011,7 +1092,7 @@ static int efx_probe_nic(struct efx_nic *efx) | |||
1011 | EFX_LOG(efx, "creating NIC\n"); | 1092 | EFX_LOG(efx, "creating NIC\n"); |
1012 | 1093 | ||
1013 | /* Carry out hardware-type specific initialisation */ | 1094 | /* Carry out hardware-type specific initialisation */ |
1014 | rc = falcon_probe_nic(efx); | 1095 | rc = efx->type->probe(efx); |
1015 | if (rc) | 1096 | if (rc) |
1016 | return rc; | 1097 | return rc; |
1017 | 1098 | ||
@@ -1032,7 +1113,7 @@ static void efx_remove_nic(struct efx_nic *efx) | |||
1032 | EFX_LOG(efx, "destroying NIC\n"); | 1113 | EFX_LOG(efx, "destroying NIC\n"); |
1033 | 1114 | ||
1034 | efx_remove_interrupts(efx); | 1115 | efx_remove_interrupts(efx); |
1035 | falcon_remove_nic(efx); | 1116 | efx->type->remove(efx); |
1036 | } | 1117 | } |
1037 | 1118 | ||
1038 | /************************************************************************** | 1119 | /************************************************************************** |
@@ -1112,12 +1193,31 @@ static void efx_start_all(struct efx_nic *efx) | |||
1112 | efx_for_each_channel(channel, efx) | 1193 | efx_for_each_channel(channel, efx) |
1113 | efx_start_channel(channel); | 1194 | efx_start_channel(channel); |
1114 | 1195 | ||
1115 | falcon_enable_interrupts(efx); | 1196 | efx_nic_enable_interrupts(efx); |
1116 | 1197 | ||
1117 | /* Start hardware monitor if we're in RUNNING */ | 1198 | /* Switch to event based MCDI completions after enabling interrupts. |
1118 | if (efx->state == STATE_RUNNING) | 1199 | * If a reset has been scheduled, then we need to stay in polled mode. |
1200 | * Rather than serialising efx_mcdi_mode_event() [which sleeps] and | ||
1201 | * reset_pending [modified from an atomic context], we instead guarantee | ||
1202 | * that efx_mcdi_mode_poll() isn't reverted erroneously */ | ||
1203 | efx_mcdi_mode_event(efx); | ||
1204 | if (efx->reset_pending != RESET_TYPE_NONE) | ||
1205 | efx_mcdi_mode_poll(efx); | ||
1206 | |||
1207 | /* Start the hardware monitor if there is one. Otherwise (we're link | ||
1208 | * event driven), we have to poll the PHY because after an event queue | ||
1209 | * flush, we could have a missed a link state change */ | ||
1210 | if (efx->type->monitor != NULL) { | ||
1119 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | 1211 | queue_delayed_work(efx->workqueue, &efx->monitor_work, |
1120 | efx_monitor_interval); | 1212 | efx_monitor_interval); |
1213 | } else { | ||
1214 | mutex_lock(&efx->mac_lock); | ||
1215 | if (efx->phy_op->poll(efx)) | ||
1216 | efx_link_status_changed(efx); | ||
1217 | mutex_unlock(&efx->mac_lock); | ||
1218 | } | ||
1219 | |||
1220 | efx->type->start_stats(efx); | ||
1121 | } | 1221 | } |
1122 | 1222 | ||
1123 | /* Flush all delayed work. Should only be called when no more delayed work | 1223 | /* Flush all delayed work. Should only be called when no more delayed work |
@@ -1136,8 +1236,6 @@ static void efx_flush_all(struct efx_nic *efx) | |||
1136 | 1236 | ||
1137 | /* Stop scheduled port reconfigurations */ | 1237 | /* Stop scheduled port reconfigurations */ |
1138 | cancel_work_sync(&efx->mac_work); | 1238 | cancel_work_sync(&efx->mac_work); |
1139 | cancel_work_sync(&efx->phy_work); | ||
1140 | |||
1141 | } | 1239 | } |
1142 | 1240 | ||
1143 | /* Quiesce hardware and software without bringing the link down. | 1241 | /* Quiesce hardware and software without bringing the link down. |
@@ -1155,8 +1253,13 @@ static void efx_stop_all(struct efx_nic *efx) | |||
1155 | if (!efx->port_enabled) | 1253 | if (!efx->port_enabled) |
1156 | return; | 1254 | return; |
1157 | 1255 | ||
1256 | efx->type->stop_stats(efx); | ||
1257 | |||
1258 | /* Switch to MCDI polling on Siena before disabling interrupts */ | ||
1259 | efx_mcdi_mode_poll(efx); | ||
1260 | |||
1158 | /* Disable interrupts and wait for ISR to complete */ | 1261 | /* Disable interrupts and wait for ISR to complete */ |
1159 | falcon_disable_interrupts(efx); | 1262 | efx_nic_disable_interrupts(efx); |
1160 | if (efx->legacy_irq) | 1263 | if (efx->legacy_irq) |
1161 | synchronize_irq(efx->legacy_irq); | 1264 | synchronize_irq(efx->legacy_irq); |
1162 | efx_for_each_channel(channel, efx) { | 1265 | efx_for_each_channel(channel, efx) { |
@@ -1173,15 +1276,9 @@ static void efx_stop_all(struct efx_nic *efx) | |||
1173 | * window to loose phy events */ | 1276 | * window to loose phy events */ |
1174 | efx_stop_port(efx); | 1277 | efx_stop_port(efx); |
1175 | 1278 | ||
1176 | /* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */ | 1279 | /* Flush efx_mac_work(), refill_workqueue, monitor_work */ |
1177 | efx_flush_all(efx); | 1280 | efx_flush_all(efx); |
1178 | 1281 | ||
1179 | /* Isolate the MAC from the TX and RX engines, so that queue | ||
1180 | * flushes will complete in a timely fashion. */ | ||
1181 | falcon_deconfigure_mac_wrapper(efx); | ||
1182 | msleep(10); /* Let the Rx FIFO drain */ | ||
1183 | falcon_drain_tx_fifo(efx); | ||
1184 | |||
1185 | /* Stop the kernel transmit interface late, so the watchdog | 1282 | /* Stop the kernel transmit interface late, so the watchdog |
1186 | * timer isn't ticking over the flush */ | 1283 | * timer isn't ticking over the flush */ |
1187 | if (efx_dev_registered(efx)) { | 1284 | if (efx_dev_registered(efx)) { |
@@ -1201,41 +1298,39 @@ static void efx_remove_all(struct efx_nic *efx) | |||
1201 | efx_remove_nic(efx); | 1298 | efx_remove_nic(efx); |
1202 | } | 1299 | } |
1203 | 1300 | ||
1204 | /* A convinience function to safely flush all the queues */ | ||
1205 | void efx_flush_queues(struct efx_nic *efx) | ||
1206 | { | ||
1207 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
1208 | |||
1209 | efx_stop_all(efx); | ||
1210 | |||
1211 | efx_fini_channels(efx); | ||
1212 | efx_init_channels(efx); | ||
1213 | |||
1214 | efx_start_all(efx); | ||
1215 | } | ||
1216 | |||
1217 | /************************************************************************** | 1301 | /************************************************************************** |
1218 | * | 1302 | * |
1219 | * Interrupt moderation | 1303 | * Interrupt moderation |
1220 | * | 1304 | * |
1221 | **************************************************************************/ | 1305 | **************************************************************************/ |
1222 | 1306 | ||
1307 | static unsigned irq_mod_ticks(int usecs, int resolution) | ||
1308 | { | ||
1309 | if (usecs <= 0) | ||
1310 | return 0; /* cannot receive interrupts ahead of time :-) */ | ||
1311 | if (usecs < resolution) | ||
1312 | return 1; /* never round down to 0 */ | ||
1313 | return usecs / resolution; | ||
1314 | } | ||
1315 | |||
1223 | /* Set interrupt moderation parameters */ | 1316 | /* Set interrupt moderation parameters */ |
1224 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, | 1317 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, |
1225 | bool rx_adaptive) | 1318 | bool rx_adaptive) |
1226 | { | 1319 | { |
1227 | struct efx_tx_queue *tx_queue; | 1320 | struct efx_tx_queue *tx_queue; |
1228 | struct efx_rx_queue *rx_queue; | 1321 | struct efx_rx_queue *rx_queue; |
1322 | unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); | ||
1323 | unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); | ||
1229 | 1324 | ||
1230 | EFX_ASSERT_RESET_SERIALISED(efx); | 1325 | EFX_ASSERT_RESET_SERIALISED(efx); |
1231 | 1326 | ||
1232 | efx_for_each_tx_queue(tx_queue, efx) | 1327 | efx_for_each_tx_queue(tx_queue, efx) |
1233 | tx_queue->channel->irq_moderation = tx_usecs; | 1328 | tx_queue->channel->irq_moderation = tx_ticks; |
1234 | 1329 | ||
1235 | efx->irq_rx_adaptive = rx_adaptive; | 1330 | efx->irq_rx_adaptive = rx_adaptive; |
1236 | efx->irq_rx_moderation = rx_usecs; | 1331 | efx->irq_rx_moderation = rx_ticks; |
1237 | efx_for_each_rx_queue(rx_queue, efx) | 1332 | efx_for_each_rx_queue(rx_queue, efx) |
1238 | rx_queue->channel->irq_moderation = rx_usecs; | 1333 | rx_queue->channel->irq_moderation = rx_ticks; |
1239 | } | 1334 | } |
1240 | 1335 | ||
1241 | /************************************************************************** | 1336 | /************************************************************************** |
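irq_mod_ticks() above converts the user-visible microsecond values into the hardware tick units now stored in irq_moderation (the old code stored the microsecond values directly). Its rounding rules, written out for a resolution of R microseconds:

	/* Behaviour of irq_mod_ticks(usecs, R), derived from the code above:
	 *   usecs <= 0      -> 0          (moderation disabled)
	 *   0 < usecs < R   -> 1          (a non-zero request never rounds down to 0)
	 *   otherwise       -> usecs / R  (truncating division)
	 */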
@@ -1250,10 +1345,10 @@ static void efx_monitor(struct work_struct *data) | |||
1250 | { | 1345 | { |
1251 | struct efx_nic *efx = container_of(data, struct efx_nic, | 1346 | struct efx_nic *efx = container_of(data, struct efx_nic, |
1252 | monitor_work.work); | 1347 | monitor_work.work); |
1253 | int rc; | ||
1254 | 1348 | ||
1255 | EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", | 1349 | EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", |
1256 | raw_smp_processor_id()); | 1350 | raw_smp_processor_id()); |
1351 | BUG_ON(efx->type->monitor == NULL); | ||
1257 | 1352 | ||
1258 | /* If the mac_lock is already held then it is likely a port | 1353 | /* If the mac_lock is already held then it is likely a port |
1259 | * reconfiguration is already in place, which will likely do | 1354 | * reconfiguration is already in place, which will likely do |
@@ -1262,15 +1357,7 @@ static void efx_monitor(struct work_struct *data) | |||
1262 | goto out_requeue; | 1357 | goto out_requeue; |
1263 | if (!efx->port_enabled) | 1358 | if (!efx->port_enabled) |
1264 | goto out_unlock; | 1359 | goto out_unlock; |
1265 | rc = efx->board_info.monitor(efx); | 1360 | efx->type->monitor(efx); |
1266 | if (rc) { | ||
1267 | EFX_ERR(efx, "Board sensor %s; shutting down PHY\n", | ||
1268 | (rc == -ERANGE) ? "reported fault" : "failed"); | ||
1269 | efx->phy_mode |= PHY_MODE_LOW_POWER; | ||
1270 | falcon_sim_phy_event(efx); | ||
1271 | } | ||
1272 | efx->phy_op->poll(efx); | ||
1273 | efx->mac_op->poll(efx); | ||
1274 | 1361 | ||
1275 | out_unlock: | 1362 | out_unlock: |
1276 | mutex_unlock(&efx->mac_lock); | 1363 | mutex_unlock(&efx->mac_lock); |
@@ -1374,6 +1461,12 @@ static int efx_net_open(struct net_device *net_dev) | |||
1374 | return -EIO; | 1461 | return -EIO; |
1375 | if (efx->phy_mode & PHY_MODE_SPECIAL) | 1462 | if (efx->phy_mode & PHY_MODE_SPECIAL) |
1376 | return -EBUSY; | 1463 | return -EBUSY; |
1464 | if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) | ||
1465 | return -EIO; | ||
1466 | |||
1467 | /* Notify the kernel of the link state polled during driver load, | ||
1468 | * before the monitor starts running */ | ||
1469 | efx_link_status_changed(efx); | ||
1377 | 1470 | ||
1378 | efx_start_all(efx); | 1471 | efx_start_all(efx); |
1379 | return 0; | 1472 | return 0; |
@@ -1400,20 +1493,6 @@ static int efx_net_stop(struct net_device *net_dev) | |||
1400 | return 0; | 1493 | return 0; |
1401 | } | 1494 | } |
1402 | 1495 | ||
1403 | void efx_stats_disable(struct efx_nic *efx) | ||
1404 | { | ||
1405 | spin_lock(&efx->stats_lock); | ||
1406 | ++efx->stats_disable_count; | ||
1407 | spin_unlock(&efx->stats_lock); | ||
1408 | } | ||
1409 | |||
1410 | void efx_stats_enable(struct efx_nic *efx) | ||
1411 | { | ||
1412 | spin_lock(&efx->stats_lock); | ||
1413 | --efx->stats_disable_count; | ||
1414 | spin_unlock(&efx->stats_lock); | ||
1415 | } | ||
1416 | |||
1417 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ | 1496 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ |
1418 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | 1497 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) |
1419 | { | 1498 | { |
@@ -1421,17 +1500,9 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | |||
1421 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 1500 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
1422 | struct net_device_stats *stats = &net_dev->stats; | 1501 | struct net_device_stats *stats = &net_dev->stats; |
1423 | 1502 | ||
1424 | /* Update stats if possible, but do not wait if another thread | 1503 | spin_lock_bh(&efx->stats_lock); |
1425 | * is updating them or if MAC stats fetches are temporarily | 1504 | efx->type->update_stats(efx); |
1426 | * disabled; slightly stale stats are acceptable. | 1505 | spin_unlock_bh(&efx->stats_lock); |
1427 | */ | ||
1428 | if (!spin_trylock(&efx->stats_lock)) | ||
1429 | return stats; | ||
1430 | if (!efx->stats_disable_count) { | ||
1431 | efx->mac_op->update_stats(efx); | ||
1432 | falcon_update_nic_stats(efx); | ||
1433 | } | ||
1434 | spin_unlock(&efx->stats_lock); | ||
1435 | 1506 | ||
1436 | stats->rx_packets = mac_stats->rx_packets; | 1507 | stats->rx_packets = mac_stats->rx_packets; |
1437 | stats->tx_packets = mac_stats->tx_packets; | 1508 | stats->tx_packets = mac_stats->tx_packets; |
@@ -1490,7 +1561,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) | |||
1490 | EFX_LOG(efx, "changing MTU to %d\n", new_mtu); | 1561 | EFX_LOG(efx, "changing MTU to %d\n", new_mtu); |
1491 | 1562 | ||
1492 | efx_fini_channels(efx); | 1563 | efx_fini_channels(efx); |
1564 | |||
1565 | mutex_lock(&efx->mac_lock); | ||
1566 | /* Reconfigure the MAC before enabling the dma queues so that | ||
1567 | * the RX buffers don't overflow */ | ||
1493 | net_dev->mtu = new_mtu; | 1568 | net_dev->mtu = new_mtu; |
1569 | efx->mac_op->reconfigure(efx); | ||
1570 | mutex_unlock(&efx->mac_lock); | ||
1571 | |||
1494 | efx_init_channels(efx); | 1572 | efx_init_channels(efx); |
1495 | 1573 | ||
1496 | efx_start_all(efx); | 1574 | efx_start_all(efx); |
@@ -1514,7 +1592,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) | |||
1514 | memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); | 1592 | memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); |
1515 | 1593 | ||
1516 | /* Reconfigure the MAC */ | 1594 | /* Reconfigure the MAC */ |
1517 | efx_reconfigure_port(efx); | 1595 | mutex_lock(&efx->mac_lock); |
1596 | efx->mac_op->reconfigure(efx); | ||
1597 | mutex_unlock(&efx->mac_lock); | ||
1518 | 1598 | ||
1519 | return 0; | 1599 | return 0; |
1520 | } | 1600 | } |
@@ -1523,38 +1603,34 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) | |||
1523 | static void efx_set_multicast_list(struct net_device *net_dev) | 1603 | static void efx_set_multicast_list(struct net_device *net_dev) |
1524 | { | 1604 | { |
1525 | struct efx_nic *efx = netdev_priv(net_dev); | 1605 | struct efx_nic *efx = netdev_priv(net_dev); |
1526 | struct dev_mc_list *mc_list = net_dev->mc_list; | 1606 | struct dev_mc_list *mc_list; |
1527 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; | 1607 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; |
1528 | bool promiscuous = !!(net_dev->flags & IFF_PROMISC); | ||
1529 | bool changed = (efx->promiscuous != promiscuous); | ||
1530 | u32 crc; | 1608 | u32 crc; |
1531 | int bit; | 1609 | int bit; |
1532 | int i; | ||
1533 | 1610 | ||
1534 | efx->promiscuous = promiscuous; | 1611 | efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); |
1535 | 1612 | ||
1536 | /* Build multicast hash table */ | 1613 | /* Build multicast hash table */ |
1537 | if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) { | 1614 | if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { |
1538 | memset(mc_hash, 0xff, sizeof(*mc_hash)); | 1615 | memset(mc_hash, 0xff, sizeof(*mc_hash)); |
1539 | } else { | 1616 | } else { |
1540 | memset(mc_hash, 0x00, sizeof(*mc_hash)); | 1617 | memset(mc_hash, 0x00, sizeof(*mc_hash)); |
1541 | for (i = 0; i < net_dev->mc_count; i++) { | 1618 | netdev_for_each_mc_addr(mc_list, net_dev) { |
1542 | crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); | 1619 | crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); |
1543 | bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); | 1620 | bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); |
1544 | set_bit_le(bit, mc_hash->byte); | 1621 | set_bit_le(bit, mc_hash->byte); |
1545 | mc_list = mc_list->next; | ||
1546 | } | 1622 | } |
1547 | } | ||
1548 | |||
1549 | if (!efx->port_enabled) | ||
1550 | /* Delay pushing settings until efx_start_port() */ | ||
1551 | return; | ||
1552 | 1623 | ||
1553 | if (changed) | 1624 | /* Broadcast packets go through the multicast hash filter. |
1554 | queue_work(efx->workqueue, &efx->phy_work); | 1625 | * ether_crc_le() of the broadcast address is 0xbe2612ff |
1626 | * so we always add bit 0xff to the mask. | ||
1627 | */ | ||
1628 | set_bit_le(0xff, mc_hash->byte); | ||
1629 | } | ||
1555 | 1630 | ||
1556 | /* Create and activate new global multicast hash table */ | 1631 | if (efx->port_enabled) |
1557 | falcon_set_multicast_hash(efx); | 1632 | queue_work(efx->workqueue, &efx->mac_work); |
1633 | /* Otherwise efx_start_port() will do this */ | ||
1558 | } | 1634 | } |
1559 | 1635 | ||
1560 | static const struct net_device_ops efx_netdev_ops = { | 1636 | static const struct net_device_ops efx_netdev_ops = { |
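The rewritten efx_set_multicast_list() above derives each hash bucket from ether_crc_le() of the address, and its comment notes that the broadcast address hashes to 0xbe2612ff, hence bucket 0xff is always set. The standalone program below reproduces that arithmetic with a local CRC routine written to match the usual description of the kernel's ether_crc_le() (initial value 0xffffffff, reflected polynomial 0xedb88320, no final inversion); the 256-bucket width is an assumption standing in for EFX_MCAST_HASH_ENTRIES.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_le_sketch(size_t len, const uint8_t *p)
{
	uint32_t crc = 0xffffffff;	/* initial value, no final XOR */
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint32_t crc = crc32_le_sketch(sizeof(bcast), bcast);

	/* expect crc 0xbe2612ff, bucket 255 (0xff), matching the comment above */
	printf("crc=0x%08x bucket=%u\n", (unsigned)crc, (unsigned)(crc & 255));
	return 0;
}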
@@ -1683,21 +1759,18 @@ static void efx_unregister_netdev(struct efx_nic *efx) | |||
1683 | 1759 | ||
1684 | /* Tears down the entire software state and most of the hardware state | 1760 | /* Tears down the entire software state and most of the hardware state |
1685 | * before reset. */ | 1761 | * before reset. */ |
1686 | void efx_reset_down(struct efx_nic *efx, enum reset_type method, | 1762 | void efx_reset_down(struct efx_nic *efx, enum reset_type method) |
1687 | struct ethtool_cmd *ecmd) | ||
1688 | { | 1763 | { |
1689 | EFX_ASSERT_RESET_SERIALISED(efx); | 1764 | EFX_ASSERT_RESET_SERIALISED(efx); |
1690 | 1765 | ||
1691 | efx_stats_disable(efx); | ||
1692 | efx_stop_all(efx); | 1766 | efx_stop_all(efx); |
1693 | mutex_lock(&efx->mac_lock); | 1767 | mutex_lock(&efx->mac_lock); |
1694 | mutex_lock(&efx->spi_lock); | 1768 | mutex_lock(&efx->spi_lock); |
1695 | 1769 | ||
1696 | efx->phy_op->get_settings(efx, ecmd); | ||
1697 | |||
1698 | efx_fini_channels(efx); | 1770 | efx_fini_channels(efx); |
1699 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) | 1771 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) |
1700 | efx->phy_op->fini(efx); | 1772 | efx->phy_op->fini(efx); |
1773 | efx->type->fini(efx); | ||
1701 | } | 1774 | } |
1702 | 1775 | ||
1703 | /* This function will always ensure that the locks acquired in | 1776 | /* This function will always ensure that the locks acquired in |
@@ -1705,79 +1778,67 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method, | |||
1705 | * that we were unable to reinitialise the hardware, and the | 1778 | * that we were unable to reinitialise the hardware, and the |
1706 | * driver should be disabled. If ok is false, then the rx and tx | 1779 | * driver should be disabled. If ok is false, then the rx and tx |
1707 | * engines are not restarted, pending a RESET_DISABLE. */ | 1780 | * engines are not restarted, pending a RESET_DISABLE. */ |
1708 | int efx_reset_up(struct efx_nic *efx, enum reset_type method, | 1781 | int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) |
1709 | struct ethtool_cmd *ecmd, bool ok) | ||
1710 | { | 1782 | { |
1711 | int rc; | 1783 | int rc; |
1712 | 1784 | ||
1713 | EFX_ASSERT_RESET_SERIALISED(efx); | 1785 | EFX_ASSERT_RESET_SERIALISED(efx); |
1714 | 1786 | ||
1715 | rc = falcon_init_nic(efx); | 1787 | rc = efx->type->init(efx); |
1716 | if (rc) { | 1788 | if (rc) { |
1717 | EFX_ERR(efx, "failed to initialise NIC\n"); | 1789 | EFX_ERR(efx, "failed to initialise NIC\n"); |
1718 | ok = false; | 1790 | goto fail; |
1719 | } | 1791 | } |
1720 | 1792 | ||
1793 | if (!ok) | ||
1794 | goto fail; | ||
1795 | |||
1721 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { | 1796 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { |
1722 | if (ok) { | 1797 | rc = efx->phy_op->init(efx); |
1723 | rc = efx->phy_op->init(efx); | 1798 | if (rc) |
1724 | if (rc) | 1799 | goto fail; |
1725 | ok = false; | 1800 | if (efx->phy_op->reconfigure(efx)) |
1726 | } | 1801 | EFX_ERR(efx, "could not restore PHY settings\n"); |
1727 | if (!ok) | ||
1728 | efx->port_initialized = false; | ||
1729 | } | 1802 | } |
1730 | 1803 | ||
1731 | if (ok) { | 1804 | efx->mac_op->reconfigure(efx); |
1732 | efx_init_channels(efx); | ||
1733 | 1805 | ||
1734 | if (efx->phy_op->set_settings(efx, ecmd)) | 1806 | efx_init_channels(efx); |
1735 | EFX_ERR(efx, "could not restore PHY settings\n"); | 1807 | |
1736 | } | 1808 | mutex_unlock(&efx->spi_lock); |
1809 | mutex_unlock(&efx->mac_lock); | ||
1810 | |||
1811 | efx_start_all(efx); | ||
1812 | |||
1813 | return 0; | ||
1814 | |||
1815 | fail: | ||
1816 | efx->port_initialized = false; | ||
1737 | 1817 | ||
1738 | mutex_unlock(&efx->spi_lock); | 1818 | mutex_unlock(&efx->spi_lock); |
1739 | mutex_unlock(&efx->mac_lock); | 1819 | mutex_unlock(&efx->mac_lock); |
1740 | 1820 | ||
1741 | if (ok) { | ||
1742 | efx_start_all(efx); | ||
1743 | efx_stats_enable(efx); | ||
1744 | } | ||
1745 | return rc; | 1821 | return rc; |
1746 | } | 1822 | } |
1747 | 1823 | ||
1748 | /* Reset the NIC as transparently as possible. Do not reset the PHY | 1824 | /* Reset the NIC using the specified method. Note that the reset may |
1749 | * Note that the reset may fail, in which case the card will be left | 1825 | * fail, in which case the card will be left in an unusable state. |
1750 | * in a most-probably-unusable state. | ||
1751 | * | ||
1752 | * This function will sleep. You cannot reset from within an atomic | ||
1753 | * state; use efx_schedule_reset() instead. | ||
1754 | * | 1826 | * |
1755 | * Grabs the rtnl_lock. | 1827 | * Caller must hold the rtnl_lock. |
1756 | */ | 1828 | */ |
1757 | static int efx_reset(struct efx_nic *efx) | 1829 | int efx_reset(struct efx_nic *efx, enum reset_type method) |
1758 | { | 1830 | { |
1759 | struct ethtool_cmd ecmd; | 1831 | int rc, rc2; |
1760 | enum reset_type method = efx->reset_pending; | 1832 | bool disabled; |
1761 | int rc = 0; | ||
1762 | |||
1763 | /* Serialise with kernel interfaces */ | ||
1764 | rtnl_lock(); | ||
1765 | |||
1766 | /* If we're not RUNNING then don't reset. Leave the reset_pending | ||
1767 | * flag set so that efx_pci_probe_main will be retried */ | ||
1768 | if (efx->state != STATE_RUNNING) { | ||
1769 | EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); | ||
1770 | goto out_unlock; | ||
1771 | } | ||
1772 | 1833 | ||
1773 | EFX_INFO(efx, "resetting (%d)\n", method); | 1834 | EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method)); |
1774 | 1835 | ||
1775 | efx_reset_down(efx, method, &ecmd); | 1836 | efx_reset_down(efx, method); |
1776 | 1837 | ||
1777 | rc = falcon_reset_hw(efx, method); | 1838 | rc = efx->type->reset(efx, method); |
1778 | if (rc) { | 1839 | if (rc) { |
1779 | EFX_ERR(efx, "failed to reset hardware\n"); | 1840 | EFX_ERR(efx, "failed to reset hardware\n"); |
1780 | goto out_disable; | 1841 | goto out; |
1781 | } | 1842 | } |
1782 | 1843 | ||
1783 | /* Allow resets to be rescheduled. */ | 1844 | /* Allow resets to be rescheduled. */ |
@@ -1789,25 +1850,23 @@ static int efx_reset(struct efx_nic *efx) | |||
1789 | * can respond to requests. */ | 1850 | * can respond to requests. */ |
1790 | pci_set_master(efx->pci_dev); | 1851 | pci_set_master(efx->pci_dev); |
1791 | 1852 | ||
1853 | out: | ||
1792 | /* Leave device stopped if necessary */ | 1854 | /* Leave device stopped if necessary */ |
1793 | if (method == RESET_TYPE_DISABLE) { | 1855 | disabled = rc || method == RESET_TYPE_DISABLE; |
1794 | efx_reset_up(efx, method, &ecmd, false); | 1856 | rc2 = efx_reset_up(efx, method, !disabled); |
1795 | rc = -EIO; | 1857 | if (rc2) { |
1796 | } else { | 1858 | disabled = true; |
1797 | rc = efx_reset_up(efx, method, &ecmd, true); | 1859 | if (!rc) |
1860 | rc = rc2; | ||
1798 | } | 1861 | } |
1799 | 1862 | ||
1800 | out_disable: | 1863 | if (disabled) { |
1801 | if (rc) { | 1864 | dev_close(efx->net_dev); |
1802 | EFX_ERR(efx, "has been disabled\n"); | 1865 | EFX_ERR(efx, "has been disabled\n"); |
1803 | efx->state = STATE_DISABLED; | 1866 | efx->state = STATE_DISABLED; |
1804 | dev_close(efx->net_dev); | ||
1805 | } else { | 1867 | } else { |
1806 | EFX_LOG(efx, "reset complete\n"); | 1868 | EFX_LOG(efx, "reset complete\n"); |
1807 | } | 1869 | } |
1808 | |||
1809 | out_unlock: | ||
1810 | rtnl_unlock(); | ||
1811 | return rc; | 1870 | return rc; |
1812 | } | 1871 | } |
1813 | 1872 | ||
@@ -1816,9 +1875,18 @@ out_unlock: | |||
1816 | */ | 1875 | */ |
1817 | static void efx_reset_work(struct work_struct *data) | 1876 | static void efx_reset_work(struct work_struct *data) |
1818 | { | 1877 | { |
1819 | struct efx_nic *nic = container_of(data, struct efx_nic, reset_work); | 1878 | struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); |
1879 | |||
1880 | /* If we're not RUNNING then don't reset. Leave the reset_pending | ||
1881 | * flag set so that efx_pci_probe_main will be retried */ | ||
1882 | if (efx->state != STATE_RUNNING) { | ||
1883 | EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); | ||
1884 | return; | ||
1885 | } | ||
1820 | 1886 | ||
1821 | efx_reset(nic); | 1887 | rtnl_lock(); |
1888 | (void)efx_reset(efx, efx->reset_pending); | ||
1889 | rtnl_unlock(); | ||
1822 | } | 1890 | } |
1823 | 1891 | ||
1824 | void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | 1892 | void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) |
@@ -1843,18 +1911,24 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
1843 | case RESET_TYPE_TX_SKIP: | 1911 | case RESET_TYPE_TX_SKIP: |
1844 | method = RESET_TYPE_INVISIBLE; | 1912 | method = RESET_TYPE_INVISIBLE; |
1845 | break; | 1913 | break; |
1914 | case RESET_TYPE_MC_FAILURE: | ||
1846 | default: | 1915 | default: |
1847 | method = RESET_TYPE_ALL; | 1916 | method = RESET_TYPE_ALL; |
1848 | break; | 1917 | break; |
1849 | } | 1918 | } |
1850 | 1919 | ||
1851 | if (method != type) | 1920 | if (method != type) |
1852 | EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method); | 1921 | EFX_LOG(efx, "scheduling %s reset for %s\n", |
1922 | RESET_TYPE(method), RESET_TYPE(type)); | ||
1853 | else | 1923 | else |
1854 | EFX_LOG(efx, "scheduling reset (%d)\n", method); | 1924 | EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method)); |
1855 | 1925 | ||
1856 | efx->reset_pending = method; | 1926 | efx->reset_pending = method; |
1857 | 1927 | ||
1928 | /* efx_process_channel() will no longer read events once a | ||
1929 | * reset is scheduled. So switch back to poll'd MCDI completions. */ | ||
1930 | efx_mcdi_mode_poll(efx); | ||
1931 | |||
1858 | queue_work(reset_workqueue, &efx->reset_work); | 1932 | queue_work(reset_workqueue, &efx->reset_work); |
1859 | } | 1933 | } |
1860 | 1934 | ||
@@ -1865,17 +1939,21 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
1865 | **************************************************************************/ | 1939 | **************************************************************************/ |
1866 | 1940 | ||
1867 | /* PCI device ID table */ | 1941 | /* PCI device ID table */ |
1868 | static struct pci_device_id efx_pci_table[] __devinitdata = { | 1942 | static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { |
1869 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), | 1943 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), |
1870 | .driver_data = (unsigned long) &falcon_a_nic_type}, | 1944 | .driver_data = (unsigned long) &falcon_a1_nic_type}, |
1871 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), | 1945 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), |
1872 | .driver_data = (unsigned long) &falcon_b_nic_type}, | 1946 | .driver_data = (unsigned long) &falcon_b0_nic_type}, |
1947 | {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), | ||
1948 | .driver_data = (unsigned long) &siena_a0_nic_type}, | ||
1949 | {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), | ||
1950 | .driver_data = (unsigned long) &siena_a0_nic_type}, | ||
1873 | {0} /* end of list */ | 1951 | {0} /* end of list */ |
1874 | }; | 1952 | }; |
1875 | 1953 | ||
1876 | /************************************************************************** | 1954 | /************************************************************************** |
1877 | * | 1955 | * |
1878 | * Dummy PHY/MAC/Board operations | 1956 | * Dummy PHY/MAC operations |
1879 | * | 1957 | * |
1880 | * Can be used for some unimplemented operations | 1958 | * Can be used for some unimplemented operations |
1881 | * Needed so all function pointers are valid and do not have to be tested | 1959 | * Needed so all function pointers are valid and do not have to be tested |
@@ -1887,29 +1965,19 @@ int efx_port_dummy_op_int(struct efx_nic *efx) | |||
1887 | return 0; | 1965 | return 0; |
1888 | } | 1966 | } |
1889 | void efx_port_dummy_op_void(struct efx_nic *efx) {} | 1967 | void efx_port_dummy_op_void(struct efx_nic *efx) {} |
1890 | void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {} | 1968 | void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) |
1891 | 1969 | { | |
1892 | static struct efx_mac_operations efx_dummy_mac_operations = { | 1970 | } |
1893 | .reconfigure = efx_port_dummy_op_void, | 1971 | bool efx_port_dummy_op_poll(struct efx_nic *efx) |
1894 | .poll = efx_port_dummy_op_void, | 1972 | { |
1895 | .irq = efx_port_dummy_op_void, | 1973 | return false; |
1896 | }; | 1974 | } |
1897 | 1975 | ||
1898 | static struct efx_phy_operations efx_dummy_phy_operations = { | 1976 | static struct efx_phy_operations efx_dummy_phy_operations = { |
1899 | .init = efx_port_dummy_op_int, | 1977 | .init = efx_port_dummy_op_int, |
1900 | .reconfigure = efx_port_dummy_op_void, | 1978 | .reconfigure = efx_port_dummy_op_int, |
1901 | .poll = efx_port_dummy_op_void, | 1979 | .poll = efx_port_dummy_op_poll, |
1902 | .fini = efx_port_dummy_op_void, | 1980 | .fini = efx_port_dummy_op_void, |
1903 | .clear_interrupt = efx_port_dummy_op_void, | ||
1904 | }; | ||
1905 | |||
1906 | static struct efx_board efx_dummy_board_info = { | ||
1907 | .init = efx_port_dummy_op_int, | ||
1908 | .init_leds = efx_port_dummy_op_void, | ||
1909 | .set_id_led = efx_port_dummy_op_blink, | ||
1910 | .monitor = efx_port_dummy_op_int, | ||
1911 | .blink = efx_port_dummy_op_blink, | ||
1912 | .fini = efx_port_dummy_op_void, | ||
1913 | }; | 1981 | }; |
1914 | 1982 | ||
1915 | /************************************************************************** | 1983 | /************************************************************************** |
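The dummy operations above exist so every hook behind efx->phy_op (and, before this patch, board_info) is always callable without NULL checks. A self-contained restatement of the pattern, with invented names and a void * context in place of struct efx_nic:

#include <stdbool.h>

struct phy_ops_sketch {
	int  (*init)(void *ctx);
	int  (*reconfigure)(void *ctx);
	bool (*poll)(void *ctx);	/* "did the link state change?" */
	void (*fini)(void *ctx);
};

static int  dummy_int(void *ctx)  { (void)ctx; return 0; }
static bool dummy_poll(void *ctx) { (void)ctx; return false; }
static void dummy_void(void *ctx) { (void)ctx; }

const struct phy_ops_sketch dummy_phy_ops_sketch = {
	.init        = dummy_int,
	.reconfigure = dummy_int,
	.poll        = dummy_poll,
	.fini        = dummy_void,
};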
@@ -1932,26 +2000,26 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
1932 | /* Initialise common structures */ | 2000 | /* Initialise common structures */ |
1933 | memset(efx, 0, sizeof(*efx)); | 2001 | memset(efx, 0, sizeof(*efx)); |
1934 | spin_lock_init(&efx->biu_lock); | 2002 | spin_lock_init(&efx->biu_lock); |
1935 | spin_lock_init(&efx->phy_lock); | 2003 | mutex_init(&efx->mdio_lock); |
1936 | mutex_init(&efx->spi_lock); | 2004 | mutex_init(&efx->spi_lock); |
2005 | #ifdef CONFIG_SFC_MTD | ||
2006 | INIT_LIST_HEAD(&efx->mtd_list); | ||
2007 | #endif | ||
1937 | INIT_WORK(&efx->reset_work, efx_reset_work); | 2008 | INIT_WORK(&efx->reset_work, efx_reset_work); |
1938 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); | 2009 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); |
1939 | efx->pci_dev = pci_dev; | 2010 | efx->pci_dev = pci_dev; |
1940 | efx->state = STATE_INIT; | 2011 | efx->state = STATE_INIT; |
1941 | efx->reset_pending = RESET_TYPE_NONE; | 2012 | efx->reset_pending = RESET_TYPE_NONE; |
1942 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); | 2013 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); |
1943 | efx->board_info = efx_dummy_board_info; | ||
1944 | 2014 | ||
1945 | efx->net_dev = net_dev; | 2015 | efx->net_dev = net_dev; |
1946 | efx->rx_checksum_enabled = true; | 2016 | efx->rx_checksum_enabled = true; |
1947 | spin_lock_init(&efx->netif_stop_lock); | 2017 | spin_lock_init(&efx->netif_stop_lock); |
1948 | spin_lock_init(&efx->stats_lock); | 2018 | spin_lock_init(&efx->stats_lock); |
1949 | efx->stats_disable_count = 1; | ||
1950 | mutex_init(&efx->mac_lock); | 2019 | mutex_init(&efx->mac_lock); |
1951 | efx->mac_op = &efx_dummy_mac_operations; | 2020 | efx->mac_op = type->default_mac_ops; |
1952 | efx->phy_op = &efx_dummy_phy_operations; | 2021 | efx->phy_op = &efx_dummy_phy_operations; |
1953 | efx->mdio.dev = net_dev; | 2022 | efx->mdio.dev = net_dev; |
1954 | INIT_WORK(&efx->phy_work, efx_phy_work); | ||
1955 | INIT_WORK(&efx->mac_work, efx_mac_work); | 2023 | INIT_WORK(&efx->mac_work, efx_mac_work); |
1956 | atomic_set(&efx->netif_stop_count, 1); | 2024 | atomic_set(&efx->netif_stop_count, 1); |
1957 | 2025 | ||
@@ -1981,17 +2049,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
1981 | 2049 | ||
1982 | efx->type = type; | 2050 | efx->type = type; |
1983 | 2051 | ||
1984 | /* Sanity-check NIC type */ | ||
1985 | EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask & | ||
1986 | (efx->type->txd_ring_mask + 1)); | ||
1987 | EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask & | ||
1988 | (efx->type->rxd_ring_mask + 1)); | ||
1989 | EFX_BUG_ON_PARANOID(efx->type->evq_size & | ||
1990 | (efx->type->evq_size - 1)); | ||
1991 | /* As close as we can get to guaranteeing that we don't overflow */ | 2052 | /* As close as we can get to guaranteeing that we don't overflow */ |
1992 | EFX_BUG_ON_PARANOID(efx->type->evq_size < | 2053 | BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE); |
1993 | (efx->type->txd_ring_mask + 1 + | 2054 | |
1994 | efx->type->rxd_ring_mask + 1)); | ||
1995 | EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); | 2055 | EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); |
1996 | 2056 | ||
1997 | /* Higher numbered interrupt modes are less capable! */ | 2057 | /* Higher numbered interrupt modes are less capable! */ |
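The BUILD_BUG_ON in the hunk above turns several run-time paranoia checks into a single compile-time one: the event queue must hold one event per outstanding TX and RX descriptor. With the fixed sizes this patch adds to efx.h (EFX_TXQ_SIZE and EFX_RXQ_SIZE of 1024, EFX_EVQ_SIZE of 4096), a standalone equivalent using C11's _Static_assert in place of the kernel macro would be:

#define EFX_TXQ_SIZE 1024
#define EFX_RXQ_SIZE 1024
#define EFX_EVQ_SIZE 4096

_Static_assert(EFX_EVQ_SIZE >= EFX_TXQ_SIZE + EFX_RXQ_SIZE,
	       "event queue too small for worst-case completions");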
@@ -2027,19 +2087,10 @@ static void efx_fini_struct(struct efx_nic *efx) | |||
2027 | */ | 2087 | */ |
2028 | static void efx_pci_remove_main(struct efx_nic *efx) | 2088 | static void efx_pci_remove_main(struct efx_nic *efx) |
2029 | { | 2089 | { |
2030 | EFX_ASSERT_RESET_SERIALISED(efx); | 2090 | efx_nic_fini_interrupt(efx); |
2031 | |||
2032 | /* Skip everything if we never obtained a valid membase */ | ||
2033 | if (!efx->membase) | ||
2034 | return; | ||
2035 | |||
2036 | efx_fini_channels(efx); | 2091 | efx_fini_channels(efx); |
2037 | efx_fini_port(efx); | 2092 | efx_fini_port(efx); |
2038 | 2093 | efx->type->fini(efx); | |
2039 | /* Shutdown the board, then the NIC and board state */ | ||
2040 | efx->board_info.fini(efx); | ||
2041 | falcon_fini_interrupt(efx); | ||
2042 | |||
2043 | efx_fini_napi(efx); | 2094 | efx_fini_napi(efx); |
2044 | efx_remove_all(efx); | 2095 | efx_remove_all(efx); |
2045 | } | 2096 | } |
@@ -2063,9 +2114,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev) | |||
2063 | /* Allow any queued efx_resets() to complete */ | 2114 | /* Allow any queued efx_resets() to complete */ |
2064 | rtnl_unlock(); | 2115 | rtnl_unlock(); |
2065 | 2116 | ||
2066 | if (efx->membase == NULL) | ||
2067 | goto out; | ||
2068 | |||
2069 | efx_unregister_netdev(efx); | 2117 | efx_unregister_netdev(efx); |
2070 | 2118 | ||
2071 | efx_mtd_remove(efx); | 2119 | efx_mtd_remove(efx); |
@@ -2078,7 +2126,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev) | |||
2078 | 2126 | ||
2079 | efx_pci_remove_main(efx); | 2127 | efx_pci_remove_main(efx); |
2080 | 2128 | ||
2081 | out: | ||
2082 | efx_fini_io(efx); | 2129 | efx_fini_io(efx); |
2083 | EFX_LOG(efx, "shutdown successful\n"); | 2130 | EFX_LOG(efx, "shutdown successful\n"); |
2084 | 2131 | ||
@@ -2103,39 +2150,31 @@ static int efx_pci_probe_main(struct efx_nic *efx) | |||
2103 | if (rc) | 2150 | if (rc) |
2104 | goto fail2; | 2151 | goto fail2; |
2105 | 2152 | ||
2106 | /* Initialise the board */ | 2153 | rc = efx->type->init(efx); |
2107 | rc = efx->board_info.init(efx); | ||
2108 | if (rc) { | ||
2109 | EFX_ERR(efx, "failed to initialise board\n"); | ||
2110 | goto fail3; | ||
2111 | } | ||
2112 | |||
2113 | rc = falcon_init_nic(efx); | ||
2114 | if (rc) { | 2154 | if (rc) { |
2115 | EFX_ERR(efx, "failed to initialise NIC\n"); | 2155 | EFX_ERR(efx, "failed to initialise NIC\n"); |
2116 | goto fail4; | 2156 | goto fail3; |
2117 | } | 2157 | } |
2118 | 2158 | ||
2119 | rc = efx_init_port(efx); | 2159 | rc = efx_init_port(efx); |
2120 | if (rc) { | 2160 | if (rc) { |
2121 | EFX_ERR(efx, "failed to initialise port\n"); | 2161 | EFX_ERR(efx, "failed to initialise port\n"); |
2122 | goto fail5; | 2162 | goto fail4; |
2123 | } | 2163 | } |
2124 | 2164 | ||
2125 | efx_init_channels(efx); | 2165 | efx_init_channels(efx); |
2126 | 2166 | ||
2127 | rc = falcon_init_interrupt(efx); | 2167 | rc = efx_nic_init_interrupt(efx); |
2128 | if (rc) | 2168 | if (rc) |
2129 | goto fail6; | 2169 | goto fail5; |
2130 | 2170 | ||
2131 | return 0; | 2171 | return 0; |
2132 | 2172 | ||
2133 | fail6: | 2173 | fail5: |
2134 | efx_fini_channels(efx); | 2174 | efx_fini_channels(efx); |
2135 | efx_fini_port(efx); | 2175 | efx_fini_port(efx); |
2136 | fail5: | ||
2137 | fail4: | 2176 | fail4: |
2138 | efx->board_info.fini(efx); | 2177 | efx->type->fini(efx); |
2139 | fail3: | 2178 | fail3: |
2140 | efx_fini_napi(efx); | 2179 | efx_fini_napi(efx); |
2141 | fail2: | 2180 | fail2: |
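The fail3/fail4/fail5 renumbering above is the standard kernel unwind ladder: each setup step that succeeds gains a matching cleanup label, and a later failure jumps to the label that undoes exactly what has already been done, in reverse order. A generic sketch of the shape, with placeholder steps rather than the driver's:

static int setup_nic_sketch(void)     { return 0; }
static int setup_port_sketch(void)    { return -1; }	/* pretend this fails */
static void teardown_nic_sketch(void) { }

int probe_sketch(void)
{
	int rc;

	rc = setup_nic_sketch();
	if (rc)
		goto fail1;

	rc = setup_port_sketch();
	if (rc)
		goto fail2;

	return 0;

fail2:
	teardown_nic_sketch();	/* undo only what already succeeded */
fail1:
	return rc;
}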
@@ -2165,9 +2204,11 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2165 | net_dev = alloc_etherdev(sizeof(*efx)); | 2204 | net_dev = alloc_etherdev(sizeof(*efx)); |
2166 | if (!net_dev) | 2205 | if (!net_dev) |
2167 | return -ENOMEM; | 2206 | return -ENOMEM; |
2168 | net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | | 2207 | net_dev->features |= (type->offload_features | NETIF_F_SG | |
2169 | NETIF_F_HIGHDMA | NETIF_F_TSO | | 2208 | NETIF_F_HIGHDMA | NETIF_F_TSO | |
2170 | NETIF_F_GRO); | 2209 | NETIF_F_GRO); |
2210 | if (type->offload_features & NETIF_F_V6_CSUM) | ||
2211 | net_dev->features |= NETIF_F_TSO6; | ||
2171 | /* Mask for features that also apply to VLAN devices */ | 2212 | /* Mask for features that also apply to VLAN devices */ |
2172 | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | | 2213 | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | |
2173 | NETIF_F_HIGHDMA | NETIF_F_TSO); | 2214 | NETIF_F_HIGHDMA | NETIF_F_TSO); |
@@ -2219,18 +2260,19 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2219 | goto fail4; | 2260 | goto fail4; |
2220 | } | 2261 | } |
2221 | 2262 | ||
2222 | /* Switch to the running state before we expose the device to | 2263 | /* Switch to the running state before we expose the device to the OS, |
2223 | * the OS. This is to ensure that the initial gathering of | 2264 | * so that dev_open()|efx_start_all() will actually start the device */ |
2224 | * MAC stats succeeds. */ | ||
2225 | efx->state = STATE_RUNNING; | 2265 | efx->state = STATE_RUNNING; |
2226 | 2266 | ||
2227 | efx_mtd_probe(efx); /* allowed to fail */ | ||
2228 | |||
2229 | rc = efx_register_netdev(efx); | 2267 | rc = efx_register_netdev(efx); |
2230 | if (rc) | 2268 | if (rc) |
2231 | goto fail5; | 2269 | goto fail5; |
2232 | 2270 | ||
2233 | EFX_LOG(efx, "initialisation successful\n"); | 2271 | EFX_LOG(efx, "initialisation successful\n"); |
2272 | |||
2273 | rtnl_lock(); | ||
2274 | efx_mtd_probe(efx); /* allowed to fail */ | ||
2275 | rtnl_unlock(); | ||
2234 | return 0; | 2276 | return 0; |
2235 | 2277 | ||
2236 | fail5: | 2278 | fail5: |
@@ -2241,16 +2283,113 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2241 | fail2: | 2283 | fail2: |
2242 | efx_fini_struct(efx); | 2284 | efx_fini_struct(efx); |
2243 | fail1: | 2285 | fail1: |
2286 | WARN_ON(rc > 0); | ||
2244 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); | 2287 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); |
2245 | free_netdev(net_dev); | 2288 | free_netdev(net_dev); |
2246 | return rc; | 2289 | return rc; |
2247 | } | 2290 | } |
2248 | 2291 | ||
2292 | static int efx_pm_freeze(struct device *dev) | ||
2293 | { | ||
2294 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
2295 | |||
2296 | efx->state = STATE_FINI; | ||
2297 | |||
2298 | netif_device_detach(efx->net_dev); | ||
2299 | |||
2300 | efx_stop_all(efx); | ||
2301 | efx_fini_channels(efx); | ||
2302 | |||
2303 | return 0; | ||
2304 | } | ||
2305 | |||
2306 | static int efx_pm_thaw(struct device *dev) | ||
2307 | { | ||
2308 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
2309 | |||
2310 | efx->state = STATE_INIT; | ||
2311 | |||
2312 | efx_init_channels(efx); | ||
2313 | |||
2314 | mutex_lock(&efx->mac_lock); | ||
2315 | efx->phy_op->reconfigure(efx); | ||
2316 | mutex_unlock(&efx->mac_lock); | ||
2317 | |||
2318 | efx_start_all(efx); | ||
2319 | |||
2320 | netif_device_attach(efx->net_dev); | ||
2321 | |||
2322 | efx->state = STATE_RUNNING; | ||
2323 | |||
2324 | efx->type->resume_wol(efx); | ||
2325 | |||
2326 | return 0; | ||
2327 | } | ||
2328 | |||
2329 | static int efx_pm_poweroff(struct device *dev) | ||
2330 | { | ||
2331 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
2332 | struct efx_nic *efx = pci_get_drvdata(pci_dev); | ||
2333 | |||
2334 | efx->type->fini(efx); | ||
2335 | |||
2336 | efx->reset_pending = RESET_TYPE_NONE; | ||
2337 | |||
2338 | pci_save_state(pci_dev); | ||
2339 | return pci_set_power_state(pci_dev, PCI_D3hot); | ||
2340 | } | ||
2341 | |||
2342 | /* Used for both resume and restore */ | ||
2343 | static int efx_pm_resume(struct device *dev) | ||
2344 | { | ||
2345 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
2346 | struct efx_nic *efx = pci_get_drvdata(pci_dev); | ||
2347 | int rc; | ||
2348 | |||
2349 | rc = pci_set_power_state(pci_dev, PCI_D0); | ||
2350 | if (rc) | ||
2351 | return rc; | ||
2352 | pci_restore_state(pci_dev); | ||
2353 | rc = pci_enable_device(pci_dev); | ||
2354 | if (rc) | ||
2355 | return rc; | ||
2356 | pci_set_master(efx->pci_dev); | ||
2357 | rc = efx->type->reset(efx, RESET_TYPE_ALL); | ||
2358 | if (rc) | ||
2359 | return rc; | ||
2360 | rc = efx->type->init(efx); | ||
2361 | if (rc) | ||
2362 | return rc; | ||
2363 | efx_pm_thaw(dev); | ||
2364 | return 0; | ||
2365 | } | ||
2366 | |||
2367 | static int efx_pm_suspend(struct device *dev) | ||
2368 | { | ||
2369 | int rc; | ||
2370 | |||
2371 | efx_pm_freeze(dev); | ||
2372 | rc = efx_pm_poweroff(dev); | ||
2373 | if (rc) | ||
2374 | efx_pm_resume(dev); | ||
2375 | return rc; | ||
2376 | } | ||
2377 | |||
2378 | static struct dev_pm_ops efx_pm_ops = { | ||
2379 | .suspend = efx_pm_suspend, | ||
2380 | .resume = efx_pm_resume, | ||
2381 | .freeze = efx_pm_freeze, | ||
2382 | .thaw = efx_pm_thaw, | ||
2383 | .poweroff = efx_pm_poweroff, | ||
2384 | .restore = efx_pm_resume, | ||
2385 | }; | ||
2386 | |||
2249 | static struct pci_driver efx_pci_driver = { | 2387 | static struct pci_driver efx_pci_driver = { |
2250 | .name = EFX_DRIVER_NAME, | 2388 | .name = EFX_DRIVER_NAME, |
2251 | .id_table = efx_pci_table, | 2389 | .id_table = efx_pci_table, |
2252 | .probe = efx_pci_probe, | 2390 | .probe = efx_pci_probe, |
2253 | .remove = efx_pci_remove, | 2391 | .remove = efx_pci_remove, |
2392 | .driver.pm = &efx_pm_ops, | ||
2254 | }; | 2393 | }; |
2255 | 2394 | ||
2256 | /************************************************************************** | 2395 | /************************************************************************** |
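The new efx_pm_ops is attached through pci_driver.driver.pm above, rather than through the older .suspend/.resume members of struct pci_driver. A minimal skeleton of that registration pattern for a generic PCI driver (names invented, callbacks left empty, probe/remove/id_table omitted) looks roughly like:

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)  { return 0; }

static struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

static struct pci_driver demo_pci_driver = {
	.name      = "demo",
	.driver.pm = &demo_pm_ops,
};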
@@ -2314,8 +2453,8 @@ static void __exit efx_exit_module(void) | |||
2314 | module_init(efx_init_module); | 2453 | module_init(efx_init_module); |
2315 | module_exit(efx_exit_module); | 2454 | module_exit(efx_exit_module); |
2316 | 2455 | ||
2317 | MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and " | 2456 | MODULE_AUTHOR("Solarflare Communications and " |
2318 | "Solarflare Communications"); | 2457 | "Michael Brown <mbrown@fensystems.co.uk>"); |
2319 | MODULE_DESCRIPTION("Solarflare Communications network driver"); | 2458 | MODULE_DESCRIPTION("Solarflare Communications network driver"); |
2320 | MODULE_LICENSE("GPL"); | 2459 | MODULE_LICENSE("GPL"); |
2321 | MODULE_DEVICE_TABLE(pci, efx_pci_table); | 2460 | MODULE_DEVICE_TABLE(pci, efx_pci_table); |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index aecaf62f4929..7eff0a615cb3 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -18,40 +18,67 @@ | |||
18 | #define FALCON_A_P_DEVID 0x0703 | 18 | #define FALCON_A_P_DEVID 0x0703 |
19 | #define FALCON_A_S_DEVID 0x6703 | 19 | #define FALCON_A_S_DEVID 0x6703 |
20 | #define FALCON_B_P_DEVID 0x0710 | 20 | #define FALCON_B_P_DEVID 0x0710 |
21 | #define BETHPAGE_A_P_DEVID 0x0803 | ||
22 | #define SIENA_A_P_DEVID 0x0813 | ||
23 | |||
24 | /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ | ||
25 | #define EFX_MEM_BAR 2 | ||
21 | 26 | ||
22 | /* TX */ | 27 | /* TX */ |
23 | extern netdev_tx_t efx_xmit(struct efx_nic *efx, | 28 | extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); |
24 | struct efx_tx_queue *tx_queue, | 29 | extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); |
25 | struct sk_buff *skb); | 30 | extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); |
31 | extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); | ||
32 | extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); | ||
33 | extern netdev_tx_t | ||
34 | efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); | ||
35 | extern netdev_tx_t | ||
36 | efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); | ||
37 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); | ||
26 | extern void efx_stop_queue(struct efx_nic *efx); | 38 | extern void efx_stop_queue(struct efx_nic *efx); |
27 | extern void efx_wake_queue(struct efx_nic *efx); | 39 | extern void efx_wake_queue(struct efx_nic *efx); |
40 | #define EFX_TXQ_SIZE 1024 | ||
41 | #define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1) | ||
28 | 42 | ||
29 | /* RX */ | 43 | /* RX */ |
30 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); | 44 | extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); |
45 | extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); | ||
46 | extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); | ||
47 | extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | ||
48 | extern void efx_rx_strategy(struct efx_channel *channel); | ||
49 | extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | ||
50 | extern void efx_rx_work(struct work_struct *data); | ||
51 | extern void __efx_rx_packet(struct efx_channel *channel, | ||
52 | struct efx_rx_buffer *rx_buf, bool checksummed); | ||
31 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | 53 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, |
32 | unsigned int len, bool checksummed, bool discard); | 54 | unsigned int len, bool checksummed, bool discard); |
33 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); | 55 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); |
56 | #define EFX_RXQ_SIZE 1024 | ||
57 | #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) | ||
34 | 58 | ||
35 | /* Channels */ | 59 | /* Channels */ |
36 | extern void efx_process_channel_now(struct efx_channel *channel); | 60 | extern void efx_process_channel_now(struct efx_channel *channel); |
37 | extern void efx_flush_queues(struct efx_nic *efx); | 61 | #define EFX_EVQ_SIZE 4096 |
62 | #define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1) | ||
38 | 63 | ||
39 | /* Ports */ | 64 | /* Ports */ |
40 | extern void efx_stats_disable(struct efx_nic *efx); | 65 | extern int efx_reconfigure_port(struct efx_nic *efx); |
41 | extern void efx_stats_enable(struct efx_nic *efx); | 66 | extern int __efx_reconfigure_port(struct efx_nic *efx); |
42 | extern void efx_reconfigure_port(struct efx_nic *efx); | 67 | |
43 | extern void __efx_reconfigure_port(struct efx_nic *efx); | 68 | /* Ethtool support */ |
69 | extern int efx_ethtool_get_settings(struct net_device *net_dev, | ||
70 | struct ethtool_cmd *ecmd); | ||
71 | extern int efx_ethtool_set_settings(struct net_device *net_dev, | ||
72 | struct ethtool_cmd *ecmd); | ||
73 | extern const struct ethtool_ops efx_ethtool_ops; | ||
44 | 74 | ||
45 | /* Reset handling */ | 75 | /* Reset handling */ |
46 | extern void efx_reset_down(struct efx_nic *efx, enum reset_type method, | 76 | extern int efx_reset(struct efx_nic *efx, enum reset_type method); |
47 | struct ethtool_cmd *ecmd); | 77 | extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); |
48 | extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, | 78 | extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); |
49 | struct ethtool_cmd *ecmd, bool ok); | ||
50 | 79 | ||
51 | /* Global */ | 80 | /* Global */ |
52 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); | 81 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); |
53 | extern void efx_suspend(struct efx_nic *efx); | ||
54 | extern void efx_resume(struct efx_nic *efx); | ||
55 | extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, | 82 | extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, |
56 | int rx_usecs, bool rx_adaptive); | 83 | int rx_usecs, bool rx_adaptive); |
57 | extern int efx_request_power(struct efx_nic *efx, int mw, const char *name); | 84 | extern int efx_request_power(struct efx_nic *efx, int mw, const char *name); |
@@ -60,7 +87,9 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *); | |||
60 | /* Dummy PHY ops for PHY drivers */ | 87 | /* Dummy PHY ops for PHY drivers */ |
61 | extern int efx_port_dummy_op_int(struct efx_nic *efx); | 88 | extern int efx_port_dummy_op_int(struct efx_nic *efx); |
62 | extern void efx_port_dummy_op_void(struct efx_nic *efx); | 89 | extern void efx_port_dummy_op_void(struct efx_nic *efx); |
63 | extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink); | 90 | extern void |
91 | efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); | ||
92 | extern bool efx_port_dummy_op_poll(struct efx_nic *efx); | ||
64 | 93 | ||
65 | /* MTD */ | 94 | /* MTD */ |
66 | #ifdef CONFIG_SFC_MTD | 95 | #ifdef CONFIG_SFC_MTD |
@@ -84,4 +113,8 @@ static inline void efx_schedule_channel(struct efx_channel *channel) | |||
84 | napi_schedule(&channel->napi_str); | 113 | napi_schedule(&channel->napi_str); |
85 | } | 114 | } |
86 | 115 | ||
116 | extern void efx_link_status_changed(struct efx_nic *efx); | ||
117 | extern void efx_link_set_advertising(struct efx_nic *efx, u32); | ||
118 | extern void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type); | ||
119 | |||
87 | #endif /* EFX_EFX_H */ | 120 | #endif /* EFX_EFX_H */ |
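The new SIZE/MASK pairs in efx.h encode the usual power-of-two ring convention: indices run freely and "counter & MASK" selects the ring slot without a modulo. A standalone toy with the same EFX_TXQ values; the free-running counter is invented:

#include <stdio.h>

#define EFX_TXQ_SIZE 1024
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)

int main(void)
{
	unsigned int insert_count = 1030;	/* free-running descriptor counter */

	/* slot 1030 wraps to ring entry 6 */
	printf("%u\n", insert_count & EFX_TXQ_MASK);
	return 0;
}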
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h index 60cbc6e1e66b..384cfe3b1be1 100644 --- a/drivers/net/sfc/enum.h +++ b/drivers/net/sfc/enum.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2007-2008 Solarflare Communications Inc. | 3 | * Copyright 2007-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -13,44 +13,101 @@ | |||
13 | /** | 13 | /** |
14 | * enum efx_loopback_mode - loopback modes | 14 | * enum efx_loopback_mode - loopback modes |
15 | * @LOOPBACK_NONE: no loopback | 15 | * @LOOPBACK_NONE: no loopback |
16 | * @LOOPBACK_GMAC: loopback within GMAC at unspecified level | 16 | * @LOOPBACK_DATA: data path loopback |
17 | * @LOOPBACK_XGMII: loopback within XMAC at XGMII level | 17 | * @LOOPBACK_GMAC: loopback within GMAC |
18 | * @LOOPBACK_XGXS: loopback within XMAC at XGXS level | 18 | * @LOOPBACK_XGMII: loopback after XMAC |
19 | * @LOOPBACK_XAUI: loopback within XMAC at XAUI level | 19 | * @LOOPBACK_XGXS: loopback within BPX after XGXS |
20 | * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes | ||
21 | * @LOOPBACK_GMII: loopback within BPX after GMAC | ||
22 | * @LOOPBACK_SGMII: loopback within BPX within SGMII | ||
23 | * @LOOPBACK_XGBR: loopback within BPX within XGBR | ||
24 | * @LOOPBACK_XFI: loopback within BPX before XFI serdes | ||
25 | * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes | ||
26 | * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII | ||
27 | * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII | ||
28 | * @LOOPBACK_XFI_FAR: loopback after XFI serdes | ||
20 | * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level | 29 | * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level |
21 | * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level | 30 | * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level |
22 | * @LOOPBACK_PCS: loopback within 10G PHY at PCS level | 31 | * @LOOPBACK_PCS: loopback within 10G PHY at PCS level |
23 | * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level | 32 | * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level |
24 | * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) | 33 | * @LOOPBACK_XPORT: cross port loopback |
34 | * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC | ||
35 | * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes | ||
36 | * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes | ||
37 | * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes | ||
38 | * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC | ||
39 | * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes | ||
40 | * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes | ||
41 | * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level | ||
25 | */ | 42 | */ |
26 | /* Please keep in order and up-to-date w.r.t the following two #defines */ | 43 | /* Please keep up-to-date w.r.t the following two #defines */ |
27 | enum efx_loopback_mode { | 44 | enum efx_loopback_mode { |
28 | LOOPBACK_NONE = 0, | 45 | LOOPBACK_NONE = 0, |
29 | LOOPBACK_GMAC = 1, | 46 | LOOPBACK_DATA = 1, |
30 | LOOPBACK_XGMII = 2, | 47 | LOOPBACK_GMAC = 2, |
31 | LOOPBACK_XGXS = 3, | 48 | LOOPBACK_XGMII = 3, |
32 | LOOPBACK_XAUI = 4, | 49 | LOOPBACK_XGXS = 4, |
33 | LOOPBACK_GPHY = 5, | 50 | LOOPBACK_XAUI = 5, |
34 | LOOPBACK_PHYXS = 6, | 51 | LOOPBACK_GMII = 6, |
35 | LOOPBACK_PCS = 7, | 52 | LOOPBACK_SGMII = 7, |
36 | LOOPBACK_PMAPMD = 8, | 53 | LOOPBACK_XGBR = 8, |
37 | LOOPBACK_NETWORK = 9, | 54 | LOOPBACK_XFI = 9, |
55 | LOOPBACK_XAUI_FAR = 10, | ||
56 | LOOPBACK_GMII_FAR = 11, | ||
57 | LOOPBACK_SGMII_FAR = 12, | ||
58 | LOOPBACK_XFI_FAR = 13, | ||
59 | LOOPBACK_GPHY = 14, | ||
60 | LOOPBACK_PHYXS = 15, | ||
61 | LOOPBACK_PCS = 16, | ||
62 | LOOPBACK_PMAPMD = 17, | ||
63 | LOOPBACK_XPORT = 18, | ||
64 | LOOPBACK_XGMII_WS = 19, | ||
65 | LOOPBACK_XAUI_WS = 20, | ||
66 | LOOPBACK_XAUI_WS_FAR = 21, | ||
67 | LOOPBACK_XAUI_WS_NEAR = 22, | ||
68 | LOOPBACK_GMII_WS = 23, | ||
69 | LOOPBACK_XFI_WS = 24, | ||
70 | LOOPBACK_XFI_WS_FAR = 25, | ||
71 | LOOPBACK_PHYXS_WS = 26, | ||
38 | LOOPBACK_MAX | 72 | LOOPBACK_MAX |
39 | }; | 73 | }; |
40 | |||
41 | #define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD | 74 | #define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD |
42 | 75 | ||
43 | extern const char *efx_loopback_mode_names[]; | ||
44 | #define LOOPBACK_MODE_NAME(mode) \ | ||
45 | STRING_TABLE_LOOKUP(mode, efx_loopback_mode) | ||
46 | #define LOOPBACK_MODE(efx) \ | ||
47 | LOOPBACK_MODE_NAME(efx->loopback_mode) | ||
48 | |||
49 | /* These loopbacks occur within the controller */ | 76 | /* These loopbacks occur within the controller */ |
50 | #define LOOPBACKS_INTERNAL ((1 << LOOPBACK_GMAC) | \ | 77 | #define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \ |
51 | (1 << LOOPBACK_XGMII)| \ | 78 | (1 << LOOPBACK_GMAC) | \ |
52 | (1 << LOOPBACK_XGXS) | \ | 79 | (1 << LOOPBACK_XGMII)| \ |
53 | (1 << LOOPBACK_XAUI)) | 80 | (1 << LOOPBACK_XGXS) | \ |
81 | (1 << LOOPBACK_XAUI) | \ | ||
82 | (1 << LOOPBACK_GMII) | \ | ||
83 | (1 << LOOPBACK_SGMII) | \ | ||
84 | (1 << LOOPBACK_SGMII) | \ | ||
85 | (1 << LOOPBACK_XGBR) | \ | ||
86 | (1 << LOOPBACK_XFI) | \ | ||
87 | (1 << LOOPBACK_XAUI_FAR) | \ | ||
88 | (1 << LOOPBACK_GMII_FAR) | \ | ||
89 | (1 << LOOPBACK_SGMII_FAR) | \ | ||
90 | (1 << LOOPBACK_XFI_FAR) | \ | ||
91 | (1 << LOOPBACK_XGMII_WS) | \ | ||
92 | (1 << LOOPBACK_XAUI_WS) | \ | ||
93 | (1 << LOOPBACK_XAUI_WS_FAR) | \ | ||
94 | (1 << LOOPBACK_XAUI_WS_NEAR) | \ | ||
95 | (1 << LOOPBACK_GMII_WS) | \ | ||
96 | (1 << LOOPBACK_XFI_WS) | \ | ||
97 | (1 << LOOPBACK_XFI_WS_FAR)) | ||
98 | |||
99 | #define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \ | ||
100 | (1 << LOOPBACK_XAUI_WS) | \ | ||
101 | (1 << LOOPBACK_XAUI_WS_FAR) | \ | ||
102 | (1 << LOOPBACK_XAUI_WS_NEAR) | \ | ||
103 | (1 << LOOPBACK_GMII_WS) | \ | ||
104 | (1 << LOOPBACK_XFI_WS) | \ | ||
105 | (1 << LOOPBACK_XFI_WS_FAR) | \ | ||
106 | (1 << LOOPBACK_PHYXS_WS)) | ||
107 | |||
108 | #define LOOPBACKS_EXTERNAL(_efx) \ | ||
109 | ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \ | ||
110 | ~(1 << LOOPBACK_NONE)) | ||
54 | 111 | ||
55 | #define LOOPBACK_MASK(_efx) \ | 112 | #define LOOPBACK_MASK(_efx) \ |
56 | (1 << (_efx)->loopback_mode) | 113 | (1 << (_efx)->loopback_mode) |
@@ -58,6 +115,9 @@ extern const char *efx_loopback_mode_names[]; | |||
58 | #define LOOPBACK_INTERNAL(_efx) \ | 115 | #define LOOPBACK_INTERNAL(_efx) \ |
59 | (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) | 116 | (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) |
60 | 117 | ||
118 | #define LOOPBACK_EXTERNAL(_efx) \ | ||
119 | (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx))) | ||
120 | |||
61 | #define LOOPBACK_CHANGED(_from, _to, _mask) \ | 121 | #define LOOPBACK_CHANGED(_from, _to, _mask) \ |
62 | (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) | 122 | (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) |
63 | 123 | ||
@@ -84,6 +144,7 @@ extern const char *efx_loopback_mode_names[]; | |||
84 | * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch | 144 | * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch |
85 | * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch | 145 | * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch |
86 | * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors | 146 | * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors |
147 | * @RESET_TYPE_MC_FAILURE: MC reboot/assertion | ||
87 | */ | 148 | */ |
88 | enum reset_type { | 149 | enum reset_type { |
89 | RESET_TYPE_NONE = -1, | 150 | RESET_TYPE_NONE = -1, |
@@ -98,6 +159,7 @@ enum reset_type { | |||
98 | RESET_TYPE_RX_DESC_FETCH, | 159 | RESET_TYPE_RX_DESC_FETCH, |
99 | RESET_TYPE_TX_DESC_FETCH, | 160 | RESET_TYPE_TX_DESC_FETCH, |
100 | RESET_TYPE_TX_SKIP, | 161 | RESET_TYPE_TX_SKIP, |
162 | RESET_TYPE_MC_FAILURE, | ||
101 | RESET_TYPE_MAX, | 163 | RESET_TYPE_MAX, |
102 | }; | 164 | }; |
103 | 165 | ||
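LOOPBACKS_EXTERNAL() above is pure bit arithmetic: take the loopback modes the NIC reports in loopback_modes, mask off everything in LOOPBACKS_INTERNAL, and drop LOOPBACK_NONE. The toy below applies the same expression to a two-mode example using the enum values defined above (XAUI = 5, PHYXS = 15); the "supported" set and the reduced internal mask are invented:

#include <stdio.h>

enum { LB_NONE = 0, LB_XAUI = 5, LB_PHYXS = 15 };

#define LB_INTERNAL_SKETCH (1u << LB_XAUI)	/* toy internal set */

int main(void)
{
	unsigned int supported = (1u << LB_XAUI) | (1u << LB_PHYXS);
	unsigned int external  = supported & ~LB_INTERNAL_SKETCH & ~(1u << LB_NONE);

	printf("0x%x\n", external);	/* 0x8000: only PHYXS is left */
	return 0;
}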
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 45018f283ffa..d9f9c02a928e 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -10,30 +10,15 @@ | |||
10 | 10 | ||
11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
12 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
13 | #include <linux/mdio.h> | ||
14 | #include <linux/rtnetlink.h> | 13 | #include <linux/rtnetlink.h> |
15 | #include "net_driver.h" | 14 | #include "net_driver.h" |
16 | #include "workarounds.h" | 15 | #include "workarounds.h" |
17 | #include "selftest.h" | 16 | #include "selftest.h" |
18 | #include "efx.h" | 17 | #include "efx.h" |
19 | #include "ethtool.h" | 18 | #include "nic.h" |
20 | #include "falcon.h" | ||
21 | #include "spi.h" | 19 | #include "spi.h" |
22 | #include "mdio_10g.h" | 20 | #include "mdio_10g.h" |
23 | 21 | ||
24 | const char *efx_loopback_mode_names[] = { | ||
25 | [LOOPBACK_NONE] = "NONE", | ||
26 | [LOOPBACK_GMAC] = "GMAC", | ||
27 | [LOOPBACK_XGMII] = "XGMII", | ||
28 | [LOOPBACK_XGXS] = "XGXS", | ||
29 | [LOOPBACK_XAUI] = "XAUI", | ||
30 | [LOOPBACK_GPHY] = "GPHY", | ||
31 | [LOOPBACK_PHYXS] = "PHYXS", | ||
32 | [LOOPBACK_PCS] = "PCS", | ||
33 | [LOOPBACK_PMAPMD] = "PMA/PMD", | ||
34 | [LOOPBACK_NETWORK] = "NETWORK", | ||
35 | }; | ||
36 | |||
37 | struct ethtool_string { | 22 | struct ethtool_string { |
38 | char name[ETH_GSTRING_LEN]; | 23 | char name[ETH_GSTRING_LEN]; |
39 | }; | 24 | }; |
@@ -167,6 +152,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = { | |||
167 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), | 152 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), |
168 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), | 153 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), |
169 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), | 154 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), |
155 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), | ||
170 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), | 156 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), |
171 | }; | 157 | }; |
172 | 158 | ||
@@ -187,13 +173,15 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count) | |||
187 | { | 173 | { |
188 | struct efx_nic *efx = netdev_priv(net_dev); | 174 | struct efx_nic *efx = netdev_priv(net_dev); |
189 | 175 | ||
190 | efx->board_info.blink(efx, 1); | 176 | do { |
191 | set_current_state(TASK_INTERRUPTIBLE); | 177 | efx->type->set_id_led(efx, EFX_LED_ON); |
192 | if (count) | 178 | schedule_timeout_interruptible(HZ / 2); |
193 | schedule_timeout(count * HZ); | 179 | |
194 | else | 180 | efx->type->set_id_led(efx, EFX_LED_OFF); |
195 | schedule(); | 181 | schedule_timeout_interruptible(HZ / 2); |
196 | efx->board_info.blink(efx, 0); | 182 | } while (!signal_pending(current) && --count != 0); |
183 | |||
184 | efx->type->set_id_led(efx, EFX_LED_DEFAULT); | ||
197 | return 0; | 185 | return 0; |
198 | } | 186 | } |
199 | 187 | ||
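The rewritten efx_ethtool_phys_id() above uses "--count != 0" with an unsigned count, which also covers ethtool's convention (already honoured by the removed schedule() branch) that a count of zero means blink until interrupted: pre-decrementing 0 wraps to UINT_MAX, so only signal_pending() ends the loop. A two-line demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned int count = 0;

	if (--count != 0)	/* 0 wraps to UINT_MAX: effectively "run forever" */
		printf("count is now %u\n", count);
	return 0;
}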
@@ -202,13 +190,21 @@ int efx_ethtool_get_settings(struct net_device *net_dev, | |||
202 | struct ethtool_cmd *ecmd) | 190 | struct ethtool_cmd *ecmd) |
203 | { | 191 | { |
204 | struct efx_nic *efx = netdev_priv(net_dev); | 192 | struct efx_nic *efx = netdev_priv(net_dev); |
193 | struct efx_link_state *link_state = &efx->link_state; | ||
205 | 194 | ||
206 | mutex_lock(&efx->mac_lock); | 195 | mutex_lock(&efx->mac_lock); |
207 | efx->phy_op->get_settings(efx, ecmd); | 196 | efx->phy_op->get_settings(efx, ecmd); |
208 | mutex_unlock(&efx->mac_lock); | 197 | mutex_unlock(&efx->mac_lock); |
209 | 198 | ||
210 | /* Falcon GMAC does not support 1000Mbps HD */ | 199 | /* GMAC does not support 1000Mbps HD */ |
211 | ecmd->supported &= ~SUPPORTED_1000baseT_Half; | 200 | ecmd->supported &= ~SUPPORTED_1000baseT_Half; |
201 | /* Both MACs support pause frames (bidirectional and respond-only) */ | ||
202 | ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
203 | |||
204 | if (LOOPBACK_INTERNAL(efx)) { | ||
205 | ecmd->speed = link_state->speed; | ||
206 | ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; | ||
207 | } | ||
212 | 208 | ||
213 | return 0; | 209 | return 0; |
214 | } | 210 | } |
@@ -220,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev, | |||
220 | struct efx_nic *efx = netdev_priv(net_dev); | 216 | struct efx_nic *efx = netdev_priv(net_dev); |
221 | int rc; | 217 | int rc; |
222 | 218 | ||
223 | /* Falcon GMAC does not support 1000Mbps HD */ | 219 | /* GMAC does not support 1000Mbps HD */ |
224 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { | 220 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { |
225 | EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" | 221 | EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" |
226 | " setting\n"); | 222 | " setting\n"); |
@@ -230,9 +226,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev, | |||
230 | mutex_lock(&efx->mac_lock); | 226 | mutex_lock(&efx->mac_lock); |
231 | rc = efx->phy_op->set_settings(efx, ecmd); | 227 | rc = efx->phy_op->set_settings(efx, ecmd); |
232 | mutex_unlock(&efx->mac_lock); | 228 | mutex_unlock(&efx->mac_lock); |
233 | if (!rc) | ||
234 | efx_reconfigure_port(efx); | ||
235 | |||
236 | return rc; | 229 | return rc; |
237 | } | 230 | } |
238 | 231 | ||
@@ -243,6 +236,9 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, | |||
243 | 236 | ||
244 | strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); | 237 | strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); |
245 | strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); | 238 | strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); |
239 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
240 | siena_print_fwver(efx, info->fw_version, | ||
241 | sizeof(info->fw_version)); | ||
246 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); | 242 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); |
247 | } | 243 | } |
248 | 244 | ||
@@ -289,7 +285,7 @@ static void efx_fill_test(unsigned int test_index, | |||
289 | #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue | 285 | #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue |
290 | #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue | 286 | #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue |
291 | #define EFX_LOOPBACK_NAME(_mode, _counter) \ | 287 | #define EFX_LOOPBACK_NAME(_mode, _counter) \ |
292 | "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode) | 288 | "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) |
293 | 289 | ||
294 | /** | 290 | /** |
295 | * efx_fill_loopback_test - fill in a block of loopback self-test entries | 291 | * efx_fill_loopback_test - fill in a block of loopback self-test entries |
@@ -346,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, | |||
346 | unsigned int n = 0, i; | 342 | unsigned int n = 0, i; |
347 | enum efx_loopback_mode mode; | 343 | enum efx_loopback_mode mode; |
348 | 344 | ||
349 | efx_fill_test(n++, strings, data, &tests->mdio, | 345 | efx_fill_test(n++, strings, data, &tests->phy_alive, |
350 | "core", 0, "mdio", NULL); | 346 | "phy", 0, "alive", NULL); |
351 | efx_fill_test(n++, strings, data, &tests->nvram, | 347 | efx_fill_test(n++, strings, data, &tests->nvram, |
352 | "core", 0, "nvram", NULL); | 348 | "core", 0, "nvram", NULL); |
353 | efx_fill_test(n++, strings, data, &tests->interrupt, | 349 | efx_fill_test(n++, strings, data, &tests->interrupt, |
@@ -372,9 +368,21 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, | |||
372 | efx_fill_test(n++, strings, data, &tests->registers, | 368 | efx_fill_test(n++, strings, data, &tests->registers, |
373 | "core", 0, "registers", NULL); | 369 | "core", 0, "registers", NULL); |
374 | 370 | ||
375 | for (i = 0; i < efx->phy_op->num_tests; i++) | 371 | if (efx->phy_op->run_tests != NULL) { |
376 | efx_fill_test(n++, strings, data, &tests->phy[i], | 372 | EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); |
377 | "phy", 0, efx->phy_op->test_names[i], NULL); | 373 | |
374 | for (i = 0; true; ++i) { | ||
375 | const char *name; | ||
376 | |||
377 | EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); | ||
378 | name = efx->phy_op->test_name(efx, i); | ||
379 | if (name == NULL) | ||
380 | break; | ||
381 | |||
382 | efx_fill_test(n++, strings, data, &tests->phy_ext[i], | ||
383 | "phy", 0, name, NULL); | ||
384 | } | ||
385 | } | ||
378 | 386 | ||
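As the hunk above shows, the self-test listing no longer relies on a fixed num_tests/test_names array; it calls phy_op->test_name(efx, i) with increasing indices until the callback returns NULL, with EFX_BUG_ON_PARANOID guarding against runaway enumeration. A stand-alone sketch of that NULL-terminated enumeration pattern (the test names and the MAX_TESTS cap are invented):

#include <stdio.h>
#include <stddef.h>

#define MAX_TESTS 8   /* arbitrary safety cap for the sketch */

/* Return the name of test i, or NULL when there are no more tests. */
static const char *demo_test_name(unsigned i)
{
    static const char *names[] = { "bist", "cable", "alive" };
    return i < sizeof(names) / sizeof(names[0]) ? names[i] : NULL;
}

int main(void)
{
    for (unsigned i = 0; ; i++) {
        const char *name;

        if (i >= MAX_TESTS) {            /* mirror of the paranoid bound check */
            fprintf(stderr, "too many tests\n");
            return 1;
        }
        name = demo_test_name(i);
        if (name == NULL)
            break;                       /* NULL terminates the list */
        printf("phy test %u: %s\n", i, name);
    }
    return 0;
}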
379 | /* Loopback tests */ | 387 | /* Loopback tests */ |
380 | for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { | 388 | for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { |
@@ -463,6 +471,36 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, | |||
463 | } | 471 | } |
464 | } | 472 | } |
465 | 473 | ||
474 | static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) | ||
475 | { | ||
476 | struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); | ||
477 | unsigned long features; | ||
478 | |||
479 | features = NETIF_F_TSO; | ||
480 | if (efx->type->offload_features & NETIF_F_V6_CSUM) | ||
481 | features |= NETIF_F_TSO6; | ||
482 | |||
483 | if (enable) | ||
484 | net_dev->features |= features; | ||
485 | else | ||
486 | net_dev->features &= ~features; | ||
487 | |||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) | ||
492 | { | ||
493 | struct efx_nic *efx = netdev_priv(net_dev); | ||
494 | unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; | ||
495 | |||
496 | if (enable) | ||
497 | net_dev->features |= features; | ||
498 | else | ||
499 | net_dev->features &= ~features; | ||
500 | |||
501 | return 0; | ||
502 | } | ||
503 | |||
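Both new handlers above flip a whole group of feature bits at once so that the IPv6 offloads (TSO6, IPv6 checksum) are kept in step with their IPv4 counterparts. A small sketch of grouped feature-flag toggling, using placeholder bit values rather than the kernel's NETIF_F_* constants:

#include <stdio.h>

/* Placeholder feature bits; the real NETIF_F_* masks live in
 * linux/netdevice.h. */
#define F_TSO        0x01
#define F_TSO6       0x02
#define F_IP_CSUM    0x04
#define F_IPV6_CSUM  0x08

static unsigned long set_group(unsigned long features,
                               unsigned long group, int enable)
{
    return enable ? (features | group) : (features & ~group);
}

int main(void)
{
    unsigned long features = 0;
    unsigned long tso_group = F_TSO | F_TSO6;  /* keep v4 and v6 in step */

    features = set_group(features, tso_group, 1);
    printf("after enable:  0x%lx\n", features);   /* 0x3 */
    features = set_group(features, tso_group, 0);
    printf("after disable: 0x%lx\n", features);   /* 0x0 */
    return 0;
}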
466 | static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) | 504 | static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) |
467 | { | 505 | { |
468 | struct efx_nic *efx = netdev_priv(net_dev); | 506 | struct efx_nic *efx = netdev_priv(net_dev); |
@@ -537,7 +575,7 @@ static u32 efx_ethtool_get_link(struct net_device *net_dev) | |||
537 | { | 575 | { |
538 | struct efx_nic *efx = netdev_priv(net_dev); | 576 | struct efx_nic *efx = netdev_priv(net_dev); |
539 | 577 | ||
540 | return efx->link_up; | 578 | return efx->link_state.up; |
541 | } | 579 | } |
542 | 580 | ||
543 | static int efx_ethtool_get_eeprom_len(struct net_device *net_dev) | 581 | static int efx_ethtool_get_eeprom_len(struct net_device *net_dev) |
@@ -562,7 +600,8 @@ static int efx_ethtool_get_eeprom(struct net_device *net_dev, | |||
562 | rc = mutex_lock_interruptible(&efx->spi_lock); | 600 | rc = mutex_lock_interruptible(&efx->spi_lock); |
563 | if (rc) | 601 | if (rc) |
564 | return rc; | 602 | return rc; |
565 | rc = falcon_spi_read(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, | 603 | rc = falcon_spi_read(efx, spi, |
604 | eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, | ||
566 | eeprom->len, &len, buf); | 605 | eeprom->len, &len, buf); |
567 | mutex_unlock(&efx->spi_lock); | 606 | mutex_unlock(&efx->spi_lock); |
568 | 607 | ||
@@ -585,7 +624,8 @@ static int efx_ethtool_set_eeprom(struct net_device *net_dev, | |||
585 | rc = mutex_lock_interruptible(&efx->spi_lock); | 624 | rc = mutex_lock_interruptible(&efx->spi_lock); |
586 | if (rc) | 625 | if (rc) |
587 | return rc; | 626 | return rc; |
588 | rc = falcon_spi_write(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, | 627 | rc = falcon_spi_write(efx, spi, |
628 | eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, | ||
589 | eeprom->len, &len, buf); | 629 | eeprom->len, &len, buf); |
590 | mutex_unlock(&efx->spi_lock); | 630 | mutex_unlock(&efx->spi_lock); |
591 | 631 | ||
@@ -618,6 +658,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev, | |||
618 | coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; | 658 | coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; |
619 | coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; | 659 | coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; |
620 | 660 | ||
661 | coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; | ||
662 | coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; | ||
663 | |||
621 | return 0; | 664 | return 0; |
622 | } | 665 | } |
623 | 666 | ||
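The two added lines scale the stored moderation values into microseconds for ethtool, which suggests the driver keeps them internally in hardware timer ticks of EFX_IRQ_MOD_RESOLUTION microseconds each. A quick arithmetic sketch, assuming a 5 us tick purely for illustration:

#include <stdio.h>

/* Assumed tick granularity in microseconds; the driver's actual
 * EFX_IRQ_MOD_RESOLUTION value may differ. */
#define IRQ_MOD_RESOLUTION_US 5

int main(void)
{
    unsigned requested_us = 37;

    /* Store as whole ticks (rounded down), report back as microseconds. */
    unsigned ticks = requested_us / IRQ_MOD_RESOLUTION_US;
    unsigned reported_us = ticks * IRQ_MOD_RESOLUTION_US;

    printf("requested %u us -> %u ticks -> reported %u us\n",
           requested_us, ticks, reported_us);   /* 37 -> 7 -> 35 */
    return 0;
}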
@@ -656,13 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, | |||
656 | } | 699 | } |
657 | 700 | ||
658 | efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); | 701 | efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); |
659 | |||
660 | /* Reset channel to pick up new moderation value. Note that | ||
661 | * this may change the value of the irq_moderation field | ||
662 | * (e.g. to allow for hardware timer granularity). | ||
663 | */ | ||
664 | efx_for_each_channel(channel, efx) | 702 | efx_for_each_channel(channel, efx) |
665 | falcon_set_int_moderation(channel); | 703 | efx->type->push_irq_moderation(channel); |
666 | 704 | ||
667 | return 0; | 705 | return 0; |
668 | } | 706 | } |
@@ -671,8 +709,12 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | |||
671 | struct ethtool_pauseparam *pause) | 709 | struct ethtool_pauseparam *pause) |
672 | { | 710 | { |
673 | struct efx_nic *efx = netdev_priv(net_dev); | 711 | struct efx_nic *efx = netdev_priv(net_dev); |
674 | enum efx_fc_type wanted_fc; | 712 | enum efx_fc_type wanted_fc, old_fc; |
713 | u32 old_adv; | ||
675 | bool reset; | 714 | bool reset; |
715 | int rc = 0; | ||
716 | |||
717 | mutex_lock(&efx->mac_lock); | ||
676 | 718 | ||
677 | wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | | 719 | wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | |
678 | (pause->tx_pause ? EFX_FC_TX : 0) | | 720 | (pause->tx_pause ? EFX_FC_TX : 0) | |
@@ -680,14 +722,14 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | |||
680 | 722 | ||
681 | if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { | 723 | if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { |
682 | EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n"); | 724 | EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n"); |
683 | return -EINVAL; | 725 | rc = -EINVAL; |
726 | goto out; | ||
684 | } | 727 | } |
685 | 728 | ||
686 | if (!(efx->phy_op->mmds & MDIO_DEVS_AN) && | 729 | if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { |
687 | (wanted_fc & EFX_FC_AUTO)) { | 730 | EFX_LOG(efx, "Autonegotiation is disabled\n"); |
688 | EFX_LOG(efx, "PHY does not support flow control " | 731 | rc = -EINVAL; |
689 | "autonegotiation\n"); | 732 | goto out; |
690 | return -EINVAL; | ||
691 | } | 733 | } |
692 | 734 | ||
693 | /* TX flow control may automatically turn itself off if the | 735 | /* TX flow control may automatically turn itself off if the |
@@ -697,27 +739,40 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | |||
697 | * and fix it by cycling transmit flow control on this end. */ | 739 | * and fix it by cycling transmit flow control on this end. */ |
698 | reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); | 740 | reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); |
699 | if (EFX_WORKAROUND_11482(efx) && reset) { | 741 | if (EFX_WORKAROUND_11482(efx) && reset) { |
700 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 742 | if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { |
701 | /* Recover by resetting the EM block */ | 743 | /* Recover by resetting the EM block */ |
702 | if (efx->link_up) | 744 | falcon_stop_nic_stats(efx); |
703 | falcon_drain_tx_fifo(efx); | 745 | falcon_drain_tx_fifo(efx); |
746 | efx->mac_op->reconfigure(efx); | ||
747 | falcon_start_nic_stats(efx); | ||
704 | } else { | 748 | } else { |
705 | /* Schedule a reset to recover */ | 749 | /* Schedule a reset to recover */ |
706 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); | 750 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); |
707 | } | 751 | } |
708 | } | 752 | } |
709 | 753 | ||
710 | /* Try to push the pause parameters */ | 754 | old_adv = efx->link_advertising; |
711 | mutex_lock(&efx->mac_lock); | 755 | old_fc = efx->wanted_fc; |
756 | efx_link_set_wanted_fc(efx, wanted_fc); | ||
757 | if (efx->link_advertising != old_adv || | ||
758 | (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { | ||
759 | rc = efx->phy_op->reconfigure(efx); | ||
760 | if (rc) { | ||
761 | EFX_ERR(efx, "Unable to advertise requested flow " | ||
762 | "control setting\n"); | ||
763 | goto out; | ||
764 | } | ||
765 | } | ||
712 | 766 | ||
713 | efx->wanted_fc = wanted_fc; | 767 | /* Reconfigure the MAC. The PHY *may* generate a link state change event |
714 | if (efx->phy_op->mmds & MDIO_DEVS_AN) | 768 | * if the user just changed the advertised capabilities, but there's no |
715 | mdio45_ethtool_spauseparam_an(&efx->mdio, pause); | 769 | * harm doing this twice */ |
716 | __efx_reconfigure_port(efx); | 770 | efx->mac_op->reconfigure(efx); |
717 | 771 | ||
772 | out: | ||
718 | mutex_unlock(&efx->mac_lock); | 773 | mutex_unlock(&efx->mac_lock); |
719 | 774 | ||
720 | return 0; | 775 | return rc; |
721 | } | 776 | } |
722 | 777 | ||
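The rewritten handler composes wanted_fc from the three ethtool pause knobs (rx_pause, tx_pause, autoneg) and rejects the one combination the hardware cannot honour, TX-only pause. A stand-alone sketch of that flag composition and validity check, with invented FC_* values in place of the driver's EFX_FC_* bits:

#include <stdio.h>

/* Invented stand-ins for the driver's EFX_FC_RX/TX/AUTO bits. */
#define FC_RX   0x1
#define FC_TX   0x2
#define FC_AUTO 0x4

static int build_wanted_fc(int rx_pause, int tx_pause, int autoneg,
                           unsigned *wanted_fc)
{
    unsigned fc = (rx_pause ? FC_RX : 0) |
                  (tx_pause ? FC_TX : 0) |
                  (autoneg  ? FC_AUTO : 0);

    /* TX pause without RX pause is not supported. */
    if ((fc & FC_TX) && !(fc & FC_RX))
        return -1;

    *wanted_fc = fc;
    return 0;
}

int main(void)
{
    unsigned fc;

    printf("rx+tx:   %d\n", build_wanted_fc(1, 1, 0, &fc));  /* 0, accepted */
    printf("tx only: %d\n", build_wanted_fc(0, 1, 0, &fc));  /* -1, rejected */
    return 0;
}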
723 | static void efx_ethtool_get_pauseparam(struct net_device *net_dev, | 778 | static void efx_ethtool_get_pauseparam(struct net_device *net_dev, |
@@ -731,6 +786,50 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev, | |||
731 | } | 786 | } |
732 | 787 | ||
733 | 788 | ||
789 | static void efx_ethtool_get_wol(struct net_device *net_dev, | ||
790 | struct ethtool_wolinfo *wol) | ||
791 | { | ||
792 | struct efx_nic *efx = netdev_priv(net_dev); | ||
793 | return efx->type->get_wol(efx, wol); | ||
794 | } | ||
795 | |||
796 | |||
797 | static int efx_ethtool_set_wol(struct net_device *net_dev, | ||
798 | struct ethtool_wolinfo *wol) | ||
799 | { | ||
800 | struct efx_nic *efx = netdev_priv(net_dev); | ||
801 | return efx->type->set_wol(efx, wol->wolopts); | ||
802 | } | ||
803 | |||
804 | extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) | ||
805 | { | ||
806 | struct efx_nic *efx = netdev_priv(net_dev); | ||
807 | enum reset_type method; | ||
808 | enum { | ||
809 | ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | | ||
810 | ETH_RESET_OFFLOAD | ETH_RESET_MAC) | ||
811 | }; | ||
812 | |||
813 | /* Check for minimal reset flags */ | ||
814 | if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE) | ||
815 | return -EINVAL; | ||
816 | *flags ^= ETH_RESET_EFX_INVISIBLE; | ||
817 | method = RESET_TYPE_INVISIBLE; | ||
818 | |||
819 | if (*flags & ETH_RESET_PHY) { | ||
820 | *flags ^= ETH_RESET_PHY; | ||
821 | method = RESET_TYPE_ALL; | ||
822 | } | ||
823 | |||
824 | if ((*flags & efx->type->reset_world_flags) == | ||
825 | efx->type->reset_world_flags) { | ||
826 | *flags ^= efx->type->reset_world_flags; | ||
827 | method = RESET_TYPE_WORLD; | ||
828 | } | ||
829 | |||
830 | return efx_reset(efx, method); | ||
831 | } | ||
832 | |||
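efx_ethtool_reset consumes the ETH_RESET_* component bits it is able to handle (clearing them from *flags with XOR) and leaves the rest set so ethtool can see what was not reset; the PHY bit upgrades the chosen method from an invisible reset to a full one. A minimal sketch of the same consume-and-report flag handling, using made-up flag values rather than the real ETH_RESET_* constants:

#include <stdio.h>

/* Made-up component bits; the real ETH_RESET_* values are defined in
 * linux/ethtool.h and differ from these. */
#define RST_DMA      0x1
#define RST_FILTER   0x2
#define RST_OFFLOAD  0x4
#define RST_MAC      0x8
#define RST_PHY      0x10
#define RST_MINIMAL  (RST_DMA | RST_FILTER | RST_OFFLOAD | RST_MAC)

enum reset_method { METHOD_INVISIBLE, METHOD_ALL };

/* Clear the bits we will handle from *flags and report which kind of
 * reset that amounts to; anything left in *flags was not handled. */
static int pick_reset(unsigned *flags, enum reset_method *method)
{
    if ((*flags & RST_MINIMAL) != RST_MINIMAL)
        return -1;                  /* caller asked for less than we can do */
    *flags ^= RST_MINIMAL;
    *method = METHOD_INVISIBLE;

    if (*flags & RST_PHY) {
        *flags ^= RST_PHY;          /* a PHY reset needs the bigger hammer */
        *method = METHOD_ALL;
    }
    return 0;
}

int main(void)
{
    unsigned flags = RST_MINIMAL | RST_PHY | 0x100;  /* 0x100: some unknown bit */
    enum reset_method method;

    if (pick_reset(&flags, &method) == 0)
        printf("method=%d, flags left unhandled=0x%x\n", method, flags);
    return 0;
}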
734 | const struct ethtool_ops efx_ethtool_ops = { | 833 | const struct ethtool_ops efx_ethtool_ops = { |
735 | .get_settings = efx_ethtool_get_settings, | 834 | .get_settings = efx_ethtool_get_settings, |
736 | .set_settings = efx_ethtool_set_settings, | 835 | .set_settings = efx_ethtool_set_settings, |
@@ -747,11 +846,13 @@ const struct ethtool_ops efx_ethtool_ops = { | |||
747 | .get_rx_csum = efx_ethtool_get_rx_csum, | 846 | .get_rx_csum = efx_ethtool_get_rx_csum, |
748 | .set_rx_csum = efx_ethtool_set_rx_csum, | 847 | .set_rx_csum = efx_ethtool_set_rx_csum, |
749 | .get_tx_csum = ethtool_op_get_tx_csum, | 848 | .get_tx_csum = ethtool_op_get_tx_csum, |
750 | .set_tx_csum = ethtool_op_set_tx_csum, | 849 | /* Need to enable/disable IPv6 too */ |
850 | .set_tx_csum = efx_ethtool_set_tx_csum, | ||
751 | .get_sg = ethtool_op_get_sg, | 851 | .get_sg = ethtool_op_get_sg, |
752 | .set_sg = ethtool_op_set_sg, | 852 | .set_sg = ethtool_op_set_sg, |
753 | .get_tso = ethtool_op_get_tso, | 853 | .get_tso = ethtool_op_get_tso, |
754 | .set_tso = ethtool_op_set_tso, | 854 | /* Need to enable/disable TSO-IPv6 too */ |
855 | .set_tso = efx_ethtool_set_tso, | ||
755 | .get_flags = ethtool_op_get_flags, | 856 | .get_flags = ethtool_op_get_flags, |
756 | .set_flags = ethtool_op_set_flags, | 857 | .set_flags = ethtool_op_set_flags, |
757 | .get_sset_count = efx_ethtool_get_sset_count, | 858 | .get_sset_count = efx_ethtool_get_sset_count, |
@@ -759,4 +860,7 @@ const struct ethtool_ops efx_ethtool_ops = { | |||
759 | .get_strings = efx_ethtool_get_strings, | 860 | .get_strings = efx_ethtool_get_strings, |
760 | .phys_id = efx_ethtool_phys_id, | 861 | .phys_id = efx_ethtool_phys_id, |
761 | .get_ethtool_stats = efx_ethtool_get_stats, | 862 | .get_ethtool_stats = efx_ethtool_get_stats, |
863 | .get_wol = efx_ethtool_get_wol, | ||
864 | .set_wol = efx_ethtool_set_wol, | ||
865 | .reset = efx_ethtool_reset, | ||
762 | }; | 866 | }; |
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h deleted file mode 100644 index 295ead403356..000000000000 --- a/drivers/net/sfc/ethtool.h +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005 Fen Systems Ltd. | ||
4 | * Copyright 2006 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_ETHTOOL_H | ||
12 | #define EFX_ETHTOOL_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | |||
16 | /* | ||
17 | * Ethtool support | ||
18 | */ | ||
19 | |||
20 | extern int efx_ethtool_get_settings(struct net_device *net_dev, | ||
21 | struct ethtool_cmd *ecmd); | ||
22 | extern int efx_ethtool_set_settings(struct net_device *net_dev, | ||
23 | struct ethtool_cmd *ecmd); | ||
24 | |||
25 | extern const struct ethtool_ops efx_ethtool_ops; | ||
26 | |||
27 | #endif /* EFX_ETHTOOL_H */ | ||
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index c049364aec46..08278e7302b3 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -14,66 +14,21 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/i2c.h> | 16 | #include <linux/i2c.h> |
17 | #include <linux/i2c-algo-bit.h> | ||
18 | #include <linux/mii.h> | 17 | #include <linux/mii.h> |
18 | #include <linux/slab.h> | ||
19 | #include "net_driver.h" | 19 | #include "net_driver.h" |
20 | #include "bitfield.h" | 20 | #include "bitfield.h" |
21 | #include "efx.h" | 21 | #include "efx.h" |
22 | #include "mac.h" | 22 | #include "mac.h" |
23 | #include "spi.h" | 23 | #include "spi.h" |
24 | #include "falcon.h" | 24 | #include "nic.h" |
25 | #include "falcon_hwdefs.h" | 25 | #include "regs.h" |
26 | #include "falcon_io.h" | 26 | #include "io.h" |
27 | #include "mdio_10g.h" | 27 | #include "mdio_10g.h" |
28 | #include "phy.h" | 28 | #include "phy.h" |
29 | #include "boards.h" | ||
30 | #include "workarounds.h" | 29 | #include "workarounds.h" |
31 | 30 | ||
32 | /* Falcon hardware control. | 31 | /* Hardware control for SFC4000 (aka Falcon). */ |
33 | * Falcon is the internal codename for the SFC4000 controller that is | ||
34 | * present in SFE400X evaluation boards | ||
35 | */ | ||
36 | |||
37 | /** | ||
38 | * struct falcon_nic_data - Falcon NIC state | ||
39 | * @next_buffer_table: First available buffer table id | ||
40 | * @pci_dev2: The secondary PCI device if present | ||
41 | * @i2c_data: Operations and state for I2C bit-bashing algorithm | ||
42 | * @int_error_count: Number of internal errors seen recently | ||
43 | * @int_error_expire: Time at which error count will be expired | ||
44 | */ | ||
45 | struct falcon_nic_data { | ||
46 | unsigned next_buffer_table; | ||
47 | struct pci_dev *pci_dev2; | ||
48 | struct i2c_algo_bit_data i2c_data; | ||
49 | |||
50 | unsigned int_error_count; | ||
51 | unsigned long int_error_expire; | ||
52 | }; | ||
53 | |||
54 | /************************************************************************** | ||
55 | * | ||
56 | * Configurable values | ||
57 | * | ||
58 | ************************************************************************** | ||
59 | */ | ||
60 | |||
61 | static int disable_dma_stats; | ||
62 | |||
63 | /* This is set to 16 for a good reason. In summary, if larger than | ||
64 | * 16, the descriptor cache holds more than a default socket | ||
65 | * buffer's worth of packets (for UDP we can only have at most one | ||
66 | * socket buffer's worth outstanding). This combined with the fact | ||
67 | * that we only get 1 TX event per descriptor cache means the NIC | ||
68 | * goes idle. | ||
69 | */ | ||
70 | #define TX_DC_ENTRIES 16 | ||
71 | #define TX_DC_ENTRIES_ORDER 0 | ||
72 | #define TX_DC_BASE 0x130000 | ||
73 | |||
74 | #define RX_DC_ENTRIES 64 | ||
75 | #define RX_DC_ENTRIES_ORDER 2 | ||
76 | #define RX_DC_BASE 0x100000 | ||
77 | 32 | ||
78 | static const unsigned int | 33 | static const unsigned int |
79 | /* "Large" EEPROM device: Atmel AT25640 or similar | 34 | /* "Large" EEPROM device: Atmel AT25640 or similar |
@@ -89,104 +44,6 @@ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN) | |||
89 | | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) | 44 | | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) |
90 | | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); | 45 | | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); |
91 | 46 | ||
92 | /* RX FIFO XOFF watermark | ||
93 | * | ||
94 | * When the amount of the RX FIFO used increases past this | ||
95 | * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A) | ||
96 | * This also has an effect on RX/TX arbitration | ||
97 | */ | ||
98 | static int rx_xoff_thresh_bytes = -1; | ||
99 | module_param(rx_xoff_thresh_bytes, int, 0644); | ||
100 | MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold"); | ||
101 | |||
102 | /* RX FIFO XON watermark | ||
103 | * | ||
104 | * When the amount of the RX FIFO used decreases below this | ||
105 | * watermark send XON. Only used if TX flow control is enabled (ethtool -A) | ||
106 | * This also has an effect on RX/TX arbitration | ||
107 | */ | ||
108 | static int rx_xon_thresh_bytes = -1; | ||
109 | module_param(rx_xon_thresh_bytes, int, 0644); | ||
110 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | ||
111 | |||
112 | /* TX descriptor ring size - min 512 max 4k */ | ||
113 | #define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K | ||
114 | #define FALCON_TXD_RING_SIZE 1024 | ||
115 | #define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1) | ||
116 | |||
117 | /* RX descriptor ring size - min 512 max 4k */ | ||
118 | #define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K | ||
119 | #define FALCON_RXD_RING_SIZE 1024 | ||
120 | #define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1) | ||
121 | |||
122 | /* Event queue size - max 32k */ | ||
123 | #define FALCON_EVQ_ORDER EVQ_SIZE_4K | ||
124 | #define FALCON_EVQ_SIZE 4096 | ||
125 | #define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1) | ||
126 | |||
127 | /* If FALCON_MAX_INT_ERRORS internal errors occur within | ||
128 | * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | ||
129 | * disable it. | ||
130 | */ | ||
131 | #define FALCON_INT_ERROR_EXPIRE 3600 | ||
132 | #define FALCON_MAX_INT_ERRORS 5 | ||
133 | |||
134 | /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times | ||
135 | */ | ||
136 | #define FALCON_FLUSH_INTERVAL 10 | ||
137 | #define FALCON_FLUSH_POLL_COUNT 100 | ||
138 | |||
139 | /************************************************************************** | ||
140 | * | ||
141 | * Falcon constants | ||
142 | * | ||
143 | ************************************************************************** | ||
144 | */ | ||
145 | |||
146 | /* DMA address mask */ | ||
147 | #define FALCON_DMA_MASK DMA_BIT_MASK(46) | ||
148 | |||
149 | /* TX DMA length mask (13-bit) */ | ||
150 | #define FALCON_TX_DMA_MASK (4096 - 1) | ||
151 | |||
152 | /* Size and alignment of special buffers (4KB) */ | ||
153 | #define FALCON_BUF_SIZE 4096 | ||
154 | |||
155 | /* Dummy SRAM size code */ | ||
156 | #define SRM_NB_BSZ_ONCHIP_ONLY (-1) | ||
157 | |||
158 | #define FALCON_IS_DUAL_FUNC(efx) \ | ||
159 | (falcon_rev(efx) < FALCON_REV_B0) | ||
160 | |||
161 | /************************************************************************** | ||
162 | * | ||
163 | * Falcon hardware access | ||
164 | * | ||
165 | **************************************************************************/ | ||
166 | |||
167 | /* Read the current event from the event queue */ | ||
168 | static inline efx_qword_t *falcon_event(struct efx_channel *channel, | ||
169 | unsigned int index) | ||
170 | { | ||
171 | return (((efx_qword_t *) (channel->eventq.addr)) + index); | ||
172 | } | ||
173 | |||
174 | /* See if an event is present | ||
175 | * | ||
176 | * We check both the high and low dword of the event for all ones. We | ||
177 | * wrote all ones when we cleared the event, and no valid event can | ||
178 | * have all ones in either its high or low dwords. This approach is | ||
179 | * robust against reordering. | ||
180 | * | ||
181 | * Note that using a single 64-bit comparison is incorrect; even | ||
182 | * though the CPU read will be atomic, the DMA write may not be. | ||
183 | */ | ||
184 | static inline int falcon_event_present(efx_qword_t *event) | ||
185 | { | ||
186 | return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | ||
187 | EFX_DWORD_IS_ALL_ONES(event->dword[1]))); | ||
188 | } | ||
189 | |||
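The removed comment and helper above describe how an empty event-queue slot is recognised: cleared entries are filled with all ones, and since no valid event has all ones in either 32-bit half, each dword is tested separately so the check stays correct even if the hardware's 64-bit write lands one half at a time. A stand-alone sketch of that sentinel check (the fake_event type is invented; the driver uses its own efx_qword_t):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's 64-bit event descriptor:
 * two 32-bit halves that may be written by DMA at different times. */
struct fake_event {
    uint32_t dword[2];
};

/* An entry is "absent" while either half still holds the all-ones
 * pattern written when the slot was cleared. */
static int event_present(const struct fake_event *ev)
{
    return !(ev->dword[0] == 0xffffffff || ev->dword[1] == 0xffffffff);
}

int main(void)
{
    struct fake_event slot;

    memset(&slot, 0xff, sizeof(slot));               /* cleared slot */
    printf("cleared:      %d\n", event_present(&slot));  /* 0 */

    slot.dword[0] = 0x12345678;                      /* first half lands */
    printf("half-written: %d\n", event_present(&slot));  /* still 0 */

    slot.dword[1] = 0x0000abcd;                      /* second half lands */
    printf("complete:     %d\n", event_present(&slot));  /* 1 */
    return 0;
}

The same reasoning is why the removed allocation code memsets new event memory to 0xff rather than zero: all-zeroes could be mistaken for a valid event.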
190 | /************************************************************************** | 47 | /************************************************************************** |
191 | * | 48 | * |
192 | * I2C bus - this is a bit-bashing interface using GPIO pins | 49 | * I2C bus - this is a bit-bashing interface using GPIO pins |
@@ -200,9 +57,9 @@ static void falcon_setsda(void *data, int state) | |||
200 | struct efx_nic *efx = (struct efx_nic *)data; | 57 | struct efx_nic *efx = (struct efx_nic *)data; |
201 | efx_oword_t reg; | 58 | efx_oword_t reg; |
202 | 59 | ||
203 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 60 | efx_reado(efx, ®, FR_AB_GPIO_CTL); |
204 | EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state); | 61 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); |
205 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | 62 | efx_writeo(efx, ®, FR_AB_GPIO_CTL); |
206 | } | 63 | } |
207 | 64 | ||
208 | static void falcon_setscl(void *data, int state) | 65 | static void falcon_setscl(void *data, int state) |
@@ -210,9 +67,9 @@ static void falcon_setscl(void *data, int state) | |||
210 | struct efx_nic *efx = (struct efx_nic *)data; | 67 | struct efx_nic *efx = (struct efx_nic *)data; |
211 | efx_oword_t reg; | 68 | efx_oword_t reg; |
212 | 69 | ||
213 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 70 | efx_reado(efx, ®, FR_AB_GPIO_CTL); |
214 | EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state); | 71 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); |
215 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | 72 | efx_writeo(efx, ®, FR_AB_GPIO_CTL); |
216 | } | 73 | } |
217 | 74 | ||
218 | static int falcon_getsda(void *data) | 75 | static int falcon_getsda(void *data) |
@@ -220,8 +77,8 @@ static int falcon_getsda(void *data) | |||
220 | struct efx_nic *efx = (struct efx_nic *)data; | 77 | struct efx_nic *efx = (struct efx_nic *)data; |
221 | efx_oword_t reg; | 78 | efx_oword_t reg; |
222 | 79 | ||
223 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 80 | efx_reado(efx, ®, FR_AB_GPIO_CTL); |
224 | return EFX_OWORD_FIELD(reg, GPIO3_IN); | 81 | return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); |
225 | } | 82 | } |
226 | 83 | ||
227 | static int falcon_getscl(void *data) | 84 | static int falcon_getscl(void *data) |
@@ -229,8 +86,8 @@ static int falcon_getscl(void *data) | |||
229 | struct efx_nic *efx = (struct efx_nic *)data; | 86 | struct efx_nic *efx = (struct efx_nic *)data; |
230 | efx_oword_t reg; | 87 | efx_oword_t reg; |
231 | 88 | ||
232 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 89 | efx_reado(efx, ®, FR_AB_GPIO_CTL); |
233 | return EFX_OWORD_FIELD(reg, GPIO0_IN); | 90 | return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); |
234 | } | 91 | } |
235 | 92 | ||
236 | static struct i2c_algo_bit_data falcon_i2c_bit_operations = { | 93 | static struct i2c_algo_bit_data falcon_i2c_bit_operations = { |
@@ -243,1115 +100,39 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = { | |||
243 | .timeout = DIV_ROUND_UP(HZ, 20), | 100 | .timeout = DIV_ROUND_UP(HZ, 20), |
244 | }; | 101 | }; |
245 | 102 | ||
246 | /************************************************************************** | 103 | static void falcon_push_irq_moderation(struct efx_channel *channel) |
247 | * | ||
248 | * Falcon special buffer handling | ||
249 | * Special buffers are used for event queues and the TX and RX | ||
250 | * descriptor rings. | ||
251 | * | ||
252 | *************************************************************************/ | ||
253 | |||
254 | /* | ||
255 | * Initialise a Falcon special buffer | ||
256 | * | ||
257 | * This will define a buffer (previously allocated via | ||
258 | * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing | ||
259 | * it to be used for event queues, descriptor rings etc. | ||
260 | */ | ||
261 | static void | ||
262 | falcon_init_special_buffer(struct efx_nic *efx, | ||
263 | struct efx_special_buffer *buffer) | ||
264 | { | ||
265 | efx_qword_t buf_desc; | ||
266 | int index; | ||
267 | dma_addr_t dma_addr; | ||
268 | int i; | ||
269 | |||
270 | EFX_BUG_ON_PARANOID(!buffer->addr); | ||
271 | |||
272 | /* Write buffer descriptors to NIC */ | ||
273 | for (i = 0; i < buffer->entries; i++) { | ||
274 | index = buffer->index + i; | ||
275 | dma_addr = buffer->dma_addr + (i * 4096); | ||
276 | EFX_LOG(efx, "mapping special buffer %d at %llx\n", | ||
277 | index, (unsigned long long)dma_addr); | ||
278 | EFX_POPULATE_QWORD_4(buf_desc, | ||
279 | IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K, | ||
280 | BUF_ADR_REGION, 0, | ||
281 | BUF_ADR_FBUF, (dma_addr >> 12), | ||
282 | BUF_OWNER_ID_FBUF, 0); | ||
283 | falcon_write_sram(efx, &buf_desc, index); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | /* Unmaps a buffer from Falcon and clears the buffer table entries */ | ||
288 | static void | ||
289 | falcon_fini_special_buffer(struct efx_nic *efx, | ||
290 | struct efx_special_buffer *buffer) | ||
291 | { | ||
292 | efx_oword_t buf_tbl_upd; | ||
293 | unsigned int start = buffer->index; | ||
294 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
295 | |||
296 | if (!buffer->entries) | ||
297 | return; | ||
298 | |||
299 | EFX_LOG(efx, "unmapping special buffers %d-%d\n", | ||
300 | buffer->index, buffer->index + buffer->entries - 1); | ||
301 | |||
302 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
303 | BUF_UPD_CMD, 0, | ||
304 | BUF_CLR_CMD, 1, | ||
305 | BUF_CLR_END_ID, end, | ||
306 | BUF_CLR_START_ID, start); | ||
307 | falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER); | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Allocate a new Falcon special buffer | ||
312 | * | ||
313 | * This allocates memory for a new buffer, clears it and allocates a | ||
314 | * new buffer ID range. It does not write into Falcon's buffer table. | ||
315 | * | ||
316 | * This call will allocate 4KB buffers, since Falcon can't use 8KB | ||
317 | * buffers for event queues and descriptor rings. | ||
318 | */ | ||
319 | static int falcon_alloc_special_buffer(struct efx_nic *efx, | ||
320 | struct efx_special_buffer *buffer, | ||
321 | unsigned int len) | ||
322 | { | ||
323 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
324 | |||
325 | len = ALIGN(len, FALCON_BUF_SIZE); | ||
326 | |||
327 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
328 | &buffer->dma_addr); | ||
329 | if (!buffer->addr) | ||
330 | return -ENOMEM; | ||
331 | buffer->len = len; | ||
332 | buffer->entries = len / FALCON_BUF_SIZE; | ||
333 | BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1)); | ||
334 | |||
335 | /* All zeros is a potentially valid event so memset to 0xff */ | ||
336 | memset(buffer->addr, 0xff, len); | ||
337 | |||
338 | /* Select new buffer ID */ | ||
339 | buffer->index = nic_data->next_buffer_table; | ||
340 | nic_data->next_buffer_table += buffer->entries; | ||
341 | |||
342 | EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " | ||
343 | "(virt %p phys %llx)\n", buffer->index, | ||
344 | buffer->index + buffer->entries - 1, | ||
345 | (u64)buffer->dma_addr, len, | ||
346 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
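The removed falcon_alloc_special_buffer rounds the requested length up to whole 4KB buffers and takes the next contiguous run of buffer-table IDs from a running counter (nic_data->next_buffer_table). A small sketch of just that size and ID accounting, with invented helper names:

#include <stdio.h>

#define BUF_SIZE 4096u   /* 4KB units, as in the removed Falcon code */

static unsigned next_buffer_table;   /* next free buffer-table ID */

/* Round a length up to a whole number of 4KB buffers and hand out a
 * contiguous range of buffer-table IDs from the running counter. */
static unsigned alloc_buffer_ids(unsigned len, unsigned *first_id)
{
    unsigned aligned = (len + BUF_SIZE - 1) & ~(BUF_SIZE - 1);
    unsigned entries = aligned / BUF_SIZE;

    *first_id = next_buffer_table;
    next_buffer_table += entries;
    return entries;
}

int main(void)
{
    unsigned first, n;

    n = alloc_buffer_ids(1024 * 8, &first);   /* e.g. 1024 8-byte descriptors */
    printf("IDs %u-%u (%u entries)\n", first, first + n - 1, n);
    n = alloc_buffer_ids(4096 * 8, &first);   /* e.g. a 4096-entry event queue */
    printf("IDs %u-%u (%u entries)\n", first, first + n - 1, n);
    return 0;
}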
351 | static void falcon_free_special_buffer(struct efx_nic *efx, | ||
352 | struct efx_special_buffer *buffer) | ||
353 | { | ||
354 | if (!buffer->addr) | ||
355 | return; | ||
356 | |||
357 | EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " | ||
358 | "(virt %p phys %llx)\n", buffer->index, | ||
359 | buffer->index + buffer->entries - 1, | ||
360 | (u64)buffer->dma_addr, buffer->len, | ||
361 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
362 | |||
363 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, | ||
364 | buffer->dma_addr); | ||
365 | buffer->addr = NULL; | ||
366 | buffer->entries = 0; | ||
367 | } | ||
368 | |||
369 | /************************************************************************** | ||
370 | * | ||
371 | * Falcon generic buffer handling | ||
372 | * These buffers are used for interrupt status and MAC stats | ||
373 | * | ||
374 | **************************************************************************/ | ||
375 | |||
376 | static int falcon_alloc_buffer(struct efx_nic *efx, | ||
377 | struct efx_buffer *buffer, unsigned int len) | ||
378 | { | ||
379 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
380 | &buffer->dma_addr); | ||
381 | if (!buffer->addr) | ||
382 | return -ENOMEM; | ||
383 | buffer->len = len; | ||
384 | memset(buffer->addr, 0, len); | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | ||
389 | { | ||
390 | if (buffer->addr) { | ||
391 | pci_free_consistent(efx->pci_dev, buffer->len, | ||
392 | buffer->addr, buffer->dma_addr); | ||
393 | buffer->addr = NULL; | ||
394 | } | ||
395 | } | ||
396 | |||
397 | /************************************************************************** | ||
398 | * | ||
399 | * Falcon TX path | ||
400 | * | ||
401 | **************************************************************************/ | ||
402 | |||
403 | /* Returns a pointer to the specified transmit descriptor in the TX | ||
404 | * descriptor queue belonging to the specified channel. | ||
405 | */ | ||
406 | static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue, | ||
407 | unsigned int index) | ||
408 | { | ||
409 | return (((efx_qword_t *) (tx_queue->txd.addr)) + index); | ||
410 | } | ||
411 | |||
412 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | ||
413 | static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
414 | { | ||
415 | unsigned write_ptr; | ||
416 | efx_dword_t reg; | ||
417 | |||
418 | write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; | ||
419 | EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr); | ||
420 | falcon_writel_page(tx_queue->efx, ®, | ||
421 | TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue); | ||
422 | } | ||
423 | |||
424 | |||
425 | /* For each entry inserted into the software descriptor ring, create a | ||
426 | * descriptor in the hardware TX descriptor ring (in host memory), and | ||
427 | * write a doorbell. | ||
428 | */ | ||
429 | void falcon_push_buffers(struct efx_tx_queue *tx_queue) | ||
430 | { | ||
431 | |||
432 | struct efx_tx_buffer *buffer; | ||
433 | efx_qword_t *txd; | ||
434 | unsigned write_ptr; | ||
435 | |||
436 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | ||
437 | |||
438 | do { | ||
439 | write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; | ||
440 | buffer = &tx_queue->buffer[write_ptr]; | ||
441 | txd = falcon_tx_desc(tx_queue, write_ptr); | ||
442 | ++tx_queue->write_count; | ||
443 | |||
444 | /* Create TX descriptor ring entry */ | ||
445 | EFX_POPULATE_QWORD_5(*txd, | ||
446 | TX_KER_PORT, 0, | ||
447 | TX_KER_CONT, buffer->continuation, | ||
448 | TX_KER_BYTE_CNT, buffer->len, | ||
449 | TX_KER_BUF_REGION, 0, | ||
450 | TX_KER_BUF_ADR, buffer->dma_addr); | ||
451 | } while (tx_queue->write_count != tx_queue->insert_count); | ||
452 | |||
453 | wmb(); /* Ensure descriptors are written before they are fetched */ | ||
454 | falcon_notify_tx_desc(tx_queue); | ||
455 | } | ||
456 | |||
457 | /* Allocate hardware resources for a TX queue */ | ||
458 | int falcon_probe_tx(struct efx_tx_queue *tx_queue) | ||
459 | { | ||
460 | struct efx_nic *efx = tx_queue->efx; | ||
461 | return falcon_alloc_special_buffer(efx, &tx_queue->txd, | ||
462 | FALCON_TXD_RING_SIZE * | ||
463 | sizeof(efx_qword_t)); | ||
464 | } | ||
465 | |||
466 | void falcon_init_tx(struct efx_tx_queue *tx_queue) | ||
467 | { | ||
468 | efx_oword_t tx_desc_ptr; | ||
469 | struct efx_nic *efx = tx_queue->efx; | ||
470 | |||
471 | tx_queue->flushed = false; | ||
472 | |||
473 | /* Pin TX descriptor ring */ | ||
474 | falcon_init_special_buffer(efx, &tx_queue->txd); | ||
475 | |||
476 | /* Push TX descriptor ring to card */ | ||
477 | EFX_POPULATE_OWORD_10(tx_desc_ptr, | ||
478 | TX_DESCQ_EN, 1, | ||
479 | TX_ISCSI_DDIG_EN, 0, | ||
480 | TX_ISCSI_HDIG_EN, 0, | ||
481 | TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | ||
482 | TX_DESCQ_EVQ_ID, tx_queue->channel->channel, | ||
483 | TX_DESCQ_OWNER_ID, 0, | ||
484 | TX_DESCQ_LABEL, tx_queue->queue, | ||
485 | TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, | ||
486 | TX_DESCQ_TYPE, 0, | ||
487 | TX_NON_IP_DROP_DIS_B0, 1); | ||
488 | |||
489 | if (falcon_rev(efx) >= FALCON_REV_B0) { | ||
490 | int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; | ||
491 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum); | ||
492 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum); | ||
493 | } | ||
494 | |||
495 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
496 | tx_queue->queue); | ||
497 | |||
498 | if (falcon_rev(efx) < FALCON_REV_B0) { | ||
499 | efx_oword_t reg; | ||
500 | |||
501 | /* Only 128 bits in this register */ | ||
502 | BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); | ||
503 | |||
504 | falcon_read(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | ||
505 | if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) | ||
506 | clear_bit_le(tx_queue->queue, (void *)®); | ||
507 | else | ||
508 | set_bit_le(tx_queue->queue, (void *)®); | ||
509 | falcon_write(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | ||
510 | } | ||
511 | } | ||
512 | |||
513 | static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
514 | { | ||
515 | struct efx_nic *efx = tx_queue->efx; | ||
516 | efx_oword_t tx_flush_descq; | ||
517 | |||
518 | /* Post a flush command */ | ||
519 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
520 | TX_FLUSH_DESCQ_CMD, 1, | ||
521 | TX_FLUSH_DESCQ, tx_queue->queue); | ||
522 | falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); | ||
523 | } | ||
524 | |||
525 | void falcon_fini_tx(struct efx_tx_queue *tx_queue) | ||
526 | { | ||
527 | struct efx_nic *efx = tx_queue->efx; | ||
528 | efx_oword_t tx_desc_ptr; | ||
529 | |||
530 | /* The queue should have been flushed */ | ||
531 | WARN_ON(!tx_queue->flushed); | ||
532 | |||
533 | /* Remove TX descriptor ring from card */ | ||
534 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
535 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
536 | tx_queue->queue); | ||
537 | |||
538 | /* Unpin TX descriptor ring */ | ||
539 | falcon_fini_special_buffer(efx, &tx_queue->txd); | ||
540 | } | ||
541 | |||
542 | /* Free buffers backing TX queue */ | ||
543 | void falcon_remove_tx(struct efx_tx_queue *tx_queue) | ||
544 | { | ||
545 | falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
546 | } | ||
547 | |||
548 | /************************************************************************** | ||
549 | * | ||
550 | * Falcon RX path | ||
551 | * | ||
552 | **************************************************************************/ | ||
553 | |||
554 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | ||
555 | static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue, | ||
556 | unsigned int index) | ||
557 | { | ||
558 | return (((efx_qword_t *) (rx_queue->rxd.addr)) + index); | ||
559 | } | ||
560 | |||
561 | /* This creates an entry in the RX descriptor queue */ | ||
562 | static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue, | ||
563 | unsigned index) | ||
564 | { | ||
565 | struct efx_rx_buffer *rx_buf; | ||
566 | efx_qword_t *rxd; | ||
567 | |||
568 | rxd = falcon_rx_desc(rx_queue, index); | ||
569 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
570 | EFX_POPULATE_QWORD_3(*rxd, | ||
571 | RX_KER_BUF_SIZE, | ||
572 | rx_buf->len - | ||
573 | rx_queue->efx->type->rx_buffer_padding, | ||
574 | RX_KER_BUF_REGION, 0, | ||
575 | RX_KER_BUF_ADR, rx_buf->dma_addr); | ||
576 | } | ||
577 | |||
578 | /* This writes to the RX_DESC_WPTR register for the specified receive | ||
579 | * descriptor ring. | ||
580 | */ | ||
581 | void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue) | ||
582 | { | ||
583 | efx_dword_t reg; | ||
584 | unsigned write_ptr; | ||
585 | |||
586 | while (rx_queue->notified_count != rx_queue->added_count) { | ||
587 | falcon_build_rx_desc(rx_queue, | ||
588 | rx_queue->notified_count & | ||
589 | FALCON_RXD_RING_MASK); | ||
590 | ++rx_queue->notified_count; | ||
591 | } | ||
592 | |||
593 | wmb(); | ||
594 | write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; | ||
595 | EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr); | ||
596 | falcon_writel_page(rx_queue->efx, ®, | ||
597 | RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue); | ||
598 | } | ||
599 | |||
600 | int falcon_probe_rx(struct efx_rx_queue *rx_queue) | ||
601 | { | ||
602 | struct efx_nic *efx = rx_queue->efx; | ||
603 | return falcon_alloc_special_buffer(efx, &rx_queue->rxd, | ||
604 | FALCON_RXD_RING_SIZE * | ||
605 | sizeof(efx_qword_t)); | ||
606 | } | ||
607 | |||
608 | void falcon_init_rx(struct efx_rx_queue *rx_queue) | ||
609 | { | ||
610 | efx_oword_t rx_desc_ptr; | ||
611 | struct efx_nic *efx = rx_queue->efx; | ||
612 | bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0; | ||
613 | bool iscsi_digest_en = is_b0; | ||
614 | |||
615 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | ||
616 | rx_queue->queue, rx_queue->rxd.index, | ||
617 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
618 | |||
619 | rx_queue->flushed = false; | ||
620 | |||
621 | /* Pin RX descriptor ring */ | ||
622 | falcon_init_special_buffer(efx, &rx_queue->rxd); | ||
623 | |||
624 | /* Push RX descriptor ring to card */ | ||
625 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | ||
626 | RX_ISCSI_DDIG_EN, iscsi_digest_en, | ||
627 | RX_ISCSI_HDIG_EN, iscsi_digest_en, | ||
628 | RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | ||
629 | RX_DESCQ_EVQ_ID, rx_queue->channel->channel, | ||
630 | RX_DESCQ_OWNER_ID, 0, | ||
631 | RX_DESCQ_LABEL, rx_queue->queue, | ||
632 | RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, | ||
633 | RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
634 | /* For >=B0 this is scatter so disable */ | ||
635 | RX_DESCQ_JUMBO, !is_b0, | ||
636 | RX_DESCQ_EN, 1); | ||
637 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
638 | rx_queue->queue); | ||
639 | } | ||
640 | |||
641 | static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
642 | { | ||
643 | struct efx_nic *efx = rx_queue->efx; | ||
644 | efx_oword_t rx_flush_descq; | ||
645 | |||
646 | /* Post a flush command */ | ||
647 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
648 | RX_FLUSH_DESCQ_CMD, 1, | ||
649 | RX_FLUSH_DESCQ, rx_queue->queue); | ||
650 | falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); | ||
651 | } | ||
652 | |||
653 | void falcon_fini_rx(struct efx_rx_queue *rx_queue) | ||
654 | { | ||
655 | efx_oword_t rx_desc_ptr; | ||
656 | struct efx_nic *efx = rx_queue->efx; | ||
657 | |||
658 | /* The queue should already have been flushed */ | ||
659 | WARN_ON(!rx_queue->flushed); | ||
660 | |||
661 | /* Remove RX descriptor ring from card */ | ||
662 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
663 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
664 | rx_queue->queue); | ||
665 | |||
666 | /* Unpin RX descriptor ring */ | ||
667 | falcon_fini_special_buffer(efx, &rx_queue->rxd); | ||
668 | } | ||
669 | |||
670 | /* Free buffers backing RX queue */ | ||
671 | void falcon_remove_rx(struct efx_rx_queue *rx_queue) | ||
672 | { | ||
673 | falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
674 | } | ||
675 | |||
676 | /************************************************************************** | ||
677 | * | ||
678 | * Falcon event queue processing | ||
679 | * Event queues are processed by per-channel tasklets. | ||
680 | * | ||
681 | **************************************************************************/ | ||
682 | |||
683 | /* Update a channel's event queue's read pointer (RPTR) register | ||
684 | * | ||
685 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
686 | * event queue. | ||
687 | * | ||
688 | * Note that EVQ_RPTR_REG contains the index of the "last read" event, | ||
689 | * whereas channel->eventq_read_ptr contains the index of the "next to | ||
690 | * read" event. | ||
691 | */ | ||
692 | void falcon_eventq_read_ack(struct efx_channel *channel) | ||
693 | { | ||
694 | efx_dword_t reg; | ||
695 | struct efx_nic *efx = channel->efx; | ||
696 | |||
697 | EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); | ||
698 | falcon_writel_table(efx, ®, efx->type->evq_rptr_tbl_base, | ||
699 | channel->channel); | ||
700 | } | ||
701 | |||
702 | /* Use HW to insert a SW defined event */ | ||
703 | void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event) | ||
704 | { | ||
705 | efx_oword_t drv_ev_reg; | ||
706 | |||
707 | EFX_POPULATE_OWORD_2(drv_ev_reg, | ||
708 | DRV_EV_QID, channel->channel, | ||
709 | DRV_EV_DATA, | ||
710 | EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); | ||
711 | falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); | ||
712 | } | ||
713 | |||
714 | /* Handle a transmit completion event | ||
715 | * | ||
716 | * Falcon batches TX completion events; the message we receive is of | ||
717 | * the form "complete all TX events up to this index". | ||
718 | */ | ||
719 | static void falcon_handle_tx_event(struct efx_channel *channel, | ||
720 | efx_qword_t *event) | ||
721 | { | ||
722 | unsigned int tx_ev_desc_ptr; | ||
723 | unsigned int tx_ev_q_label; | ||
724 | struct efx_tx_queue *tx_queue; | ||
725 | struct efx_nic *efx = channel->efx; | ||
726 | |||
727 | if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) { | ||
728 | /* Transmit completion */ | ||
729 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR); | ||
730 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | ||
731 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | ||
732 | channel->irq_mod_score += | ||
733 | (tx_ev_desc_ptr - tx_queue->read_count) & | ||
734 | efx->type->txd_ring_mask; | ||
735 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | ||
736 | } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) { | ||
737 | /* Rewrite the FIFO write pointer */ | ||
738 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | ||
739 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | ||
740 | |||
741 | if (efx_dev_registered(efx)) | ||
742 | netif_tx_lock(efx->net_dev); | ||
743 | falcon_notify_tx_desc(tx_queue); | ||
744 | if (efx_dev_registered(efx)) | ||
745 | netif_tx_unlock(efx->net_dev); | ||
746 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && | ||
747 | EFX_WORKAROUND_10727(efx)) { | ||
748 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
749 | } else { | ||
750 | EFX_ERR(efx, "channel %d unexpected TX event " | ||
751 | EFX_QWORD_FMT"\n", channel->channel, | ||
752 | EFX_QWORD_VAL(*event)); | ||
753 | } | ||
754 | } | ||
755 | |||
756 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | ||
757 | static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | ||
758 | const efx_qword_t *event, | ||
759 | bool *rx_ev_pkt_ok, | ||
760 | bool *discard) | ||
761 | { | ||
762 | struct efx_nic *efx = rx_queue->efx; | ||
763 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | ||
764 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | ||
765 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | ||
766 | bool rx_ev_other_err, rx_ev_pause_frm; | ||
767 | bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
768 | unsigned rx_ev_pkt_type; | ||
769 | |||
770 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | ||
771 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | ||
772 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC); | ||
773 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE); | ||
774 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | ||
775 | RX_EV_BUF_OWNER_ID_ERR); | ||
776 | rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR); | ||
777 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | ||
778 | RX_EV_IP_HDR_CHKSUM_ERR); | ||
779 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | ||
780 | RX_EV_TCP_UDP_CHKSUM_ERR); | ||
781 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); | ||
782 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); | ||
783 | rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? | ||
784 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); | ||
785 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); | ||
786 | |||
787 | /* Every error apart from tobe_disc and pause_frm */ | ||
788 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | ||
789 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | ||
790 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | ||
791 | |||
792 | /* Count errors that are not in MAC stats. Ignore expected | ||
793 | * checksum errors during self-test. */ | ||
794 | if (rx_ev_frm_trunc) | ||
795 | ++rx_queue->channel->n_rx_frm_trunc; | ||
796 | else if (rx_ev_tobe_disc) | ||
797 | ++rx_queue->channel->n_rx_tobe_disc; | ||
798 | else if (!efx->loopback_selftest) { | ||
799 | if (rx_ev_ip_hdr_chksum_err) | ||
800 | ++rx_queue->channel->n_rx_ip_hdr_chksum_err; | ||
801 | else if (rx_ev_tcp_udp_chksum_err) | ||
802 | ++rx_queue->channel->n_rx_tcp_udp_chksum_err; | ||
803 | } | ||
804 | if (rx_ev_ip_frag_err) | ||
805 | ++rx_queue->channel->n_rx_ip_frag_err; | ||
806 | |||
807 | /* The frame must be discarded if any of these are true. */ | ||
808 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | ||
809 | rx_ev_tobe_disc | rx_ev_pause_frm); | ||
810 | |||
811 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | ||
812 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | ||
813 | * to a FIFO overflow. | ||
814 | */ | ||
815 | #ifdef EFX_ENABLE_DEBUG | ||
816 | if (rx_ev_other_err) { | ||
817 | EFX_INFO_RL(efx, " RX queue %d unexpected RX event " | ||
818 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | ||
819 | rx_queue->queue, EFX_QWORD_VAL(*event), | ||
820 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | ||
821 | rx_ev_ip_hdr_chksum_err ? | ||
822 | " [IP_HDR_CHKSUM_ERR]" : "", | ||
823 | rx_ev_tcp_udp_chksum_err ? | ||
824 | " [TCP_UDP_CHKSUM_ERR]" : "", | ||
825 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | ||
826 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | ||
827 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | ||
828 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | ||
829 | rx_ev_pause_frm ? " [PAUSE]" : ""); | ||
830 | } | ||
831 | #endif | ||
832 | } | ||
833 | |||
834 | /* Handle receive events that are not in-order. */ | ||
835 | static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue, | ||
836 | unsigned index) | ||
837 | { | ||
838 | struct efx_nic *efx = rx_queue->efx; | ||
839 | unsigned expected, dropped; | ||
840 | |||
841 | expected = rx_queue->removed_count & FALCON_RXD_RING_MASK; | ||
842 | dropped = ((index + FALCON_RXD_RING_SIZE - expected) & | ||
843 | FALCON_RXD_RING_MASK); | ||
844 | EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", | ||
845 | dropped, index, expected); | ||
846 | |||
847 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
848 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
849 | } | ||
850 | |||
851 | /* Handle a packet received event | ||
852 | * | ||
853 | * Falcon silicon gives a "discard" flag if it's a unicast packet with the | ||
854 | * wrong destination address | ||
855 | * Also "is multicast" and "matches multicast filter" flags can be used to | ||
856 | * discard non-matching multicast packets. | ||
857 | */ | ||
858 | static void falcon_handle_rx_event(struct efx_channel *channel, | ||
859 | const efx_qword_t *event) | ||
860 | { | ||
861 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | ||
862 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
863 | unsigned expected_ptr; | ||
864 | bool rx_ev_pkt_ok, discard = false, checksummed; | ||
865 | struct efx_rx_queue *rx_queue; | ||
866 | struct efx_nic *efx = channel->efx; | ||
867 | |||
868 | /* Basic packet information */ | ||
869 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT); | ||
870 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK); | ||
871 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | ||
872 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); | ||
873 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); | ||
874 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel); | ||
875 | |||
876 | rx_queue = &efx->rx_queue[channel->channel]; | ||
877 | |||
878 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); | ||
879 | expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; | ||
880 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) | ||
881 | falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | ||
882 | |||
883 | if (likely(rx_ev_pkt_ok)) { | ||
884 | /* If packet is marked as OK and packet type is TCP/IPv4 or | ||
885 | * UDP/IPv4, then we can rely on the hardware checksum. | ||
886 | */ | ||
887 | checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); | ||
888 | } else { | ||
889 | falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, | ||
890 | &discard); | ||
891 | checksummed = false; | ||
892 | } | ||
893 | |||
894 | /* Detect multicast packets that didn't match the filter */ | ||
895 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | ||
896 | if (rx_ev_mcast_pkt) { | ||
897 | unsigned int rx_ev_mcast_hash_match = | ||
898 | EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); | ||
899 | |||
900 | if (unlikely(!rx_ev_mcast_hash_match)) | ||
901 | discard = true; | ||
902 | } | ||
903 | |||
904 | channel->irq_mod_score += 2; | ||
905 | |||
906 | /* Handle received packet */ | ||
907 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, | ||
908 | checksummed, discard); | ||
909 | } | ||
910 | |||
911 | /* Global events are basically PHY events */ | ||
912 | static void falcon_handle_global_event(struct efx_channel *channel, | ||
913 | efx_qword_t *event) | ||
914 | { | ||
915 | struct efx_nic *efx = channel->efx; | ||
916 | bool handled = false; | ||
917 | |||
918 | if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || | ||
919 | EFX_QWORD_FIELD(*event, G_PHY1_INTR) || | ||
920 | EFX_QWORD_FIELD(*event, XG_PHY_INTR) || | ||
921 | EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) { | ||
922 | efx->phy_op->clear_interrupt(efx); | ||
923 | queue_work(efx->workqueue, &efx->phy_work); | ||
924 | handled = true; | ||
925 | } | ||
926 | |||
927 | if ((falcon_rev(efx) >= FALCON_REV_B0) && | ||
928 | EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) { | ||
929 | queue_work(efx->workqueue, &efx->mac_work); | ||
930 | handled = true; | ||
931 | } | ||
932 | |||
933 | if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { | ||
934 | EFX_ERR(efx, "channel %d seen global RX_RESET " | ||
935 | "event. Resetting.\n", channel->channel); | ||
936 | |||
937 | atomic_inc(&efx->rx_reset); | ||
938 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | ||
939 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
940 | handled = true; | ||
941 | } | ||
942 | |||
943 | if (!handled) | ||
944 | EFX_ERR(efx, "channel %d unknown global event " | ||
945 | EFX_QWORD_FMT "\n", channel->channel, | ||
946 | EFX_QWORD_VAL(*event)); | ||
947 | } | ||
948 | |||
949 | static void falcon_handle_driver_event(struct efx_channel *channel, | ||
950 | efx_qword_t *event) | ||
951 | { | ||
952 | struct efx_nic *efx = channel->efx; | ||
953 | unsigned int ev_sub_code; | ||
954 | unsigned int ev_sub_data; | ||
955 | |||
956 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | ||
957 | ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA); | ||
958 | |||
959 | switch (ev_sub_code) { | ||
960 | case TX_DESCQ_FLS_DONE_EV_DECODE: | ||
961 | EFX_TRACE(efx, "channel %d TXQ %d flushed\n", | ||
962 | channel->channel, ev_sub_data); | ||
963 | break; | ||
964 | case RX_DESCQ_FLS_DONE_EV_DECODE: | ||
965 | EFX_TRACE(efx, "channel %d RXQ %d flushed\n", | ||
966 | channel->channel, ev_sub_data); | ||
967 | break; | ||
968 | case EVQ_INIT_DONE_EV_DECODE: | ||
969 | EFX_LOG(efx, "channel %d EVQ %d initialised\n", | ||
970 | channel->channel, ev_sub_data); | ||
971 | break; | ||
972 | case SRM_UPD_DONE_EV_DECODE: | ||
973 | EFX_TRACE(efx, "channel %d SRAM update done\n", | ||
974 | channel->channel); | ||
975 | break; | ||
976 | case WAKE_UP_EV_DECODE: | ||
977 | EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", | ||
978 | channel->channel, ev_sub_data); | ||
979 | break; | ||
980 | case TIMER_EV_DECODE: | ||
981 | EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", | ||
982 | channel->channel, ev_sub_data); | ||
983 | break; | ||
984 | case RX_RECOVERY_EV_DECODE: | ||
985 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " | ||
986 | "Resetting.\n", channel->channel); | ||
987 | atomic_inc(&efx->rx_reset); | ||
988 | efx_schedule_reset(efx, | ||
989 | EFX_WORKAROUND_6555(efx) ? | ||
990 | RESET_TYPE_RX_RECOVERY : | ||
991 | RESET_TYPE_DISABLE); | ||
992 | break; | ||
993 | case RX_DSC_ERROR_EV_DECODE: | ||
994 | EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." | ||
995 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
996 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | ||
997 | break; | ||
998 | case TX_DSC_ERROR_EV_DECODE: | ||
999 | EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." | ||
1000 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
1001 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
1002 | break; | ||
1003 | default: | ||
1004 | EFX_TRACE(efx, "channel %d unknown driver event code %d " | ||
1005 | "data %04x\n", channel->channel, ev_sub_code, | ||
1006 | ev_sub_data); | ||
1007 | break; | ||
1008 | } | ||
1009 | } | ||
1010 | |||
1011 | int falcon_process_eventq(struct efx_channel *channel, int rx_quota) | ||
1012 | { | ||
1013 | unsigned int read_ptr; | ||
1014 | efx_qword_t event, *p_event; | ||
1015 | int ev_code; | ||
1016 | int rx_packets = 0; | ||
1017 | |||
1018 | read_ptr = channel->eventq_read_ptr; | ||
1019 | |||
1020 | do { | ||
1021 | p_event = falcon_event(channel, read_ptr); | ||
1022 | event = *p_event; | ||
1023 | |||
1024 | if (!falcon_event_present(&event)) | ||
1025 | /* End of events */ | ||
1026 | break; | ||
1027 | |||
1028 | EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n", | ||
1029 | channel->channel, EFX_QWORD_VAL(event)); | ||
1030 | |||
1031 | /* Clear this event by marking it all ones */ | ||
1032 | EFX_SET_QWORD(*p_event); | ||
1033 | |||
1034 | ev_code = EFX_QWORD_FIELD(event, EV_CODE); | ||
1035 | |||
1036 | switch (ev_code) { | ||
1037 | case RX_IP_EV_DECODE: | ||
1038 | falcon_handle_rx_event(channel, &event); | ||
1039 | ++rx_packets; | ||
1040 | break; | ||
1041 | case TX_IP_EV_DECODE: | ||
1042 | falcon_handle_tx_event(channel, &event); | ||
1043 | break; | ||
1044 | case DRV_GEN_EV_DECODE: | ||
1045 | channel->eventq_magic | ||
1046 | = EFX_QWORD_FIELD(event, EVQ_MAGIC); | ||
1047 | EFX_LOG(channel->efx, "channel %d received generated " | ||
1048 | "event "EFX_QWORD_FMT"\n", channel->channel, | ||
1049 | EFX_QWORD_VAL(event)); | ||
1050 | break; | ||
1051 | case GLOBAL_EV_DECODE: | ||
1052 | falcon_handle_global_event(channel, &event); | ||
1053 | break; | ||
1054 | case DRIVER_EV_DECODE: | ||
1055 | falcon_handle_driver_event(channel, &event); | ||
1056 | break; | ||
1057 | default: | ||
1058 | EFX_ERR(channel->efx, "channel %d unknown event type %d" | ||
1059 | " (data " EFX_QWORD_FMT ")\n", channel->channel, | ||
1060 | ev_code, EFX_QWORD_VAL(event)); | ||
1061 | } | ||
1062 | |||
1063 | /* Increment read pointer */ | ||
1064 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; | ||
1065 | |||
1066 | } while (rx_packets < rx_quota); | ||
1067 | |||
1068 | channel->eventq_read_ptr = read_ptr; | ||
1069 | return rx_packets; | ||
1070 | } | ||
1071 | |||
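For context, the quota-bounded loop above is normally driven from a per-channel poll routine; the sketch below is only an illustration of such a caller, and the NAPI plumbing and read-pointer re-arming it alludes to are assumptions, not part of this hunk.

	/* Sketch only: consume at most 'budget' RX completions on one channel.
	 * TX, driver and global events are handled as a side effect and do
	 * not count against the budget. */
	static int example_channel_poll(struct efx_channel *channel, int budget)
	{
		int rx_packets = falcon_process_eventq(channel, budget);

		/* A real poll routine would re-enable interrupts and write the
		 * event queue read pointer once rx_packets < budget. */
		return rx_packets;
	}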
1072 | void falcon_set_int_moderation(struct efx_channel *channel) | ||
1073 | { | 104 | { |
1074 | efx_dword_t timer_cmd; | 105 | efx_dword_t timer_cmd; |
1075 | struct efx_nic *efx = channel->efx; | 106 | struct efx_nic *efx = channel->efx; |
1076 | 107 | ||
1077 | /* Set timer register */ | 108 | /* Set timer register */ |
1078 | if (channel->irq_moderation) { | 109 | if (channel->irq_moderation) { |
1079 | /* Round to resolution supported by hardware. The value we | ||
1080 | * program is based at 0. So actual interrupt moderation | ||
1081 | * achieved is ((x + 1) * res). | ||
1082 | */ | ||
1083 | channel->irq_moderation -= (channel->irq_moderation % | ||
1084 | FALCON_IRQ_MOD_RESOLUTION); | ||
1085 | if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION) | ||
1086 | channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION; | ||
1087 | EFX_POPULATE_DWORD_2(timer_cmd, | 110 | EFX_POPULATE_DWORD_2(timer_cmd, |
1088 | TIMER_MODE, TIMER_MODE_INT_HLDOFF, | 111 | FRF_AB_TC_TIMER_MODE, |
1089 | TIMER_VAL, | 112 | FFE_BB_TIMER_MODE_INT_HLDOFF, |
1090 | channel->irq_moderation / | 113 | FRF_AB_TC_TIMER_VAL, |
1091 | FALCON_IRQ_MOD_RESOLUTION - 1); | 114 | channel->irq_moderation - 1); |
1092 | } else { | 115 | } else { |
1093 | EFX_POPULATE_DWORD_2(timer_cmd, | 116 | EFX_POPULATE_DWORD_2(timer_cmd, |
1094 | TIMER_MODE, TIMER_MODE_DIS, | 117 | FRF_AB_TC_TIMER_MODE, |
1095 | TIMER_VAL, 0); | 118 | FFE_BB_TIMER_MODE_DIS, |
119 | FRF_AB_TC_TIMER_VAL, 0); | ||
1096 | } | 120 | } |
1097 | falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, | 121 | BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0); |
1098 | channel->channel); | 122 | efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, |
1099 | 123 | channel->channel); | |
1100 | } | 124 | } |
1101 | 125 | ||
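For reference, the rounding logic removed above implies the arithmetic sketched here; the resolution value of 5 is purely illustrative, and the new code instead expects the caller to pass a count already expressed in hardware ticks.

	/* Old path, assuming FALCON_IRQ_MOD_RESOLUTION == 5 (illustrative only):
	 *   requested  = 23
	 *   rounded    = 23 - (23 % 5)  = 20
	 *   TIMER_VAL  = 20 / 5 - 1     = 3
	 *   holdoff    = (3 + 1) * 5    = 20   (matches the rounded request)
	 * New path: TIMER_VAL is simply (channel->irq_moderation - 1). */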
1102 | /* Allocate buffer table entries for event queue */ | 126 | static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); |
1103 | int falcon_probe_eventq(struct efx_channel *channel) | ||
1104 | { | ||
1105 | struct efx_nic *efx = channel->efx; | ||
1106 | unsigned int evq_size; | ||
1107 | 127 | ||
1108 | evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t); | 128 | static void falcon_prepare_flush(struct efx_nic *efx) |
1109 | return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); | ||
1110 | } | ||
1111 | |||
1112 | void falcon_init_eventq(struct efx_channel *channel) | ||
1113 | { | 129 | { |
1114 | efx_oword_t evq_ptr; | 130 | falcon_deconfigure_mac_wrapper(efx); |
1115 | struct efx_nic *efx = channel->efx; | ||
1116 | |||
1117 | EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", | ||
1118 | channel->channel, channel->eventq.index, | ||
1119 | channel->eventq.index + channel->eventq.entries - 1); | ||
1120 | |||
1121 | /* Pin event queue buffer */ | ||
1122 | falcon_init_special_buffer(efx, &channel->eventq); | ||
1123 | |||
1124 | /* Fill event queue with all ones (i.e. empty events) */ | ||
1125 | memset(channel->eventq.addr, 0xff, channel->eventq.len); | ||
1126 | 131 | ||
1127 | /* Push event queue to card */ | 132 | /* Wait for the tx and rx fifo's to get to the next packet boundary |
1128 | EFX_POPULATE_OWORD_3(evq_ptr, | 133 | * (~1ms without back-pressure), then to drain the remainder of the |
1129 | EVQ_EN, 1, | 134 | * fifo's at data path speeds (negligible), with a healthy margin. */ |
1130 | EVQ_SIZE, FALCON_EVQ_ORDER, | 135 | msleep(10); |
1131 | EVQ_BUF_BASE_ID, channel->eventq.index); | ||
1132 | falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, | ||
1133 | channel->channel); | ||
1134 | |||
1135 | falcon_set_int_moderation(channel); | ||
1136 | } | ||
1137 | |||
1138 | void falcon_fini_eventq(struct efx_channel *channel) | ||
1139 | { | ||
1140 | efx_oword_t eventq_ptr; | ||
1141 | struct efx_nic *efx = channel->efx; | ||
1142 | |||
1143 | /* Remove event queue from card */ | ||
1144 | EFX_ZERO_OWORD(eventq_ptr); | ||
1145 | falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, | ||
1146 | channel->channel); | ||
1147 | |||
1148 | /* Unpin event queue */ | ||
1149 | falcon_fini_special_buffer(efx, &channel->eventq); | ||
1150 | } | ||
1151 | |||
1152 | /* Free buffers backing event queue */ | ||
1153 | void falcon_remove_eventq(struct efx_channel *channel) | ||
1154 | { | ||
1155 | falcon_free_special_buffer(channel->efx, &channel->eventq); | ||
1156 | } | ||
1157 | |||
1158 | |||
1159 | /* Generates a test event on the event queue. A subsequent call to | ||
1160 | * process_eventq() should pick up the event and place the value of | ||
1161 | * "magic" into channel->eventq_magic; | ||
1162 | */ | ||
1163 | void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic) | ||
1164 | { | ||
1165 | efx_qword_t test_event; | ||
1166 | |||
1167 | EFX_POPULATE_QWORD_2(test_event, | ||
1168 | EV_CODE, DRV_GEN_EV_DECODE, | ||
1169 | EVQ_MAGIC, magic); | ||
1170 | falcon_generate_event(channel, &test_event); | ||
1171 | } | ||
1172 | |||
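A sketch of the self-test handshake the comment above describes; the choice of magic value and the wait for the hardware to deliver the event are assumptions rather than part of this patch.

	static bool example_eventq_selftest(struct efx_channel *channel)
	{
		const unsigned int magic = 0xb105 + channel->channel; /* arbitrary */

		channel->eventq_magic = 0;
		falcon_generate_test_event(channel, magic);

		/* Once the hardware has delivered the event, processing the
		 * queue stores the magic value back into the channel. */
		falcon_process_eventq(channel, 1);
		return channel->eventq_magic == magic;
	}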
1173 | void falcon_sim_phy_event(struct efx_nic *efx) | ||
1174 | { | ||
1175 | efx_qword_t phy_event; | ||
1176 | |||
1177 | EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); | ||
1178 | if (EFX_IS10G(efx)) | ||
1179 | EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); | ||
1180 | else | ||
1181 | EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); | ||
1182 | |||
1183 | falcon_generate_event(&efx->channel[0], &phy_event); | ||
1184 | } | ||
1185 | |||
1186 | /************************************************************************** | ||
1187 | * | ||
1188 | * Flush handling | ||
1189 | * | ||
1190 | **************************************************************************/ | ||
1191 | |||
1192 | |||
1193 | static void falcon_poll_flush_events(struct efx_nic *efx) | ||
1194 | { | ||
1195 | struct efx_channel *channel = &efx->channel[0]; | ||
1196 | struct efx_tx_queue *tx_queue; | ||
1197 | struct efx_rx_queue *rx_queue; | ||
1198 | unsigned int read_ptr = channel->eventq_read_ptr; | ||
1199 | unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK; | ||
1200 | |||
1201 | do { | ||
1202 | efx_qword_t *event = falcon_event(channel, read_ptr); | ||
1203 | int ev_code, ev_sub_code, ev_queue; | ||
1204 | bool ev_failed; | ||
1205 | |||
1206 | if (!falcon_event_present(event)) | ||
1207 | break; | ||
1208 | |||
1209 | ev_code = EFX_QWORD_FIELD(*event, EV_CODE); | ||
1210 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | ||
1211 | if (ev_code == DRIVER_EV_DECODE && | ||
1212 | ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) { | ||
1213 | ev_queue = EFX_QWORD_FIELD(*event, | ||
1214 | DRIVER_EV_TX_DESCQ_ID); | ||
1215 | if (ev_queue < EFX_TX_QUEUE_COUNT) { | ||
1216 | tx_queue = efx->tx_queue + ev_queue; | ||
1217 | tx_queue->flushed = true; | ||
1218 | } | ||
1219 | } else if (ev_code == DRIVER_EV_DECODE && | ||
1220 | ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) { | ||
1221 | ev_queue = EFX_QWORD_FIELD(*event, | ||
1222 | DRIVER_EV_RX_DESCQ_ID); | ||
1223 | ev_failed = EFX_QWORD_FIELD(*event, | ||
1224 | DRIVER_EV_RX_FLUSH_FAIL); | ||
1225 | if (ev_queue < efx->n_rx_queues) { | ||
1226 | rx_queue = efx->rx_queue + ev_queue; | ||
1227 | |||
1228 | /* retry the rx flush */ | ||
1229 | if (ev_failed) | ||
1230 | falcon_flush_rx_queue(rx_queue); | ||
1231 | else | ||
1232 | rx_queue->flushed = true; | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; | ||
1237 | } while (read_ptr != end_ptr); | ||
1238 | } | ||
1239 | |||
1240 | /* Handle tx and rx flushes at the same time, since they run in | ||
1241 | * parallel in the hardware and there's no reason for us to | ||
1242 | * serialise them */ | ||
1243 | int falcon_flush_queues(struct efx_nic *efx) | ||
1244 | { | ||
1245 | struct efx_rx_queue *rx_queue; | ||
1246 | struct efx_tx_queue *tx_queue; | ||
1247 | int i; | ||
1248 | bool outstanding; | ||
1249 | |||
1250 | /* Issue flush requests */ | ||
1251 | efx_for_each_tx_queue(tx_queue, efx) { | ||
1252 | tx_queue->flushed = false; | ||
1253 | falcon_flush_tx_queue(tx_queue); | ||
1254 | } | ||
1255 | efx_for_each_rx_queue(rx_queue, efx) { | ||
1256 | rx_queue->flushed = false; | ||
1257 | falcon_flush_rx_queue(rx_queue); | ||
1258 | } | ||
1259 | |||
1260 | /* Poll the evq looking for flush completions. Since we're not pushing | ||
1261 | * any more rx or tx descriptors at this point, we're in no danger of | ||
1262 | * overflowing the evq whilst we wait */ | ||
1263 | for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) { | ||
1264 | msleep(FALCON_FLUSH_INTERVAL); | ||
1265 | falcon_poll_flush_events(efx); | ||
1266 | |||
1267 | /* Check if every queue has been successfully flushed */ | ||
1268 | outstanding = false; | ||
1269 | efx_for_each_tx_queue(tx_queue, efx) | ||
1270 | outstanding |= !tx_queue->flushed; | ||
1271 | efx_for_each_rx_queue(rx_queue, efx) | ||
1272 | outstanding |= !rx_queue->flushed; | ||
1273 | if (!outstanding) | ||
1274 | return 0; | ||
1275 | } | ||
1276 | |||
1277 | /* Mark the queues as all flushed. We're going to return failure | ||
1278 | * leading to a reset, or fake up success anyway. "flushed" now | ||
1279 | * indicates that we tried to flush. */ | ||
1280 | efx_for_each_tx_queue(tx_queue, efx) { | ||
1281 | if (!tx_queue->flushed) | ||
1282 | EFX_ERR(efx, "tx queue %d flush command timed out\n", | ||
1283 | tx_queue->queue); | ||
1284 | tx_queue->flushed = true; | ||
1285 | } | ||
1286 | efx_for_each_rx_queue(rx_queue, efx) { | ||
1287 | if (!rx_queue->flushed) | ||
1288 | EFX_ERR(efx, "rx queue %d flush command timed out\n", | ||
1289 | rx_queue->queue); | ||
1290 | rx_queue->flushed = true; | ||
1291 | } | ||
1292 | |||
1293 | if (EFX_WORKAROUND_7803(efx)) | ||
1294 | return 0; | ||
1295 | |||
1296 | return -ETIMEDOUT; | ||
1297 | } | ||
1298 | |||
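Because the polling above is bounded, a caller can treat a non-zero return as "flush timed out"; a hedged sketch of a teardown path follows, with the surrounding fini/remove steps assumed rather than taken from this patch.

	static void example_stop_datapath(struct efx_nic *efx)
	{
		if (falcon_flush_queues(efx))
			EFX_ERR(efx, "queue flush timed out; continuing teardown\n");
		/* ...then fini/remove the TX queues, RX queues and event queues... */
	}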
1299 | /************************************************************************** | ||
1300 | * | ||
1301 | * Falcon hardware interrupts | ||
1302 | * The hardware interrupt handler does very little work; all the event | ||
1303 | * queue processing is carried out by per-channel tasklets. | ||
1304 | * | ||
1305 | **************************************************************************/ | ||
1306 | |||
1307 | /* Enable/disable/generate Falcon interrupts */ | ||
1308 | static inline void falcon_interrupts(struct efx_nic *efx, int enabled, | ||
1309 | int force) | ||
1310 | { | ||
1311 | efx_oword_t int_en_reg_ker; | ||
1312 | |||
1313 | EFX_POPULATE_OWORD_2(int_en_reg_ker, | ||
1314 | KER_INT_KER, force, | ||
1315 | DRV_INT_EN_KER, enabled); | ||
1316 | falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER); | ||
1317 | } | ||
1318 | |||
1319 | void falcon_enable_interrupts(struct efx_nic *efx) | ||
1320 | { | ||
1321 | efx_oword_t int_adr_reg_ker; | ||
1322 | struct efx_channel *channel; | ||
1323 | |||
1324 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | ||
1325 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | ||
1326 | |||
1327 | /* Program address */ | ||
1328 | EFX_POPULATE_OWORD_2(int_adr_reg_ker, | ||
1329 | NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), | ||
1330 | INT_ADR_KER, efx->irq_status.dma_addr); | ||
1331 | falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER); | ||
1332 | |||
1333 | /* Enable interrupts */ | ||
1334 | falcon_interrupts(efx, 1, 0); | ||
1335 | |||
1336 | /* Force processing of all the channels to get the EVQ RPTRs up to | ||
1337 | date */ | ||
1338 | efx_for_each_channel(channel, efx) | ||
1339 | efx_schedule_channel(channel); | ||
1340 | } | ||
1341 | |||
1342 | void falcon_disable_interrupts(struct efx_nic *efx) | ||
1343 | { | ||
1344 | /* Disable interrupts */ | ||
1345 | falcon_interrupts(efx, 0, 0); | ||
1346 | } | ||
1347 | |||
1348 | /* Generate a Falcon test interrupt | ||
1349 | * Interrupt must already have been enabled, otherwise nasty things | ||
1350 | * may happen. | ||
1351 | */ | ||
1352 | void falcon_generate_interrupt(struct efx_nic *efx) | ||
1353 | { | ||
1354 | falcon_interrupts(efx, 1, 1); | ||
1355 | } | 136 | } |
1356 | 137 | ||
1357 | /* Acknowledge a legacy interrupt from Falcon | 138 | /* Acknowledge a legacy interrupt from Falcon |
@@ -1364,113 +145,17 @@ void falcon_generate_interrupt(struct efx_nic *efx) | |||
1364 | * | 145 | * |
1365 | * NB most hardware supports MSI interrupts | 146 | * NB most hardware supports MSI interrupts |
1366 | */ | 147 | */ |
1367 | static inline void falcon_irq_ack_a1(struct efx_nic *efx) | 148 | inline void falcon_irq_ack_a1(struct efx_nic *efx) |
1368 | { | 149 | { |
1369 | efx_dword_t reg; | 150 | efx_dword_t reg; |
1370 | 151 | ||
1371 | EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e); | 152 | EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e); |
1372 | falcon_writel(efx, ®, INT_ACK_REG_KER_A1); | 153 | efx_writed(efx, ®, FR_AA_INT_ACK_KER); |
1373 | falcon_readl(efx, ®, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1); | 154 | efx_readd(efx, ®, FR_AA_WORK_AROUND_BROKEN_PCI_READS); |
1374 | } | 155 | } |
1375 | 156 | ||
1376 | /* Process a fatal interrupt | ||
1377 | * Disable bus mastering ASAP and schedule a reset | ||
1378 | */ | ||
1379 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | ||
1380 | { | ||
1381 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1382 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1383 | efx_oword_t fatal_intr; | ||
1384 | int error, mem_perr; | ||
1385 | |||
1386 | falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER); | ||
1387 | error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR); | ||
1388 | |||
1389 | EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " | ||
1390 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
1391 | EFX_OWORD_VAL(fatal_intr), | ||
1392 | error ? "disabling bus mastering" : "no recognised error"); | ||
1393 | if (error == 0) | ||
1394 | goto out; | ||
1395 | |||
1396 | /* If this is a memory parity error dump which blocks are offending */ | ||
1397 | mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER); | ||
1398 | if (mem_perr) { | ||
1399 | efx_oword_t reg; | ||
1400 | falcon_read(efx, ®, MEM_STAT_REG_KER); | ||
1401 | EFX_ERR(efx, "SYSTEM ERROR: memory parity error " | ||
1402 | EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); | ||
1403 | } | ||
1404 | |||
1405 | /* Disable both devices */ | ||
1406 | pci_clear_master(efx->pci_dev); | ||
1407 | if (FALCON_IS_DUAL_FUNC(efx)) | ||
1408 | pci_clear_master(nic_data->pci_dev2); | ||
1409 | falcon_disable_interrupts(efx); | ||
1410 | |||
1411 | /* Count errors and reset or disable the NIC accordingly */ | ||
1412 | if (nic_data->int_error_count == 0 || | ||
1413 | time_after(jiffies, nic_data->int_error_expire)) { | ||
1414 | nic_data->int_error_count = 0; | ||
1415 | nic_data->int_error_expire = | ||
1416 | jiffies + FALCON_INT_ERROR_EXPIRE * HZ; | ||
1417 | } | ||
1418 | if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) { | ||
1419 | EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); | ||
1420 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
1421 | } else { | ||
1422 | EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. " | ||
1423 | "NIC will be disabled\n"); | ||
1424 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
1425 | } | ||
1426 | out: | ||
1427 | return IRQ_HANDLED; | ||
1428 | } | ||
1429 | 157 | ||
1430 | /* Handle a legacy interrupt from Falcon | 158 | irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) |
1431 | * Acknowledges the interrupt and schedules event queue processing. | ||
1432 | */ | ||
1433 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | ||
1434 | { | ||
1435 | struct efx_nic *efx = dev_id; | ||
1436 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1437 | irqreturn_t result = IRQ_NONE; | ||
1438 | struct efx_channel *channel; | ||
1439 | efx_dword_t reg; | ||
1440 | u32 queues; | ||
1441 | int syserr; | ||
1442 | |||
1443 | /* Read the ISR which also ACKs the interrupts */ | ||
1444 | falcon_readl(efx, ®, INT_ISR0_B0); | ||
1445 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | ||
1446 | |||
1447 | /* Check to see if we have a serious error condition */ | ||
1448 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | ||
1449 | if (unlikely(syserr)) | ||
1450 | return falcon_fatal_interrupt(efx); | ||
1451 | |||
1452 | /* Schedule processing of any interrupting queues */ | ||
1453 | efx_for_each_channel(channel, efx) { | ||
1454 | if ((queues & 1) || | ||
1455 | falcon_event_present( | ||
1456 | falcon_event(channel, channel->eventq_read_ptr))) { | ||
1457 | efx_schedule_channel(channel); | ||
1458 | result = IRQ_HANDLED; | ||
1459 | } | ||
1460 | queues >>= 1; | ||
1461 | } | ||
1462 | |||
1463 | if (result == IRQ_HANDLED) { | ||
1464 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1465 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | ||
1466 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
1467 | } | ||
1468 | |||
1469 | return result; | ||
1470 | } | ||
1471 | |||
1472 | |||
1473 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | ||
1474 | { | 159 | { |
1475 | struct efx_nic *efx = dev_id; | 160 | struct efx_nic *efx = dev_id; |
1476 | efx_oword_t *int_ker = efx->irq_status.addr; | 161 | efx_oword_t *int_ker = efx->irq_status.addr; |
@@ -1491,15 +176,15 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
1491 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 176 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1492 | 177 | ||
1493 | /* Check to see if we have a serious error condition */ | 178 | /* Check to see if we have a serious error condition */ |
1494 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | 179 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1495 | if (unlikely(syserr)) | 180 | if (unlikely(syserr)) |
1496 | return falcon_fatal_interrupt(efx); | 181 | return efx_nic_fatal_interrupt(efx); |
1497 | 182 | ||
1498 | /* Determine interrupting queues, clear interrupt status | 183 | /* Determine interrupting queues, clear interrupt status |
1499 | * register and acknowledge the device interrupt. | 184 | * register and acknowledge the device interrupt. |
1500 | */ | 185 | */ |
1501 | BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS); | 186 | BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); |
1502 | queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS); | 187 | queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); |
1503 | EFX_ZERO_OWORD(*int_ker); | 188 | EFX_ZERO_OWORD(*int_ker); |
1504 | wmb(); /* Ensure the vector is cleared before interrupt ack */ | 189 | wmb(); /* Ensure the vector is cleared before interrupt ack */ |
1505 | falcon_irq_ack_a1(efx); | 190 | falcon_irq_ack_a1(efx); |
@@ -1515,126 +200,6 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
1515 | 200 | ||
1516 | return IRQ_HANDLED; | 201 | return IRQ_HANDLED; |
1517 | } | 202 | } |
1518 | |||
1519 | /* Handle an MSI interrupt from Falcon | ||
1520 | * | ||
1521 | * Handle an MSI hardware interrupt. This routine schedules event | ||
1522 | * queue processing. No interrupt acknowledgement cycle is necessary. | ||
1523 | * Also, we never need to check that the interrupt is for us, since | ||
1524 | * MSI interrupts cannot be shared. | ||
1525 | */ | ||
1526 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) | ||
1527 | { | ||
1528 | struct efx_channel *channel = dev_id; | ||
1529 | struct efx_nic *efx = channel->efx; | ||
1530 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1531 | int syserr; | ||
1532 | |||
1533 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1534 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
1535 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
1536 | |||
1537 | /* Check to see if we have a serious error condition */ | ||
1538 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | ||
1539 | if (unlikely(syserr)) | ||
1540 | return falcon_fatal_interrupt(efx); | ||
1541 | |||
1542 | /* Schedule processing of the channel */ | ||
1543 | efx_schedule_channel(channel); | ||
1544 | |||
1545 | return IRQ_HANDLED; | ||
1546 | } | ||
1547 | |||
1548 | |||
1549 | /* Setup RSS indirection table. | ||
1550 | * This maps from the hash value of the packet to RXQ | ||
1551 | */ | ||
1552 | static void falcon_setup_rss_indir_table(struct efx_nic *efx) | ||
1553 | { | ||
1554 | int i = 0; | ||
1555 | unsigned long offset; | ||
1556 | efx_dword_t dword; | ||
1557 | |||
1558 | if (falcon_rev(efx) < FALCON_REV_B0) | ||
1559 | return; | ||
1560 | |||
1561 | for (offset = RX_RSS_INDIR_TBL_B0; | ||
1562 | offset < RX_RSS_INDIR_TBL_B0 + 0x800; | ||
1563 | offset += 0x10) { | ||
1564 | EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, | ||
1565 | i % efx->n_rx_queues); | ||
1566 | falcon_writel(efx, &dword, offset); | ||
1567 | i++; | ||
1568 | } | ||
1569 | } | ||
1570 | |||
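The table dimensions follow directly from the loop above:

	/* 0x800 bytes of table at a 0x10 stride gives 0x800 / 0x10 = 128
	 * entries; with, say, 4 RX queues the hash buckets are spread
	 * round-robin as 0,1,2,3,0,1,2,3,... (i % efx->n_rx_queues). */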
1571 | /* Hook interrupt handler(s) | ||
1572 | * Try MSI and then legacy interrupts. | ||
1573 | */ | ||
1574 | int falcon_init_interrupt(struct efx_nic *efx) | ||
1575 | { | ||
1576 | struct efx_channel *channel; | ||
1577 | int rc; | ||
1578 | |||
1579 | if (!EFX_INT_MODE_USE_MSI(efx)) { | ||
1580 | irq_handler_t handler; | ||
1581 | if (falcon_rev(efx) >= FALCON_REV_B0) | ||
1582 | handler = falcon_legacy_interrupt_b0; | ||
1583 | else | ||
1584 | handler = falcon_legacy_interrupt_a1; | ||
1585 | |||
1586 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | ||
1587 | efx->name, efx); | ||
1588 | if (rc) { | ||
1589 | EFX_ERR(efx, "failed to hook legacy IRQ %d\n", | ||
1590 | efx->pci_dev->irq); | ||
1591 | goto fail1; | ||
1592 | } | ||
1593 | return 0; | ||
1594 | } | ||
1595 | |||
1596 | /* Hook MSI or MSI-X interrupt */ | ||
1597 | efx_for_each_channel(channel, efx) { | ||
1598 | rc = request_irq(channel->irq, falcon_msi_interrupt, | ||
1599 | IRQF_PROBE_SHARED, /* Not shared */ | ||
1600 | channel->name, channel); | ||
1601 | if (rc) { | ||
1602 | EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); | ||
1603 | goto fail2; | ||
1604 | } | ||
1605 | } | ||
1606 | |||
1607 | return 0; | ||
1608 | |||
1609 | fail2: | ||
1610 | efx_for_each_channel(channel, efx) | ||
1611 | free_irq(channel->irq, channel); | ||
1612 | fail1: | ||
1613 | return rc; | ||
1614 | } | ||
1615 | |||
1616 | void falcon_fini_interrupt(struct efx_nic *efx) | ||
1617 | { | ||
1618 | struct efx_channel *channel; | ||
1619 | efx_oword_t reg; | ||
1620 | |||
1621 | /* Disable MSI/MSI-X interrupts */ | ||
1622 | efx_for_each_channel(channel, efx) { | ||
1623 | if (channel->irq) | ||
1624 | free_irq(channel->irq, channel); | ||
1625 | } | ||
1626 | |||
1627 | /* ACK legacy interrupt */ | ||
1628 | if (falcon_rev(efx) >= FALCON_REV_B0) | ||
1629 | falcon_read(efx, ®, INT_ISR0_B0); | ||
1630 | else | ||
1631 | falcon_irq_ack_a1(efx); | ||
1632 | |||
1633 | /* Disable legacy interrupt */ | ||
1634 | if (efx->legacy_irq) | ||
1635 | free_irq(efx->legacy_irq, efx); | ||
1636 | } | ||
1637 | |||
1638 | /************************************************************************** | 203 | /************************************************************************** |
1639 | * | 204 | * |
1640 | * EEPROM/flash | 205 | * EEPROM/flash |
@@ -1647,8 +212,8 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1647 | static int falcon_spi_poll(struct efx_nic *efx) | 212 | static int falcon_spi_poll(struct efx_nic *efx) |
1648 | { | 213 | { |
1649 | efx_oword_t reg; | 214 | efx_oword_t reg; |
1650 | falcon_read(efx, ®, EE_SPI_HCMD_REG_KER); | 215 | efx_reado(efx, ®, FR_AB_EE_SPI_HCMD); |
1651 | return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; | 216 | return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; |
1652 | } | 217 | } |
1653 | 218 | ||
1654 | /* Wait for SPI command completion */ | 219 | /* Wait for SPI command completion */ |
@@ -1678,11 +243,10 @@ static int falcon_spi_wait(struct efx_nic *efx) | |||
1678 | } | 243 | } |
1679 | } | 244 | } |
1680 | 245 | ||
1681 | int falcon_spi_cmd(const struct efx_spi_device *spi, | 246 | int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi, |
1682 | unsigned int command, int address, | 247 | unsigned int command, int address, |
1683 | const void *in, void *out, size_t len) | 248 | const void *in, void *out, size_t len) |
1684 | { | 249 | { |
1685 | struct efx_nic *efx = spi->efx; | ||
1686 | bool addressed = (address >= 0); | 250 | bool addressed = (address >= 0); |
1687 | bool reading = (out != NULL); | 251 | bool reading = (out != NULL); |
1688 | efx_oword_t reg; | 252 | efx_oword_t reg; |
@@ -1700,27 +264,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi, | |||
1700 | 264 | ||
1701 | /* Program address register, if we have an address */ | 265 | /* Program address register, if we have an address */ |
1702 | if (addressed) { | 266 | if (addressed) { |
1703 | EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); | 267 | EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address); |
1704 | falcon_write(efx, ®, EE_SPI_HADR_REG_KER); | 268 | efx_writeo(efx, ®, FR_AB_EE_SPI_HADR); |
1705 | } | 269 | } |
1706 | 270 | ||
1707 | /* Program data register, if we have data */ | 271 | /* Program data register, if we have data */ |
1708 | if (in != NULL) { | 272 | if (in != NULL) { |
1709 | memcpy(®, in, len); | 273 | memcpy(®, in, len); |
1710 | falcon_write(efx, ®, EE_SPI_HDATA_REG_KER); | 274 | efx_writeo(efx, ®, FR_AB_EE_SPI_HDATA); |
1711 | } | 275 | } |
1712 | 276 | ||
1713 | /* Issue read/write command */ | 277 | /* Issue read/write command */ |
1714 | EFX_POPULATE_OWORD_7(reg, | 278 | EFX_POPULATE_OWORD_7(reg, |
1715 | EE_SPI_HCMD_CMD_EN, 1, | 279 | FRF_AB_EE_SPI_HCMD_CMD_EN, 1, |
1716 | EE_SPI_HCMD_SF_SEL, spi->device_id, | 280 | FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id, |
1717 | EE_SPI_HCMD_DABCNT, len, | 281 | FRF_AB_EE_SPI_HCMD_DABCNT, len, |
1718 | EE_SPI_HCMD_READ, reading, | 282 | FRF_AB_EE_SPI_HCMD_READ, reading, |
1719 | EE_SPI_HCMD_DUBCNT, 0, | 283 | FRF_AB_EE_SPI_HCMD_DUBCNT, 0, |
1720 | EE_SPI_HCMD_ADBCNT, | 284 | FRF_AB_EE_SPI_HCMD_ADBCNT, |
1721 | (addressed ? spi->addr_len : 0), | 285 | (addressed ? spi->addr_len : 0), |
1722 | EE_SPI_HCMD_ENC, command); | 286 | FRF_AB_EE_SPI_HCMD_ENC, command); |
1723 | falcon_write(efx, ®, EE_SPI_HCMD_REG_KER); | 287 | efx_writeo(efx, ®, FR_AB_EE_SPI_HCMD); |
1724 | 288 | ||
1725 | /* Wait for read/write to complete */ | 289 | /* Wait for read/write to complete */ |
1726 | rc = falcon_spi_wait(efx); | 290 | rc = falcon_spi_wait(efx); |
@@ -1729,7 +293,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi, | |||
1729 | 293 | ||
1730 | /* Read data */ | 294 | /* Read data */ |
1731 | if (out != NULL) { | 295 | if (out != NULL) { |
1732 | falcon_read(efx, ®, EE_SPI_HDATA_REG_KER); | 296 | efx_reado(efx, ®, FR_AB_EE_SPI_HDATA); |
1733 | memcpy(out, ®, len); | 297 | memcpy(out, ®, len); |
1734 | } | 298 | } |
1735 | 299 | ||
@@ -1751,15 +315,15 @@ efx_spi_munge_command(const struct efx_spi_device *spi, | |||
1751 | } | 315 | } |
1752 | 316 | ||
1753 | /* Wait up to 10 ms for buffered write completion */ | 317 | /* Wait up to 10 ms for buffered write completion */ |
1754 | int falcon_spi_wait_write(const struct efx_spi_device *spi) | 318 | int |
319 | falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) | ||
1755 | { | 320 | { |
1756 | struct efx_nic *efx = spi->efx; | ||
1757 | unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); | 321 | unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); |
1758 | u8 status; | 322 | u8 status; |
1759 | int rc; | 323 | int rc; |
1760 | 324 | ||
1761 | for (;;) { | 325 | for (;;) { |
1762 | rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, | 326 | rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, |
1763 | &status, sizeof(status)); | 327 | &status, sizeof(status)); |
1764 | if (rc) | 328 | if (rc) |
1765 | return rc; | 329 | return rc; |
@@ -1775,8 +339,8 @@ int falcon_spi_wait_write(const struct efx_spi_device *spi) | |||
1775 | } | 339 | } |
1776 | } | 340 | } |
1777 | 341 | ||
1778 | int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, | 342 | int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, |
1779 | size_t len, size_t *retlen, u8 *buffer) | 343 | loff_t start, size_t len, size_t *retlen, u8 *buffer) |
1780 | { | 344 | { |
1781 | size_t block_len, pos = 0; | 345 | size_t block_len, pos = 0; |
1782 | unsigned int command; | 346 | unsigned int command; |
@@ -1786,7 +350,7 @@ int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, | |||
1786 | block_len = min(len - pos, FALCON_SPI_MAX_LEN); | 350 | block_len = min(len - pos, FALCON_SPI_MAX_LEN); |
1787 | 351 | ||
1788 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); | 352 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); |
1789 | rc = falcon_spi_cmd(spi, command, start + pos, NULL, | 353 | rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, |
1790 | buffer + pos, block_len); | 354 | buffer + pos, block_len); |
1791 | if (rc) | 355 | if (rc) |
1792 | break; | 356 | break; |
@@ -1805,8 +369,9 @@ int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, | |||
1805 | return rc; | 369 | return rc; |
1806 | } | 370 | } |
1807 | 371 | ||
1808 | int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, | 372 | int |
1809 | size_t len, size_t *retlen, const u8 *buffer) | 373 | falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, |
374 | loff_t start, size_t len, size_t *retlen, const u8 *buffer) | ||
1810 | { | 375 | { |
1811 | u8 verify_buffer[FALCON_SPI_MAX_LEN]; | 376 | u8 verify_buffer[FALCON_SPI_MAX_LEN]; |
1812 | size_t block_len, pos = 0; | 377 | size_t block_len, pos = 0; |
@@ -1814,24 +379,24 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, | |||
1814 | int rc = 0; | 379 | int rc = 0; |
1815 | 380 | ||
1816 | while (pos < len) { | 381 | while (pos < len) { |
1817 | rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); | 382 | rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); |
1818 | if (rc) | 383 | if (rc) |
1819 | break; | 384 | break; |
1820 | 385 | ||
1821 | block_len = min(len - pos, | 386 | block_len = min(len - pos, |
1822 | falcon_spi_write_limit(spi, start + pos)); | 387 | falcon_spi_write_limit(spi, start + pos)); |
1823 | command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); | 388 | command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); |
1824 | rc = falcon_spi_cmd(spi, command, start + pos, | 389 | rc = falcon_spi_cmd(efx, spi, command, start + pos, |
1825 | buffer + pos, NULL, block_len); | 390 | buffer + pos, NULL, block_len); |
1826 | if (rc) | 391 | if (rc) |
1827 | break; | 392 | break; |
1828 | 393 | ||
1829 | rc = falcon_spi_wait_write(spi); | 394 | rc = falcon_spi_wait_write(efx, spi); |
1830 | if (rc) | 395 | if (rc) |
1831 | break; | 396 | break; |
1832 | 397 | ||
1833 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); | 398 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); |
1834 | rc = falcon_spi_cmd(spi, command, start + pos, | 399 | rc = falcon_spi_cmd(efx, spi, command, start + pos, |
1835 | NULL, verify_buffer, block_len); | 400 | NULL, verify_buffer, block_len); |
1836 | if (memcmp(verify_buffer, buffer + pos, block_len)) { | 401 | if (memcmp(verify_buffer, buffer + pos, block_len)) { |
1837 | rc = -EIO; | 402 | rc = -EIO; |
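The hunks above move the struct efx_nic pointer out of efx_spi_device and into every falcon_spi_* call; a hedged sketch of the resulting calling convention is below, where the helper name and the short-read handling are assumptions.

	static int example_read_flash(struct efx_nic *efx,
				      const struct efx_spi_device *spi,
				      loff_t start, size_t len, u8 *buf)
	{
		size_t retlen;
		int rc;

		/* efx is now passed explicitly instead of being read from spi->efx */
		rc = falcon_spi_read(efx, spi, start, len, &retlen, buf);
		if (rc == 0 && retlen != len)
			rc = -EIO;	/* treating a short read as an error is an assumption */
		return rc;
	}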
@@ -1860,60 +425,70 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, | |||
1860 | ************************************************************************** | 425 | ************************************************************************** |
1861 | */ | 426 | */ |
1862 | 427 | ||
1863 | static int falcon_reset_macs(struct efx_nic *efx) | 428 | static void falcon_push_multicast_hash(struct efx_nic *efx) |
1864 | { | 429 | { |
1865 | efx_oword_t reg; | 430 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; |
431 | |||
432 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
433 | |||
434 | efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); | ||
435 | efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); | ||
436 | } | ||
437 | |||
438 | static void falcon_reset_macs(struct efx_nic *efx) | ||
439 | { | ||
440 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
441 | efx_oword_t reg, mac_ctrl; | ||
1866 | int count; | 442 | int count; |
1867 | 443 | ||
1868 | if (falcon_rev(efx) < FALCON_REV_B0) { | 444 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { |
1869 | /* It's not safe to use GLB_CTL_REG to reset the | 445 | /* It's not safe to use GLB_CTL_REG to reset the |
1870 | * macs, so instead use the internal MAC resets | 446 | * macs, so instead use the internal MAC resets |
1871 | */ | 447 | */ |
1872 | if (!EFX_IS10G(efx)) { | 448 | if (!EFX_IS10G(efx)) { |
1873 | EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1); | 449 | EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1); |
1874 | falcon_write(efx, ®, GM_CFG1_REG); | 450 | efx_writeo(efx, ®, FR_AB_GM_CFG1); |
1875 | udelay(1000); | 451 | udelay(1000); |
1876 | 452 | ||
1877 | EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0); | 453 | EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0); |
1878 | falcon_write(efx, ®, GM_CFG1_REG); | 454 | efx_writeo(efx, ®, FR_AB_GM_CFG1); |
1879 | udelay(1000); | 455 | udelay(1000); |
1880 | return 0; | 456 | return; |
1881 | } else { | 457 | } else { |
1882 | EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); | 458 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); |
1883 | falcon_write(efx, ®, XM_GLB_CFG_REG); | 459 | efx_writeo(efx, ®, FR_AB_XM_GLB_CFG); |
1884 | 460 | ||
1885 | for (count = 0; count < 10000; count++) { | 461 | for (count = 0; count < 10000; count++) { |
1886 | falcon_read(efx, ®, XM_GLB_CFG_REG); | 462 | efx_reado(efx, ®, FR_AB_XM_GLB_CFG); |
1887 | if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) | 463 | if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == |
1888 | return 0; | 464 | 0) |
465 | return; | ||
1889 | udelay(10); | 466 | udelay(10); |
1890 | } | 467 | } |
1891 | 468 | ||
1892 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); | 469 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); |
1893 | return -ETIMEDOUT; | ||
1894 | } | 470 | } |
1895 | } | 471 | } |
1896 | 472 | ||
1897 | /* MAC stats will fail whilst the TX fifo is draining. Serialise | 473 | /* MAC stats will fail whilst the TX fifo is draining */ |
1898 | * the drain sequence with the statistics fetch */ | 474 | WARN_ON(nic_data->stats_disable_count == 0); |
1899 | efx_stats_disable(efx); | ||
1900 | 475 | ||
1901 | falcon_read(efx, ®, MAC0_CTRL_REG_KER); | 476 | efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL); |
1902 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); | 477 | EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1); |
1903 | falcon_write(efx, ®, MAC0_CTRL_REG_KER); | 478 | efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); |
1904 | 479 | ||
1905 | falcon_read(efx, ®, GLB_CTL_REG_KER); | 480 | efx_reado(efx, ®, FR_AB_GLB_CTL); |
1906 | EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1); | 481 | EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1); |
1907 | EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1); | 482 | EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1); |
1908 | EFX_SET_OWORD_FIELD(reg, RST_EM, 1); | 483 | EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1); |
1909 | falcon_write(efx, ®, GLB_CTL_REG_KER); | 484 | efx_writeo(efx, ®, FR_AB_GLB_CTL); |
1910 | 485 | ||
1911 | count = 0; | 486 | count = 0; |
1912 | while (1) { | 487 | while (1) { |
1913 | falcon_read(efx, ®, GLB_CTL_REG_KER); | 488 | efx_reado(efx, ®, FR_AB_GLB_CTL); |
1914 | if (!EFX_OWORD_FIELD(reg, RST_XGTX) && | 489 | if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && |
1915 | !EFX_OWORD_FIELD(reg, RST_XGRX) && | 490 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && |
1916 | !EFX_OWORD_FIELD(reg, RST_EM)) { | 491 | !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { |
1917 | EFX_LOG(efx, "Completed MAC reset after %d loops\n", | 492 | EFX_LOG(efx, "Completed MAC reset after %d loops\n", |
1918 | count); | 493 | count); |
1919 | break; | 494 | break; |
@@ -1926,55 +501,50 @@ static int falcon_reset_macs(struct efx_nic *efx) | |||
1926 | udelay(10); | 501 | udelay(10); |
1927 | } | 502 | } |
1928 | 503 | ||
1929 | efx_stats_enable(efx); | 504 | /* Ensure the correct MAC is selected before statistics |
1930 | 505 | * are re-enabled by the caller */ | |
1931 | /* If we've reset the EM block and the link is up, then | 506 | efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); |
1932 | * we'll have to kick the XAUI link so the PHY can recover */ | ||
1933 | if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx)) | ||
1934 | falcon_reset_xaui(efx); | ||
1935 | |||
1936 | return 0; | ||
1937 | } | 507 | } |
1938 | 508 | ||
1939 | void falcon_drain_tx_fifo(struct efx_nic *efx) | 509 | void falcon_drain_tx_fifo(struct efx_nic *efx) |
1940 | { | 510 | { |
1941 | efx_oword_t reg; | 511 | efx_oword_t reg; |
1942 | 512 | ||
1943 | if ((falcon_rev(efx) < FALCON_REV_B0) || | 513 | if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) || |
1944 | (efx->loopback_mode != LOOPBACK_NONE)) | 514 | (efx->loopback_mode != LOOPBACK_NONE)) |
1945 | return; | 515 | return; |
1946 | 516 | ||
1947 | falcon_read(efx, ®, MAC0_CTRL_REG_KER); | 517 | efx_reado(efx, ®, FR_AB_MAC_CTRL); |
1948 | /* There is no point in draining more than once */ | 518 | /* There is no point in draining more than once */ |
1949 | if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0)) | 519 | if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) |
1950 | return; | 520 | return; |
1951 | 521 | ||
1952 | falcon_reset_macs(efx); | 522 | falcon_reset_macs(efx); |
1953 | } | 523 | } |
1954 | 524 | ||
1955 | void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | 525 | static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) |
1956 | { | 526 | { |
1957 | efx_oword_t reg; | 527 | efx_oword_t reg; |
1958 | 528 | ||
1959 | if (falcon_rev(efx) < FALCON_REV_B0) | 529 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) |
1960 | return; | 530 | return; |
1961 | 531 | ||
1962 | /* Isolate the MAC -> RX */ | 532 | /* Isolate the MAC -> RX */ |
1963 | falcon_read(efx, ®, RX_CFG_REG_KER); | 533 | efx_reado(efx, ®, FR_AZ_RX_CFG); |
1964 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0); | 534 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0); |
1965 | falcon_write(efx, ®, RX_CFG_REG_KER); | 535 | efx_writeo(efx, ®, FR_AZ_RX_CFG); |
1966 | 536 | ||
1967 | if (!efx->link_up) | 537 | /* Isolate TX -> MAC */ |
1968 | falcon_drain_tx_fifo(efx); | 538 | falcon_drain_tx_fifo(efx); |
1969 | } | 539 | } |
1970 | 540 | ||
1971 | void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | 541 | void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) |
1972 | { | 542 | { |
543 | struct efx_link_state *link_state = &efx->link_state; | ||
1973 | efx_oword_t reg; | 544 | efx_oword_t reg; |
1974 | int link_speed; | 545 | int link_speed; |
1975 | bool tx_fc; | ||
1976 | 546 | ||
1977 | switch (efx->link_speed) { | 547 | switch (link_state->speed) { |
1978 | case 10000: link_speed = 3; break; | 548 | case 10000: link_speed = 3; break; |
1979 | case 1000: link_speed = 2; break; | 549 | case 1000: link_speed = 2; break; |
1980 | case 100: link_speed = 1; break; | 550 | case 100: link_speed = 1; break; |
@@ -1985,75 +555,139 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1985 | * indefinitely held and TX queue can be flushed at any point | 555 | * indefinitely held and TX queue can be flushed at any point |
1986 | * while the link is down. */ | 556 | * while the link is down. */ |
1987 | EFX_POPULATE_OWORD_5(reg, | 557 | EFX_POPULATE_OWORD_5(reg, |
1988 | MAC_XOFF_VAL, 0xffff /* max pause time */, | 558 | FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, |
1989 | MAC_BCAD_ACPT, 1, | 559 | FRF_AB_MAC_BCAD_ACPT, 1, |
1990 | MAC_UC_PROM, efx->promiscuous, | 560 | FRF_AB_MAC_UC_PROM, efx->promiscuous, |
1991 | MAC_LINK_STATUS, 1, /* always set */ | 561 | FRF_AB_MAC_LINK_STATUS, 1, /* always set */ |
1992 | MAC_SPEED, link_speed); | 562 | FRF_AB_MAC_SPEED, link_speed); |
1993 | /* On B0, MAC backpressure can be disabled and packets get | 563 | /* On B0, MAC backpressure can be disabled and packets get |
1994 | * discarded. */ | 564 | * discarded. */ |
1995 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 565 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
1996 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, | 566 | EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, |
1997 | !efx->link_up); | 567 | !link_state->up); |
1998 | } | 568 | } |
1999 | 569 | ||
2000 | falcon_write(efx, ®, MAC0_CTRL_REG_KER); | 570 | efx_writeo(efx, ®, FR_AB_MAC_CTRL); |
2001 | 571 | ||
2002 | /* Restore the multicast hash registers. */ | 572 | /* Restore the multicast hash registers. */ |
2003 | falcon_set_multicast_hash(efx); | 573 | falcon_push_multicast_hash(efx); |
2004 | |||
2005 | /* Transmission of pause frames when RX crosses the threshold is | ||
2006 | * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. | ||
2007 | * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */ | ||
2008 | tx_fc = !!(efx->link_fc & EFX_FC_TX); | ||
2009 | falcon_read(efx, ®, RX_CFG_REG_KER); | ||
2010 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); | ||
2011 | 574 | ||
575 | efx_reado(efx, ®, FR_AZ_RX_CFG); | ||
576 | /* Enable XOFF signal from RX FIFO (we enabled it during NIC | ||
577 | * initialisation but it may read back as 0) */ | ||
578 | EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); | ||
2012 | /* Unisolate the MAC -> RX */ | 579 | /* Unisolate the MAC -> RX */ |
2013 | if (falcon_rev(efx) >= FALCON_REV_B0) | 580 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
2014 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | 581 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); |
2015 | falcon_write(efx, ®, RX_CFG_REG_KER); | 582 | efx_writeo(efx, ®, FR_AZ_RX_CFG); |
2016 | } | 583 | } |
2017 | 584 | ||
2018 | int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | 585 | static void falcon_stats_request(struct efx_nic *efx) |
2019 | { | 586 | { |
587 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
2020 | efx_oword_t reg; | 588 | efx_oword_t reg; |
2021 | u32 *dma_done; | ||
2022 | int i; | ||
2023 | 589 | ||
2024 | if (disable_dma_stats) | 590 | WARN_ON(nic_data->stats_pending); |
2025 | return 0; | 591 | WARN_ON(nic_data->stats_disable_count); |
2026 | 592 | ||
2027 | /* Statistics fetch will fail if the MAC is in TX drain */ | 593 | if (nic_data->stats_dma_done == NULL) |
2028 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 594 | return; /* no mac selected */ |
2029 | efx_oword_t temp; | ||
2030 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | ||
2031 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | ||
2032 | return 0; | ||
2033 | } | ||
2034 | 595 | ||
2035 | dma_done = (efx->stats_buffer.addr + done_offset); | 596 | *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE; |
2036 | *dma_done = FALCON_STATS_NOT_DONE; | 597 | nic_data->stats_pending = true; |
2037 | wmb(); /* ensure done flag is clear */ | 598 | wmb(); /* ensure done flag is clear */ |
2038 | 599 | ||
2039 | /* Initiate DMA transfer of stats */ | 600 | /* Initiate DMA transfer of stats */ |
2040 | EFX_POPULATE_OWORD_2(reg, | 601 | EFX_POPULATE_OWORD_2(reg, |
2041 | MAC_STAT_DMA_CMD, 1, | 602 | FRF_AB_MAC_STAT_DMA_CMD, 1, |
2042 | MAC_STAT_DMA_ADR, | 603 | FRF_AB_MAC_STAT_DMA_ADR, |
2043 | efx->stats_buffer.dma_addr); | 604 | efx->stats_buffer.dma_addr); |
2044 | falcon_write(efx, ®, MAC0_STAT_DMA_REG_KER); | 605 | efx_writeo(efx, ®, FR_AB_MAC_STAT_DMA); |
2045 | 606 | ||
2046 | /* Wait for transfer to complete */ | 607 | mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2)); |
2047 | for (i = 0; i < 400; i++) { | 608 | } |
2048 | if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) { | 609 | |
2049 | rmb(); /* Ensure the stats are valid. */ | 610 | static void falcon_stats_complete(struct efx_nic *efx) |
2050 | return 0; | 611 | { |
2051 | } | 612 | struct falcon_nic_data *nic_data = efx->nic_data; |
2052 | udelay(10); | 613 | |
614 | if (!nic_data->stats_pending) | ||
615 | return; | ||
616 | |||
617 | nic_data->stats_pending = 0; | ||
618 | if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { | ||
619 | rmb(); /* read the done flag before the stats */ | ||
620 | efx->mac_op->update_stats(efx); | ||
621 | } else { | ||
622 | EFX_ERR(efx, "timed out waiting for statistics\n"); | ||
2053 | } | 623 | } |
624 | } | ||
2054 | 625 | ||
2055 | EFX_ERR(efx, "timed out waiting for statistics\n"); | 626 | static void falcon_stats_timer_func(unsigned long context) |
2056 | return -ETIMEDOUT; | 627 | { |
628 | struct efx_nic *efx = (struct efx_nic *)context; | ||
629 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
630 | |||
631 | spin_lock(&efx->stats_lock); | ||
632 | |||
633 | falcon_stats_complete(efx); | ||
634 | if (nic_data->stats_disable_count == 0) | ||
635 | falcon_stats_request(efx); | ||
636 | |||
637 | spin_unlock(&efx->stats_lock); | ||
638 | } | ||
639 | |||
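falcon_stats_request()/falcon_stats_complete() are gated by nic_data->stats_disable_count, which the falcon_stop_nic_stats()/falcon_start_nic_stats() callers seen later are expected to bump and drop; those bodies are outside this hunk, so the sketch below is an assumption about their shape rather than a copy of them.

	static void example_stop_nic_stats(struct efx_nic *efx)
	{
		struct falcon_nic_data *nic_data = efx->nic_data;

		spin_lock_bh(&efx->stats_lock);
		++nic_data->stats_disable_count;	/* nests: timer stops re-arming */
		spin_unlock_bh(&efx->stats_lock);
	}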
640 | static void falcon_switch_mac(struct efx_nic *efx); | ||
641 | |||
642 | static bool falcon_loopback_link_poll(struct efx_nic *efx) | ||
643 | { | ||
644 | struct efx_link_state old_state = efx->link_state; | ||
645 | |||
646 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
647 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | ||
648 | |||
649 | efx->link_state.fd = true; | ||
650 | efx->link_state.fc = efx->wanted_fc; | ||
651 | efx->link_state.up = true; | ||
652 | |||
653 | if (efx->loopback_mode == LOOPBACK_GMAC) | ||
654 | efx->link_state.speed = 1000; | ||
655 | else | ||
656 | efx->link_state.speed = 10000; | ||
657 | |||
658 | return !efx_link_state_equal(&efx->link_state, &old_state); | ||
659 | } | ||
660 | |||
661 | static int falcon_reconfigure_port(struct efx_nic *efx) | ||
662 | { | ||
663 | int rc; | ||
664 | |||
665 | WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0); | ||
666 | |||
667 | /* Poll the PHY link state *before* reconfiguring it. This means we | ||
668 | * will pick up the correct speed (in loopback) to select the correct | ||
669 | * MAC. | ||
670 | */ | ||
671 | if (LOOPBACK_INTERNAL(efx)) | ||
672 | falcon_loopback_link_poll(efx); | ||
673 | else | ||
674 | efx->phy_op->poll(efx); | ||
675 | |||
676 | falcon_stop_nic_stats(efx); | ||
677 | falcon_deconfigure_mac_wrapper(efx); | ||
678 | |||
679 | falcon_switch_mac(efx); | ||
680 | |||
681 | efx->phy_op->reconfigure(efx); | ||
682 | rc = efx->mac_op->reconfigure(efx); | ||
683 | BUG_ON(rc); | ||
684 | |||
685 | falcon_start_nic_stats(efx); | ||
686 | |||
687 | /* Synchronise efx->link_state with the kernel */ | ||
688 | efx_link_status_changed(efx); | ||
689 | |||
690 | return 0; | ||
2057 | } | 691 | } |
2058 | 692 | ||
2059 | /************************************************************************** | 693 | /************************************************************************** |
@@ -2066,18 +700,18 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | |||
2066 | /* Wait for GMII access to complete */ | 700 | /* Wait for GMII access to complete */ |
2067 | static int falcon_gmii_wait(struct efx_nic *efx) | 701 | static int falcon_gmii_wait(struct efx_nic *efx) |
2068 | { | 702 | { |
2069 | efx_dword_t md_stat; | 703 | efx_oword_t md_stat; |
2070 | int count; | 704 | int count; |
2071 | 705 | ||
2072 | /* wait up to 50ms - maximum taken from datasheet */ | 706 | /* wait up to 50ms - maximum taken from datasheet */ |
2073 | for (count = 0; count < 5000; count++) { | 707 | for (count = 0; count < 5000; count++) { |
2074 | falcon_readl(efx, &md_stat, MD_STAT_REG_KER); | 708 | efx_reado(efx, &md_stat, FR_AB_MD_STAT); |
2075 | if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { | 709 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { |
2076 | if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || | 710 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || |
2077 | EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { | 711 | EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { |
2078 | EFX_ERR(efx, "error from GMII access " | 712 | EFX_ERR(efx, "error from GMII access " |
2079 | EFX_DWORD_FMT"\n", | 713 | EFX_OWORD_FMT"\n", |
2080 | EFX_DWORD_VAL(md_stat)); | 714 | EFX_OWORD_VAL(md_stat)); |
2081 | return -EIO; | 715 | return -EIO; |
2082 | } | 716 | } |
2083 | return 0; | 717 | return 0; |
@@ -2099,7 +733,7 @@ static int falcon_mdio_write(struct net_device *net_dev, | |||
2099 | EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", | 733 | EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", |
2100 | prtad, devad, addr, value); | 734 | prtad, devad, addr, value); |
2101 | 735 | ||
2102 | spin_lock_bh(&efx->phy_lock); | 736 | mutex_lock(&efx->mdio_lock); |
2103 | 737 | ||
2104 | /* Check MDIO not currently being accessed */ | 738 | /* Check MDIO not currently being accessed */ |
2105 | rc = falcon_gmii_wait(efx); | 739 | rc = falcon_gmii_wait(efx); |
@@ -2107,34 +741,35 @@ static int falcon_mdio_write(struct net_device *net_dev, | |||
2107 | goto out; | 741 | goto out; |
2108 | 742 | ||
2109 | /* Write the address/ID register */ | 743 | /* Write the address/ID register */ |
2110 | EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); | 744 | EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); |
2111 | falcon_write(efx, ®, MD_PHY_ADR_REG_KER); | 745 | efx_writeo(efx, ®, FR_AB_MD_PHY_ADR); |
2112 | 746 | ||
2113 | EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); | 747 | EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, |
2114 | falcon_write(efx, ®, MD_ID_REG_KER); | 748 | FRF_AB_MD_DEV_ADR, devad); |
749 | efx_writeo(efx, ®, FR_AB_MD_ID); | ||
2115 | 750 | ||
2116 | /* Write data */ | 751 | /* Write data */ |
2117 | EFX_POPULATE_OWORD_1(reg, MD_TXD, value); | 752 | EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value); |
2118 | falcon_write(efx, ®, MD_TXD_REG_KER); | 753 | efx_writeo(efx, ®, FR_AB_MD_TXD); |
2119 | 754 | ||
2120 | EFX_POPULATE_OWORD_2(reg, | 755 | EFX_POPULATE_OWORD_2(reg, |
2121 | MD_WRC, 1, | 756 | FRF_AB_MD_WRC, 1, |
2122 | MD_GC, 0); | 757 | FRF_AB_MD_GC, 0); |
2123 | falcon_write(efx, ®, MD_CS_REG_KER); | 758 | efx_writeo(efx, ®, FR_AB_MD_CS); |
2124 | 759 | ||
2125 | /* Wait for data to be written */ | 760 | /* Wait for data to be written */ |
2126 | rc = falcon_gmii_wait(efx); | 761 | rc = falcon_gmii_wait(efx); |
2127 | if (rc) { | 762 | if (rc) { |
2128 | /* Abort the write operation */ | 763 | /* Abort the write operation */ |
2129 | EFX_POPULATE_OWORD_2(reg, | 764 | EFX_POPULATE_OWORD_2(reg, |
2130 | MD_WRC, 0, | 765 | FRF_AB_MD_WRC, 0, |
2131 | MD_GC, 1); | 766 | FRF_AB_MD_GC, 1); |
2132 | falcon_write(efx, ®, MD_CS_REG_KER); | 767 | efx_writeo(efx, ®, FR_AB_MD_CS); |
2133 | udelay(10); | 768 | udelay(10); |
2134 | } | 769 | } |
2135 | 770 | ||
2136 | out: | 771 | out: |
2137 | spin_unlock_bh(&efx->phy_lock); | 772 | mutex_unlock(&efx->mdio_lock); |
2138 | return rc; | 773 | return rc; |
2139 | } | 774 | } |
2140 | 775 | ||
@@ -2146,152 +781,141 @@ static int falcon_mdio_read(struct net_device *net_dev, | |||
2146 | efx_oword_t reg; | 781 | efx_oword_t reg; |
2147 | int rc; | 782 | int rc; |
2148 | 783 | ||
2149 | spin_lock_bh(&efx->phy_lock); | 784 | mutex_lock(&efx->mdio_lock); |
2150 | 785 | ||
2151 | /* Check MDIO not currently being accessed */ | 786 | /* Check MDIO not currently being accessed */ |
2152 | rc = falcon_gmii_wait(efx); | 787 | rc = falcon_gmii_wait(efx); |
2153 | if (rc) | 788 | if (rc) |
2154 | goto out; | 789 | goto out; |
2155 | 790 | ||
2156 | EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); | 791 | EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); |
2157 | falcon_write(efx, ®, MD_PHY_ADR_REG_KER); | 792 | efx_writeo(efx, ®, FR_AB_MD_PHY_ADR); |
2158 | 793 | ||
2159 | EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); | 794 | EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, |
2160 | falcon_write(efx, ®, MD_ID_REG_KER); | 795 | FRF_AB_MD_DEV_ADR, devad); |
796 | efx_writeo(efx, ®, FR_AB_MD_ID); | ||
2161 | 797 | ||
2162 | /* Request data to be read */ | 798 | /* Request data to be read */ |
2163 | EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); | 799 | EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); |
2164 | falcon_write(efx, ®, MD_CS_REG_KER); | 800 | efx_writeo(efx, ®, FR_AB_MD_CS); |
2165 | 801 | ||
2166 | /* Wait for data to become available */ | 802 | /* Wait for data to become available */ |
2167 | rc = falcon_gmii_wait(efx); | 803 | rc = falcon_gmii_wait(efx); |
2168 | if (rc == 0) { | 804 | if (rc == 0) { |
2169 | falcon_read(efx, ®, MD_RXD_REG_KER); | 805 | efx_reado(efx, ®, FR_AB_MD_RXD); |
2170 | rc = EFX_OWORD_FIELD(reg, MD_RXD); | 806 | rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); |
2171 | EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", | 807 | EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", |
2172 | prtad, devad, addr, rc); | 808 | prtad, devad, addr, rc); |
2173 | } else { | 809 | } else { |
2174 | /* Abort the read operation */ | 810 | /* Abort the read operation */ |
2175 | EFX_POPULATE_OWORD_2(reg, | 811 | EFX_POPULATE_OWORD_2(reg, |
2176 | MD_RIC, 0, | 812 | FRF_AB_MD_RIC, 0, |
2177 | MD_GC, 1); | 813 | FRF_AB_MD_GC, 1); |
2178 | falcon_write(efx, ®, MD_CS_REG_KER); | 814 | efx_writeo(efx, ®, FR_AB_MD_CS); |
2179 | 815 | ||
2180 | EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", | 816 | EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", |
2181 | prtad, devad, addr, rc); | 817 | prtad, devad, addr, rc); |
2182 | } | 818 | } |
2183 | 819 | ||
2184 | out: | 820 | out: |
2185 | spin_unlock_bh(&efx->phy_lock); | 821 | mutex_unlock(&efx->mdio_lock); |
2186 | return rc; | 822 | return rc; |
2187 | } | 823 | } |
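The MDIO path above is a fixed handshake: take the (now mutex-based) MDIO lock, wait for the controller to go idle, program FR_AB_MD_PHY_ADR and FR_AB_MD_ID, kick the access with FRF_AB_MD_WRC/FRF_AB_MD_RDC, wait again, then either fetch FR_AB_MD_RXD or abort via FRF_AB_MD_GC on timeout. A minimal user-space sketch of the read half of that sequence, with a mocked register block standing in for efx_writeo()/efx_reado(); the struct and helper names are invented for illustration, not driver API:

#include <stdio.h>

/* Stand-in for the FR_AB_MD_* register block. */
struct mdio_mock {
    unsigned phy_adr, prt_adr, dev_adr;   /* MD_PHY_ADR and MD_ID fields */
    int busy;                             /* what falcon_gmii_wait() polls */
    unsigned rxd;                         /* MD_RXD */
};

/* falcon_gmii_wait() equivalent: wait for any previous access to finish. */
static int mdio_wait(const struct mdio_mock *m)
{
    return m->busy ? -1 : 0;              /* real code polls with a timeout */
}

/* Clause-45 read: program the address, request the read (MD_RDC), wait,
 * then fetch MD_RXD -- or abort with MD_GC on timeout. */
static int mdio_read(struct mdio_mock *m, unsigned prtad, unsigned devad,
                     unsigned addr)
{
    if (mdio_wait(m))
        return -1;

    m->phy_adr = addr;
    m->prt_adr = prtad;
    m->dev_adr = devad;

    m->busy = 1;                          /* FRF_AB_MD_RDC = 1 */
    m->rxd = 0xabcd;                      /* pretend the PHY answered */
    m->busy = 0;

    if (mdio_wait(m))
        return -1;                        /* would write FRF_AB_MD_GC = 1 */
    return (int)m->rxd;
}

int main(void)
{
    struct mdio_mock m = { 0 };
    printf("MMD 1, register 0 on port 0 reads 0x%04x\n",
           (unsigned)mdio_read(&m, 0, 1, 0));
    return 0;
}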
2188 | 824 | ||
2189 | static int falcon_probe_phy(struct efx_nic *efx) | 825 | static void falcon_clock_mac(struct efx_nic *efx) |
2190 | { | 826 | { |
2191 | switch (efx->phy_type) { | 827 | unsigned strap_val; |
2192 | case PHY_TYPE_SFX7101: | 828 | efx_oword_t nic_stat; |
2193 | efx->phy_op = &falcon_sfx7101_phy_ops; | ||
2194 | break; | ||
2195 | case PHY_TYPE_SFT9001A: | ||
2196 | case PHY_TYPE_SFT9001B: | ||
2197 | efx->phy_op = &falcon_sft9001_phy_ops; | ||
2198 | break; | ||
2199 | case PHY_TYPE_QT2022C2: | ||
2200 | case PHY_TYPE_QT2025C: | ||
2201 | efx->phy_op = &falcon_xfp_phy_ops; | ||
2202 | break; | ||
2203 | default: | ||
2204 | EFX_ERR(efx, "Unknown PHY type %d\n", | ||
2205 | efx->phy_type); | ||
2206 | return -1; | ||
2207 | } | ||
2208 | |||
2209 | if (efx->phy_op->macs & EFX_XMAC) | ||
2210 | efx->loopback_modes |= ((1 << LOOPBACK_XGMII) | | ||
2211 | (1 << LOOPBACK_XGXS) | | ||
2212 | (1 << LOOPBACK_XAUI)); | ||
2213 | if (efx->phy_op->macs & EFX_GMAC) | ||
2214 | efx->loopback_modes |= (1 << LOOPBACK_GMAC); | ||
2215 | efx->loopback_modes |= efx->phy_op->loopbacks; | ||
2216 | 829 | ||
2217 | return 0; | 830 | /* Configure the NIC generated MAC clock correctly */ |
831 | efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); | ||
832 | strap_val = EFX_IS10G(efx) ? 5 : 3; | ||
833 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
834 | EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1); | ||
835 | EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val); | ||
836 | efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT); | ||
837 | } else { | ||
838 | /* Falcon A1 does not support 1G/10G speed switching | ||
839 | * and must not be used with a PHY that does. */ | ||
840 | BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) != | ||
841 | strap_val); | ||
842 | } | ||
2218 | } | 843 | } |
2219 | 844 | ||
2220 | int falcon_switch_mac(struct efx_nic *efx) | 845 | static void falcon_switch_mac(struct efx_nic *efx) |
2221 | { | 846 | { |
2222 | struct efx_mac_operations *old_mac_op = efx->mac_op; | 847 | struct efx_mac_operations *old_mac_op = efx->mac_op; |
2223 | efx_oword_t nic_stat; | 848 | struct falcon_nic_data *nic_data = efx->nic_data; |
2224 | unsigned strap_val; | 849 | unsigned int stats_done_offset; |
2225 | int rc = 0; | ||
2226 | |||
2227 | /* Don't try to fetch MAC stats while we're switching MACs */ | ||
2228 | efx_stats_disable(efx); | ||
2229 | |||
2230 | /* Internal loopbacks override the phy speed setting */ | ||
2231 | if (efx->loopback_mode == LOOPBACK_GMAC) { | ||
2232 | efx->link_speed = 1000; | ||
2233 | efx->link_fd = true; | ||
2234 | } else if (LOOPBACK_INTERNAL(efx)) { | ||
2235 | efx->link_speed = 10000; | ||
2236 | efx->link_fd = true; | ||
2237 | } | ||
2238 | 850 | ||
2239 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 851 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
852 | WARN_ON(nic_data->stats_disable_count == 0); | ||
853 | |||
2240 | efx->mac_op = (EFX_IS10G(efx) ? | 854 | efx->mac_op = (EFX_IS10G(efx) ? |
2241 | &falcon_xmac_operations : &falcon_gmac_operations); | 855 | &falcon_xmac_operations : &falcon_gmac_operations); |
2242 | 856 | ||
2243 | /* Always push the NIC_STAT_REG setting even if the mac hasn't | 857 | if (EFX_IS10G(efx)) |
2244 | * changed, because this function is run post online reset */ | 858 | stats_done_offset = XgDmaDone_offset; |
2245 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | 859 | else |
2246 | strap_val = EFX_IS10G(efx) ? 5 : 3; | 860 | stats_done_offset = GDmaDone_offset; |
2247 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 861 | nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset; |
2248 | EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1); | ||
2249 | EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val); | ||
2250 | falcon_write(efx, &nic_stat, NIC_STAT_REG); | ||
2251 | } else { | ||
2252 | /* Falcon A1 does not support 1G/10G speed switching | ||
2253 | * and must not be used with a PHY that does. */ | ||
2254 | BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); | ||
2255 | } | ||
2256 | 862 | ||
2257 | if (old_mac_op == efx->mac_op) | 863 | if (old_mac_op == efx->mac_op) |
2258 | goto out; | 864 | return; |
865 | |||
866 | falcon_clock_mac(efx); | ||
2259 | 867 | ||
2260 | EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); | 868 | EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); |
2261 | /* Not all macs support a mac-level link state */ | 869 | /* Not all macs support a mac-level link state */ |
2262 | efx->mac_up = true; | 870 | efx->xmac_poll_required = false; |
2263 | 871 | falcon_reset_macs(efx); | |
2264 | rc = falcon_reset_macs(efx); | ||
2265 | out: | ||
2266 | efx_stats_enable(efx); | ||
2267 | return rc; | ||
2268 | } | 872 | } |
2269 | 873 | ||
2270 | /* This call is responsible for hooking in the MAC and PHY operations */ | 874 | /* This call is responsible for hooking in the MAC and PHY operations */ |
2271 | int falcon_probe_port(struct efx_nic *efx) | 875 | static int falcon_probe_port(struct efx_nic *efx) |
2272 | { | 876 | { |
2273 | int rc; | 877 | int rc; |
2274 | 878 | ||
2275 | /* Hook in PHY operations table */ | 879 | switch (efx->phy_type) { |
2276 | rc = falcon_probe_phy(efx); | 880 | case PHY_TYPE_SFX7101: |
2277 | if (rc) | 881 | efx->phy_op = &falcon_sfx7101_phy_ops; |
2278 | return rc; | 882 | break; |
883 | case PHY_TYPE_SFT9001A: | ||
884 | case PHY_TYPE_SFT9001B: | ||
885 | efx->phy_op = &falcon_sft9001_phy_ops; | ||
886 | break; | ||
887 | case PHY_TYPE_QT2022C2: | ||
888 | case PHY_TYPE_QT2025C: | ||
889 | efx->phy_op = &falcon_qt202x_phy_ops; | ||
890 | break; | ||
891 | default: | ||
892 | EFX_ERR(efx, "Unknown PHY type %d\n", | ||
893 | efx->phy_type); | ||
894 | return -ENODEV; | ||
895 | } | ||
2279 | 896 | ||
2280 | /* Set up MDIO structure for PHY */ | 897 | /* Fill out MDIO structure and loopback modes */ |
2281 | efx->mdio.mmds = efx->phy_op->mmds; | ||
2282 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
2283 | efx->mdio.mdio_read = falcon_mdio_read; | 898 | efx->mdio.mdio_read = falcon_mdio_read; |
2284 | efx->mdio.mdio_write = falcon_mdio_write; | 899 | efx->mdio.mdio_write = falcon_mdio_write; |
900 | rc = efx->phy_op->probe(efx); | ||
901 | if (rc != 0) | ||
902 | return rc; | ||
903 | |||
904 | /* Initial assumption */ | ||
905 | efx->link_state.speed = 10000; | ||
906 | efx->link_state.fd = true; | ||
2285 | 907 | ||
2286 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ | 908 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ |
2287 | if (falcon_rev(efx) >= FALCON_REV_B0) | 909 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
2288 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; | 910 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; |
2289 | else | 911 | else |
2290 | efx->wanted_fc = EFX_FC_RX; | 912 | efx->wanted_fc = EFX_FC_RX; |
913 | if (efx->mdio.mmds & MDIO_DEVS_AN) | ||
914 | efx->wanted_fc |= EFX_FC_AUTO; | ||
2291 | 915 | ||
2292 | /* Allocate buffer for stats */ | 916 | /* Allocate buffer for stats */ |
2293 | rc = falcon_alloc_buffer(efx, &efx->stats_buffer, | 917 | rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, |
2294 | FALCON_MAC_STATS_SIZE); | 918 | FALCON_MAC_STATS_SIZE); |
2295 | if (rc) | 919 | if (rc) |
2296 | return rc; | 920 | return rc; |
2297 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", | 921 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", |
@@ -2302,40 +926,20 @@ int falcon_probe_port(struct efx_nic *efx) | |||
2302 | return 0; | 926 | return 0; |
2303 | } | 927 | } |
2304 | 928 | ||
2305 | void falcon_remove_port(struct efx_nic *efx) | 929 | static void falcon_remove_port(struct efx_nic *efx) |
2306 | { | 930 | { |
2307 | falcon_free_buffer(efx, &efx->stats_buffer); | 931 | efx->phy_op->remove(efx); |
932 | efx_nic_free_buffer(efx, &efx->stats_buffer); | ||
2308 | } | 933 | } |
2309 | 934 | ||
2310 | /************************************************************************** | 935 | /************************************************************************** |
2311 | * | 936 | * |
2312 | * Multicast filtering | ||
2313 | * | ||
2314 | ************************************************************************** | ||
2315 | */ | ||
2316 | |||
2317 | void falcon_set_multicast_hash(struct efx_nic *efx) | ||
2318 | { | ||
2319 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; | ||
2320 | |||
2321 | /* Broadcast packets go through the multicast hash filter. | ||
2322 | * ether_crc_le() of the broadcast address is 0xbe2612ff | ||
2323 | * so we always add bit 0xff to the mask. | ||
2324 | */ | ||
2325 | set_bit_le(0xff, mc_hash->byte); | ||
2326 | |||
2327 | falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER); | ||
2328 | falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); | ||
2329 | } | ||
2330 | |||
2331 | |||
2332 | /************************************************************************** | ||
2333 | * | ||
2334 | * Falcon test code | 937 | * Falcon test code |
2335 | * | 938 | * |
2336 | **************************************************************************/ | 939 | **************************************************************************/ |
2337 | 940 | ||
2338 | int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | 941 | static int |
942 | falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | ||
2339 | { | 943 | { |
2340 | struct falcon_nvconfig *nvconfig; | 944 | struct falcon_nvconfig *nvconfig; |
2341 | struct efx_spi_device *spi; | 945 | struct efx_spi_device *spi; |
@@ -2351,10 +955,10 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
2351 | region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); | 955 | region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); |
2352 | if (!region) | 956 | if (!region) |
2353 | return -ENOMEM; | 957 | return -ENOMEM; |
2354 | nvconfig = region + NVCONFIG_OFFSET; | 958 | nvconfig = region + FALCON_NVCONFIG_OFFSET; |
2355 | 959 | ||
2356 | mutex_lock(&efx->spi_lock); | 960 | mutex_lock(&efx->spi_lock); |
2357 | rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); | 961 | rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); |
2358 | mutex_unlock(&efx->spi_lock); | 962 | mutex_unlock(&efx->spi_lock); |
2359 | if (rc) { | 963 | if (rc) { |
2360 | EFX_ERR(efx, "Failed to read %s\n", | 964 | EFX_ERR(efx, "Failed to read %s\n", |
@@ -2367,7 +971,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
2367 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); | 971 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); |
2368 | 972 | ||
2369 | rc = -EINVAL; | 973 | rc = -EINVAL; |
2370 | if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { | 974 | if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { |
2371 | EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); | 975 | EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); |
2372 | goto out; | 976 | goto out; |
2373 | } | 977 | } |
@@ -2398,107 +1002,54 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | |||
2398 | return rc; | 1002 | return rc; |
2399 | } | 1003 | } |
2400 | 1004 | ||
2401 | /* Registers tested in the falcon register test */ | 1005 | static int falcon_test_nvram(struct efx_nic *efx) |
2402 | static struct { | 1006 | { |
2403 | unsigned address; | 1007 | return falcon_read_nvram(efx, NULL); |
2404 | efx_oword_t mask; | 1008 | } |
2405 | } efx_test_registers[] = { | 1009 | |
2406 | { ADR_REGION_REG_KER, | 1010 | static const struct efx_nic_register_test falcon_b0_register_tests[] = { |
2407 | EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, | 1011 | { FR_AZ_ADR_REGION, |
2408 | { RX_CFG_REG_KER, | 1012 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, |
1013 | { FR_AZ_RX_CFG, | ||
2409 | EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, | 1014 | EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, |
2410 | { TX_CFG_REG_KER, | 1015 | { FR_AZ_TX_CFG, |
2411 | EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, | 1016 | EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, |
2412 | { TX_CFG2_REG_KER, | 1017 | { FR_AZ_TX_RESERVED, |
2413 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, | 1018 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, |
2414 | { MAC0_CTRL_REG_KER, | 1019 | { FR_AB_MAC_CTRL, |
2415 | EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, | 1020 | EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, |
2416 | { SRM_TX_DC_CFG_REG_KER, | 1021 | { FR_AZ_SRM_TX_DC_CFG, |
2417 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, | 1022 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, |
2418 | { RX_DC_CFG_REG_KER, | 1023 | { FR_AZ_RX_DC_CFG, |
2419 | EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, | 1024 | EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, |
2420 | { RX_DC_PF_WM_REG_KER, | 1025 | { FR_AZ_RX_DC_PF_WM, |
2421 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, | 1026 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, |
2422 | { DP_CTRL_REG, | 1027 | { FR_BZ_DP_CTRL, |
2423 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, | 1028 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, |
2424 | { GM_CFG2_REG, | 1029 | { FR_AB_GM_CFG2, |
2425 | EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, | 1030 | EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, |
2426 | { GMF_CFG0_REG, | 1031 | { FR_AB_GMF_CFG0, |
2427 | EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, | 1032 | EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, |
2428 | { XM_GLB_CFG_REG, | 1033 | { FR_AB_XM_GLB_CFG, |
2429 | EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, | 1034 | EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, |
2430 | { XM_TX_CFG_REG, | 1035 | { FR_AB_XM_TX_CFG, |
2431 | EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, | 1036 | EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, |
2432 | { XM_RX_CFG_REG, | 1037 | { FR_AB_XM_RX_CFG, |
2433 | EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, | 1038 | EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, |
2434 | { XM_RX_PARAM_REG, | 1039 | { FR_AB_XM_RX_PARAM, |
2435 | EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, | 1040 | EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, |
2436 | { XM_FC_REG, | 1041 | { FR_AB_XM_FC, |
2437 | EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, | 1042 | EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, |
2438 | { XM_ADR_LO_REG, | 1043 | { FR_AB_XM_ADR_LO, |
2439 | EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, | 1044 | EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, |
2440 | { XX_SD_CTL_REG, | 1045 | { FR_AB_XX_SD_CTL, |
2441 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, | 1046 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, |
2442 | }; | 1047 | }; |
2443 | 1048 | ||
2444 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | 1049 | static int falcon_b0_test_registers(struct efx_nic *efx) |
2445 | const efx_oword_t *mask) | ||
2446 | { | ||
2447 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
2448 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
2449 | } | ||
2450 | |||
2451 | int falcon_test_registers(struct efx_nic *efx) | ||
2452 | { | 1050 | { |
2453 | unsigned address = 0, i, j; | 1051 | return efx_nic_test_registers(efx, falcon_b0_register_tests, |
2454 | efx_oword_t mask, imask, original, reg, buf; | 1052 | ARRAY_SIZE(falcon_b0_register_tests)); |
2455 | |||
2456 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ | ||
2457 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | ||
2458 | |||
2459 | for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) { | ||
2460 | address = efx_test_registers[i].address; | ||
2461 | mask = imask = efx_test_registers[i].mask; | ||
2462 | EFX_INVERT_OWORD(imask); | ||
2463 | |||
2464 | falcon_read(efx, &original, address); | ||
2465 | |||
2466 | /* bit sweep on and off */ | ||
2467 | for (j = 0; j < 128; j++) { | ||
2468 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | ||
2469 | continue; | ||
2470 | |||
2471 | /* Test this testable bit can be set in isolation */ | ||
2472 | EFX_AND_OWORD(reg, original, mask); | ||
2473 | EFX_SET_OWORD32(reg, j, j, 1); | ||
2474 | |||
2475 | falcon_write(efx, ®, address); | ||
2476 | falcon_read(efx, &buf, address); | ||
2477 | |||
2478 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
2479 | goto fail; | ||
2480 | |||
2481 | /* Test this testable bit can be cleared in isolation */ | ||
2482 | EFX_OR_OWORD(reg, original, mask); | ||
2483 | EFX_SET_OWORD32(reg, j, j, 0); | ||
2484 | |||
2485 | falcon_write(efx, ®, address); | ||
2486 | falcon_read(efx, &buf, address); | ||
2487 | |||
2488 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
2489 | goto fail; | ||
2490 | } | ||
2491 | |||
2492 | falcon_write(efx, &original, address); | ||
2493 | } | ||
2494 | |||
2495 | return 0; | ||
2496 | |||
2497 | fail: | ||
2498 | EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | ||
2499 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | ||
2500 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
2501 | return -EIO; | ||
2502 | } | 1053 | } |
2503 | 1054 | ||
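The open-coded loop removed above (its replacement is the falcon_b0_register_tests[] table handed to efx_nic_test_registers() in nic.c) sweeps every testable bit of each register: set it in isolation, read back, clear it in isolation, read back, comparing only the bits the mask marks as testable. A cut-down sketch of the same idea on plain 64-bit words, with a hypothetical reg_read()/reg_write() pair in place of efx_reado()/efx_writeo():

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_reg;                      /* one mock NIC register */
static void reg_write(uint64_t v) { fake_reg = v; }
static uint64_t reg_read(void)    { return fake_reg; }

/* Do a and b differ in any bit the mask marks as testable? */
static int masked_differs(uint64_t a, uint64_t b, uint64_t mask)
{
    return ((a ^ b) & mask) != 0;
}

static int bit_sweep_test(uint64_t mask)
{
    uint64_t original = reg_read();
    int j;

    for (j = 0; j < 64; j++) {
        uint64_t bit = (uint64_t)1 << j;
        uint64_t want;

        if (!(mask & bit))
            continue;

        /* The bit must be settable in isolation: start from
         * original & mask, then force it on. */
        want = (original & mask) | bit;
        reg_write(want);
        if (masked_differs(want, reg_read(), mask))
            return -1;

        /* ...and clearable in isolation: start from
         * original | mask, then force it off. */
        want = (original | mask) & ~bit;
        reg_write(want);
        if (masked_differs(want, reg_read(), mask))
            return -1;
    }

    reg_write(original);                   /* restore the register */
    return 0;
}

int main(void)
{
    reg_write(0x123456789abcdef0ULL);
    printf("register test: %s\n",
           bit_sweep_test(0xffffULL) ? "FAILED" : "passed");
    return 0;
}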
2504 | /************************************************************************** | 1055 | /************************************************************************** |
@@ -2510,13 +1061,13 @@ fail: | |||
2510 | 1061 | ||
2511 | /* Resets NIC to known state. This routine must be called in process | 1062 | /* Resets NIC to known state. This routine must be called in process |
2512 | * context and is allowed to sleep. */ | 1063 | * context and is allowed to sleep. */ |
2513 | int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | 1064 | static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) |
2514 | { | 1065 | { |
2515 | struct falcon_nic_data *nic_data = efx->nic_data; | 1066 | struct falcon_nic_data *nic_data = efx->nic_data; |
2516 | efx_oword_t glb_ctl_reg_ker; | 1067 | efx_oword_t glb_ctl_reg_ker; |
2517 | int rc; | 1068 | int rc; |
2518 | 1069 | ||
2519 | EFX_LOG(efx, "performing hardware reset (%d)\n", method); | 1070 | EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method)); |
2520 | 1071 | ||
2521 | /* Initiate device reset */ | 1072 | /* Initiate device reset */ |
2522 | if (method == RESET_TYPE_WORLD) { | 1073 | if (method == RESET_TYPE_WORLD) { |
@@ -2526,7 +1077,7 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
2526 | "function prior to hardware reset\n"); | 1077 | "function prior to hardware reset\n"); |
2527 | goto fail1; | 1078 | goto fail1; |
2528 | } | 1079 | } |
2529 | if (FALCON_IS_DUAL_FUNC(efx)) { | 1080 | if (efx_nic_is_dual_func(efx)) { |
2530 | rc = pci_save_state(nic_data->pci_dev2); | 1081 | rc = pci_save_state(nic_data->pci_dev2); |
2531 | if (rc) { | 1082 | if (rc) { |
2532 | EFX_ERR(efx, "failed to backup PCI state of " | 1083 | EFX_ERR(efx, "failed to backup PCI state of " |
@@ -2537,29 +1088,31 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
2537 | } | 1088 | } |
2538 | 1089 | ||
2539 | EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, | 1090 | EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, |
2540 | EXT_PHY_RST_DUR, 0x7, | 1091 | FRF_AB_EXT_PHY_RST_DUR, |
2541 | SWRST, 1); | 1092 | FFE_AB_EXT_PHY_RST_DUR_10240US, |
1093 | FRF_AB_SWRST, 1); | ||
2542 | } else { | 1094 | } else { |
2543 | int reset_phy = (method == RESET_TYPE_INVISIBLE ? | ||
2544 | EXCLUDE_FROM_RESET : 0); | ||
2545 | |||
2546 | EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, | 1095 | EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, |
2547 | EXT_PHY_RST_CTL, reset_phy, | 1096 | /* exclude PHY from "invisible" reset */ |
2548 | PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, | 1097 | FRF_AB_EXT_PHY_RST_CTL, |
2549 | PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, | 1098 | method == RESET_TYPE_INVISIBLE, |
2550 | PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, | 1099 | /* exclude EEPROM/flash and PCIe */ |
2551 | EE_RST_CTL, EXCLUDE_FROM_RESET, | 1100 | FRF_AB_PCIE_CORE_RST_CTL, 1, |
2552 | EXT_PHY_RST_DUR, 0x7 /* 10ms */, | 1101 | FRF_AB_PCIE_NSTKY_RST_CTL, 1, |
2553 | SWRST, 1); | 1102 | FRF_AB_PCIE_SD_RST_CTL, 1, |
2554 | } | 1103 | FRF_AB_EE_RST_CTL, 1, |
2555 | falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); | 1104 | FRF_AB_EXT_PHY_RST_DUR, |
1105 | FFE_AB_EXT_PHY_RST_DUR_10240US, | ||
1106 | FRF_AB_SWRST, 1); | ||
1107 | } | ||
1108 | efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); | ||
2556 | 1109 | ||
2557 | EFX_LOG(efx, "waiting for hardware reset\n"); | 1110 | EFX_LOG(efx, "waiting for hardware reset\n"); |
2558 | schedule_timeout_uninterruptible(HZ / 20); | 1111 | schedule_timeout_uninterruptible(HZ / 20); |
2559 | 1112 | ||
2560 | /* Restore PCI configuration if needed */ | 1113 | /* Restore PCI configuration if needed */ |
2561 | if (method == RESET_TYPE_WORLD) { | 1114 | if (method == RESET_TYPE_WORLD) { |
2562 | if (FALCON_IS_DUAL_FUNC(efx)) { | 1115 | if (efx_nic_is_dual_func(efx)) { |
2563 | rc = pci_restore_state(nic_data->pci_dev2); | 1116 | rc = pci_restore_state(nic_data->pci_dev2); |
2564 | if (rc) { | 1117 | if (rc) { |
2565 | EFX_ERR(efx, "failed to restore PCI config for " | 1118 | EFX_ERR(efx, "failed to restore PCI config for " |
@@ -2577,8 +1130,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
2577 | } | 1130 | } |
2578 | 1131 | ||
2579 | /* Assert that reset complete */ | 1132 | /* Assert that reset complete */ |
2580 | falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); | 1133 | efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); |
2581 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { | 1134 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { |
2582 | rc = -ETIMEDOUT; | 1135 | rc = -ETIMEDOUT; |
2583 | EFX_ERR(efx, "timed out waiting for hardware reset\n"); | 1136 | EFX_ERR(efx, "timed out waiting for hardware reset\n"); |
2584 | goto fail5; | 1137 | goto fail5; |
@@ -2597,6 +1150,44 @@ fail5: | |||
2597 | return rc; | 1150 | return rc; |
2598 | } | 1151 | } |
2599 | 1152 | ||
1153 | static void falcon_monitor(struct efx_nic *efx) | ||
1154 | { | ||
1155 | bool link_changed; | ||
1156 | int rc; | ||
1157 | |||
1158 | BUG_ON(!mutex_is_locked(&efx->mac_lock)); | ||
1159 | |||
1160 | rc = falcon_board(efx)->type->monitor(efx); | ||
1161 | if (rc) { | ||
1162 | EFX_ERR(efx, "Board sensor %s; shutting down PHY\n", | ||
1163 | (rc == -ERANGE) ? "reported fault" : "failed"); | ||
1164 | efx->phy_mode |= PHY_MODE_LOW_POWER; | ||
1165 | rc = __efx_reconfigure_port(efx); | ||
1166 | WARN_ON(rc); | ||
1167 | } | ||
1168 | |||
1169 | if (LOOPBACK_INTERNAL(efx)) | ||
1170 | link_changed = falcon_loopback_link_poll(efx); | ||
1171 | else | ||
1172 | link_changed = efx->phy_op->poll(efx); | ||
1173 | |||
1174 | if (link_changed) { | ||
1175 | falcon_stop_nic_stats(efx); | ||
1176 | falcon_deconfigure_mac_wrapper(efx); | ||
1177 | |||
1178 | falcon_switch_mac(efx); | ||
1179 | rc = efx->mac_op->reconfigure(efx); | ||
1180 | BUG_ON(rc); | ||
1181 | |||
1182 | falcon_start_nic_stats(efx); | ||
1183 | |||
1184 | efx_link_status_changed(efx); | ||
1185 | } | ||
1186 | |||
1187 | if (EFX_IS10G(efx)) | ||
1188 | falcon_poll_xmac(efx); | ||
1189 | } | ||
1190 | |||
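falcon_monitor() above pulls the periodic port work into the NIC type: check the board sensors (forcing PHY_MODE_LOW_POWER and a port reconfigure on a fault), poll the link either internally (loopback) or through the PHY, and on any change rebuild the MAC path in a strict order before reporting it. A compressed, stubbed sketch of that ordering; every function here is a stand-in, not the driver's API:

#include <stdio.h>
#include <stdbool.h>

static int  board_monitor(void)  { return 0; }      /* 0 = sensors OK */
static bool link_poll(void)      { return true; }   /* pretend link changed */
static void step(const char *s)  { printf("%s\n", s); }

static void monitor_once(bool is_10g)
{
    if (board_monitor() != 0)
        step("sensor fault: set PHY_MODE_LOW_POWER, reconfigure port");

    if (link_poll()) {
        /* Order matters: stats must be quiesced before the MAC is torn
         * down, and restarted only once it has been reconfigured. */
        step("stop NIC stats");
        step("deconfigure MAC wrapper");
        step("switch MAC (GMAC <-> XMAC), reset it");
        step("reconfigure MAC");
        step("start NIC stats");
        step("report link status change");
    }

    if (is_10g)
        step("poll XMAC for faults");
}

int main(void)
{
    monitor_once(true);
    return 0;
}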
2600 | /* Zeroes out the SRAM contents. This routine must be called in | 1191 | /* Zeroes out the SRAM contents. This routine must be called in |
2601 | * process context and is allowed to sleep. | 1192 | * process context and is allowed to sleep. |
2602 | */ | 1193 | */ |
@@ -2606,16 +1197,16 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
2606 | int count; | 1197 | int count; |
2607 | 1198 | ||
2608 | /* Set the SRAM wake/sleep GPIO appropriately. */ | 1199 | /* Set the SRAM wake/sleep GPIO appropriately. */ |
2609 | falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); | 1200 | efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); |
2610 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); | 1201 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); |
2611 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); | 1202 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); |
2612 | falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); | 1203 | efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); |
2613 | 1204 | ||
2614 | /* Initiate SRAM reset */ | 1205 | /* Initiate SRAM reset */ |
2615 | EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, | 1206 | EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, |
2616 | SRAM_OOB_BT_INIT_EN, 1, | 1207 | FRF_AZ_SRM_INIT_EN, 1, |
2617 | SRM_NUM_BANKS_AND_BANK_SIZE, 0); | 1208 | FRF_AZ_SRM_NB_SZ, 0); |
2618 | falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); | 1209 | efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); |
2619 | 1210 | ||
2620 | /* Wait for SRAM reset to complete */ | 1211 | /* Wait for SRAM reset to complete */ |
2621 | count = 0; | 1212 | count = 0; |
@@ -2626,8 +1217,8 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
2626 | schedule_timeout_uninterruptible(HZ / 50); | 1217 | schedule_timeout_uninterruptible(HZ / 50); |
2627 | 1218 | ||
2628 | /* Check for reset complete */ | 1219 | /* Check for reset complete */ |
2629 | falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); | 1220 | efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); |
2630 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { | 1221 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { |
2631 | EFX_LOG(efx, "SRAM reset complete\n"); | 1222 | EFX_LOG(efx, "SRAM reset complete\n"); |
2632 | 1223 | ||
2633 | return 0; | 1224 | return 0; |
@@ -2663,8 +1254,6 @@ static int falcon_spi_device_init(struct efx_nic *efx, | |||
2663 | spi_device->block_size = | 1254 | spi_device->block_size = |
2664 | 1 << SPI_DEV_TYPE_FIELD(device_type, | 1255 | 1 << SPI_DEV_TYPE_FIELD(device_type, |
2665 | SPI_DEV_TYPE_BLOCK_SIZE); | 1256 | SPI_DEV_TYPE_BLOCK_SIZE); |
2666 | |||
2667 | spi_device->efx = efx; | ||
2668 | } else { | 1257 | } else { |
2669 | spi_device = NULL; | 1258 | spi_device = NULL; |
2670 | } | 1259 | } |
@@ -2674,7 +1263,6 @@ static int falcon_spi_device_init(struct efx_nic *efx, | |||
2674 | return 0; | 1263 | return 0; |
2675 | } | 1264 | } |
2676 | 1265 | ||
2677 | |||
2678 | static void falcon_remove_spi_devices(struct efx_nic *efx) | 1266 | static void falcon_remove_spi_devices(struct efx_nic *efx) |
2679 | { | 1267 | { |
2680 | kfree(efx->spi_eeprom); | 1268 | kfree(efx->spi_eeprom); |
@@ -2712,16 +1300,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
2712 | board_rev = le16_to_cpu(v2->board_revision); | 1300 | board_rev = le16_to_cpu(v2->board_revision); |
2713 | 1301 | ||
2714 | if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { | 1302 | if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { |
2715 | __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; | 1303 | rc = falcon_spi_device_init( |
2716 | __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; | 1304 | efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH, |
2717 | rc = falcon_spi_device_init(efx, &efx->spi_flash, | 1305 | le32_to_cpu(v3->spi_device_type |
2718 | EE_SPI_FLASH, | 1306 | [FFE_AB_SPI_DEVICE_FLASH])); |
2719 | le32_to_cpu(fl)); | ||
2720 | if (rc) | 1307 | if (rc) |
2721 | goto fail2; | 1308 | goto fail2; |
2722 | rc = falcon_spi_device_init(efx, &efx->spi_eeprom, | 1309 | rc = falcon_spi_device_init( |
2723 | EE_SPI_EEPROM, | 1310 | efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, |
2724 | le32_to_cpu(ee)); | 1311 | le32_to_cpu(v3->spi_device_type |
1312 | [FFE_AB_SPI_DEVICE_EEPROM])); | ||
2725 | if (rc) | 1313 | if (rc) |
2726 | goto fail2; | 1314 | goto fail2; |
2727 | } | 1315 | } |
@@ -2732,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
2732 | 1320 | ||
2733 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); | 1321 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); |
2734 | 1322 | ||
2735 | efx_set_board_info(efx, board_rev); | 1323 | rc = falcon_probe_board(efx, board_rev); |
1324 | if (rc) | ||
1325 | goto fail2; | ||
2736 | 1326 | ||
2737 | kfree(nvconfig); | 1327 | kfree(nvconfig); |
2738 | return 0; | 1328 | return 0; |
@@ -2744,89 +1334,49 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
2744 | return rc; | 1334 | return rc; |
2745 | } | 1335 | } |
2746 | 1336 | ||
2747 | /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port | ||
2748 | * count, port speed). Set workaround and feature flags accordingly. | ||
2749 | */ | ||
2750 | static int falcon_probe_nic_variant(struct efx_nic *efx) | ||
2751 | { | ||
2752 | efx_oword_t altera_build; | ||
2753 | efx_oword_t nic_stat; | ||
2754 | |||
2755 | falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); | ||
2756 | if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { | ||
2757 | EFX_ERR(efx, "Falcon FPGA not supported\n"); | ||
2758 | return -ENODEV; | ||
2759 | } | ||
2760 | |||
2761 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | ||
2762 | |||
2763 | switch (falcon_rev(efx)) { | ||
2764 | case FALCON_REV_A0: | ||
2765 | case 0xff: | ||
2766 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | ||
2767 | return -ENODEV; | ||
2768 | |||
2769 | case FALCON_REV_A1: | ||
2770 | if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { | ||
2771 | EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); | ||
2772 | return -ENODEV; | ||
2773 | } | ||
2774 | break; | ||
2775 | |||
2776 | case FALCON_REV_B0: | ||
2777 | break; | ||
2778 | |||
2779 | default: | ||
2780 | EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); | ||
2781 | return -ENODEV; | ||
2782 | } | ||
2783 | |||
2784 | /* Initial assumed speed */ | ||
2785 | efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000; | ||
2786 | |||
2787 | return 0; | ||
2788 | } | ||
2789 | |||
2790 | /* Probe all SPI devices on the NIC */ | 1337 | /* Probe all SPI devices on the NIC */ |
2791 | static void falcon_probe_spi_devices(struct efx_nic *efx) | 1338 | static void falcon_probe_spi_devices(struct efx_nic *efx) |
2792 | { | 1339 | { |
2793 | efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; | 1340 | efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; |
2794 | int boot_dev; | 1341 | int boot_dev; |
2795 | 1342 | ||
2796 | falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); | 1343 | efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL); |
2797 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | 1344 | efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); |
2798 | falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); | 1345 | efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); |
2799 | 1346 | ||
2800 | if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) { | 1347 | if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { |
2801 | boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ? | 1348 | boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? |
2802 | EE_SPI_FLASH : EE_SPI_EEPROM); | 1349 | FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); |
2803 | EFX_LOG(efx, "Booted from %s\n", | 1350 | EFX_LOG(efx, "Booted from %s\n", |
2804 | boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM"); | 1351 | boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM"); |
2805 | } else { | 1352 | } else { |
2806 | /* Disable VPD and set clock dividers to safe | 1353 | /* Disable VPD and set clock dividers to safe |
2807 | * values for initial programming. */ | 1354 | * values for initial programming. */ |
2808 | boot_dev = -1; | 1355 | boot_dev = -1; |
2809 | EFX_LOG(efx, "Booted from internal ASIC settings;" | 1356 | EFX_LOG(efx, "Booted from internal ASIC settings;" |
2810 | " setting SPI config\n"); | 1357 | " setting SPI config\n"); |
2811 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, | 1358 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, |
2812 | /* 125 MHz / 7 ~= 20 MHz */ | 1359 | /* 125 MHz / 7 ~= 20 MHz */ |
2813 | EE_SF_CLOCK_DIV, 7, | 1360 | FRF_AB_EE_SF_CLOCK_DIV, 7, |
2814 | /* 125 MHz / 63 ~= 2 MHz */ | 1361 | /* 125 MHz / 63 ~= 2 MHz */ |
2815 | EE_EE_CLOCK_DIV, 63); | 1362 | FRF_AB_EE_EE_CLOCK_DIV, 63); |
2816 | falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); | 1363 | efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); |
2817 | } | 1364 | } |
2818 | 1365 | ||
2819 | if (boot_dev == EE_SPI_FLASH) | 1366 | if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) |
2820 | falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH, | 1367 | falcon_spi_device_init(efx, &efx->spi_flash, |
1368 | FFE_AB_SPI_DEVICE_FLASH, | ||
2821 | default_flash_type); | 1369 | default_flash_type); |
2822 | if (boot_dev == EE_SPI_EEPROM) | 1370 | if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) |
2823 | falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM, | 1371 | falcon_spi_device_init(efx, &efx->spi_eeprom, |
1372 | FFE_AB_SPI_DEVICE_EEPROM, | ||
2824 | large_eeprom_type); | 1373 | large_eeprom_type); |
2825 | } | 1374 | } |
2826 | 1375 | ||
2827 | int falcon_probe_nic(struct efx_nic *efx) | 1376 | static int falcon_probe_nic(struct efx_nic *efx) |
2828 | { | 1377 | { |
2829 | struct falcon_nic_data *nic_data; | 1378 | struct falcon_nic_data *nic_data; |
1379 | struct falcon_board *board; | ||
2830 | int rc; | 1380 | int rc; |
2831 | 1381 | ||
2832 | /* Allocate storage for hardware specific data */ | 1382 | /* Allocate storage for hardware specific data */ |
@@ -2835,15 +1385,33 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2835 | return -ENOMEM; | 1385 | return -ENOMEM; |
2836 | efx->nic_data = nic_data; | 1386 | efx->nic_data = nic_data; |
2837 | 1387 | ||
2838 | /* Determine number of ports etc. */ | 1388 | rc = -ENODEV; |
2839 | rc = falcon_probe_nic_variant(efx); | 1389 | |
2840 | if (rc) | 1390 | if (efx_nic_fpga_ver(efx) != 0) { |
1391 | EFX_ERR(efx, "Falcon FPGA not supported\n"); | ||
2841 | goto fail1; | 1392 | goto fail1; |
1393 | } | ||
2842 | 1394 | ||
2843 | /* Probe secondary function if expected */ | 1395 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { |
2844 | if (FALCON_IS_DUAL_FUNC(efx)) { | 1396 | efx_oword_t nic_stat; |
2845 | struct pci_dev *dev = pci_dev_get(efx->pci_dev); | 1397 | struct pci_dev *dev; |
1398 | u8 pci_rev = efx->pci_dev->revision; | ||
1399 | |||
1400 | if ((pci_rev == 0xff) || (pci_rev == 0)) { | ||
1401 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | ||
1402 | goto fail1; | ||
1403 | } | ||
1404 | efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); | ||
1405 | if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { | ||
1406 | EFX_ERR(efx, "Falcon rev A1 1G not supported\n"); | ||
1407 | goto fail1; | ||
1408 | } | ||
1409 | if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { | ||
1410 | EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); | ||
1411 | goto fail1; | ||
1412 | } | ||
2846 | 1413 | ||
1414 | dev = pci_dev_get(efx->pci_dev); | ||
2847 | while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, | 1415 | while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, |
2848 | dev))) { | 1416 | dev))) { |
2849 | if (dev->bus == efx->pci_dev->bus && | 1417 | if (dev->bus == efx->pci_dev->bus && |
@@ -2867,7 +1435,7 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2867 | } | 1435 | } |
2868 | 1436 | ||
2869 | /* Allocate memory for INT_KER */ | 1437 | /* Allocate memory for INT_KER */ |
2870 | rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); | 1438 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); |
2871 | if (rc) | 1439 | if (rc) |
2872 | goto fail4; | 1440 | goto fail4; |
2873 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | 1441 | BUG_ON(efx->irq_status.dma_addr & 0x0f); |
@@ -2884,21 +1452,36 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2884 | goto fail5; | 1452 | goto fail5; |
2885 | 1453 | ||
2886 | /* Initialise I2C adapter */ | 1454 | /* Initialise I2C adapter */ |
2887 | efx->i2c_adap.owner = THIS_MODULE; | 1455 | board = falcon_board(efx); |
2888 | nic_data->i2c_data = falcon_i2c_bit_operations; | 1456 | board->i2c_adap.owner = THIS_MODULE; |
2889 | nic_data->i2c_data.data = efx; | 1457 | board->i2c_data = falcon_i2c_bit_operations; |
2890 | efx->i2c_adap.algo_data = &nic_data->i2c_data; | 1458 | board->i2c_data.data = efx; |
2891 | efx->i2c_adap.dev.parent = &efx->pci_dev->dev; | 1459 | board->i2c_adap.algo_data = &board->i2c_data; |
2892 | strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name)); | 1460 | board->i2c_adap.dev.parent = &efx->pci_dev->dev; |
2893 | rc = i2c_bit_add_bus(&efx->i2c_adap); | 1461 | strlcpy(board->i2c_adap.name, "SFC4000 GPIO", |
1462 | sizeof(board->i2c_adap.name)); | ||
1463 | rc = i2c_bit_add_bus(&board->i2c_adap); | ||
2894 | if (rc) | 1464 | if (rc) |
2895 | goto fail5; | 1465 | goto fail5; |
2896 | 1466 | ||
1467 | rc = falcon_board(efx)->type->init(efx); | ||
1468 | if (rc) { | ||
1469 | EFX_ERR(efx, "failed to initialise board\n"); | ||
1470 | goto fail6; | ||
1471 | } | ||
1472 | |||
1473 | nic_data->stats_disable_count = 1; | ||
1474 | setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, | ||
1475 | (unsigned long)efx); | ||
1476 | |||
2897 | return 0; | 1477 | return 0; |
2898 | 1478 | ||
1479 | fail6: | ||
1480 | BUG_ON(i2c_del_adapter(&board->i2c_adap)); | ||
1481 | memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); | ||
2899 | fail5: | 1482 | fail5: |
2900 | falcon_remove_spi_devices(efx); | 1483 | falcon_remove_spi_devices(efx); |
2901 | falcon_free_buffer(efx, &efx->irq_status); | 1484 | efx_nic_free_buffer(efx, &efx->irq_status); |
2902 | fail4: | 1485 | fail4: |
2903 | fail3: | 1486 | fail3: |
2904 | if (nic_data->pci_dev2) { | 1487 | if (nic_data->pci_dev2) { |
@@ -2911,166 +1494,147 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2911 | return rc; | 1494 | return rc; |
2912 | } | 1495 | } |
2913 | 1496 | ||
1497 | static void falcon_init_rx_cfg(struct efx_nic *efx) | ||
1498 | { | ||
1499 | /* Prior to Siena the RX DMA engine will split each frame at | ||
1500 | * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to | ||
1501 | * be so large that that never happens. */ | ||
1502 | const unsigned huge_buf_size = (3 * 4096) >> 5; | ||
1503 | /* RX control FIFO thresholds (32 entries) */ | ||
1504 | const unsigned ctrl_xon_thr = 20; | ||
1505 | const unsigned ctrl_xoff_thr = 25; | ||
1506 | /* RX data FIFO thresholds (256-byte units; size varies) */ | ||
1507 | int data_xon_thr = efx_nic_rx_xon_thresh >> 8; | ||
1508 | int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8; | ||
1509 | efx_oword_t reg; | ||
1510 | |||
1511 | efx_reado(efx, &reg, FR_AZ_RX_CFG); | ||
1512 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { | ||
1513 | /* Data FIFO size is 5.5K */ | ||
1514 | if (data_xon_thr < 0) | ||
1515 | data_xon_thr = 512 >> 8; | ||
1516 | if (data_xoff_thr < 0) | ||
1517 | data_xoff_thr = 2048 >> 8; | ||
1518 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); | ||
1519 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, | ||
1520 | huge_buf_size); | ||
1521 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); | ||
1522 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); | ||
1523 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); | ||
1524 | EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); | ||
1525 | } else { | ||
1526 | /* Data FIFO size is 80K; register fields moved */ | ||
1527 | if (data_xon_thr < 0) | ||
1528 | data_xon_thr = 27648 >> 8; /* ~3*max MTU */ | ||
1529 | if (data_xoff_thr < 0) | ||
1530 | data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */ | ||
1531 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); | ||
1532 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, | ||
1533 | huge_buf_size); | ||
1534 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); | ||
1535 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); | ||
1536 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); | ||
1537 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); | ||
1538 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); | ||
1539 | } | ||
1540 | /* Always enable XOFF signal from RX FIFO. We enable | ||
1541 | * or disable transmission of pause frames at the MAC. */ | ||
1542 | EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); | ||
1543 | efx_writeo(efx, &reg, FR_AZ_RX_CFG); | ||
1544 | } | ||
1545 | |||
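falcon_init_rx_cfg() above expresses the data-FIFO XON/XOFF thresholds in 256-byte units (a byte count shifted right by 8) and RX_USR_BUF_SIZE in 32-byte units, with per-revision defaults when the module parameters are negative: 512/2048 bytes on the 5.5KB Falcon A FIFO, 27648/54272 bytes (roughly 3x max MTU and 80KB minus 3x max MTU) on the 80KB B0 FIFO. A quick arithmetic check of those conversions; the helper name is invented:

#include <stdio.h>

/* Byte threshold -> 256-byte RX_CFG field, with a per-revision default
 * used when the module parameter is negative. */
static int rx_thresh_units(int param_bytes, int default_bytes)
{
    int bytes = (param_bytes >= 0) ? param_bytes : default_bytes;
    return bytes >> 8;
}

int main(void)
{
    printf("huge RX_USR_BUF_SIZE = %d (32-byte units)\n", (3 * 4096) >> 5); /* 384 */
    printf("B0 XON  = %d\n", rx_thresh_units(-1, 27648));  /* 108 */
    printf("B0 XOFF = %d\n", rx_thresh_units(-1, 54272));  /* 212 */
    printf("A1 XON  = %d\n", rx_thresh_units(-1, 512));    /* 2   */
    printf("A1 XOFF = %d\n", rx_thresh_units(-1, 2048));   /* 8   */
    return 0;
}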
2914 | /* This call performs hardware-specific global initialisation, such as | 1546 | /* This call performs hardware-specific global initialisation, such as |
2915 | * defining the descriptor cache sizes and number of RSS channels. | 1547 | * defining the descriptor cache sizes and number of RSS channels. |
2916 | * It does not set up any buffers, descriptor rings or event queues. | 1548 | * It does not set up any buffers, descriptor rings or event queues. |
2917 | */ | 1549 | */ |
2918 | int falcon_init_nic(struct efx_nic *efx) | 1550 | static int falcon_init_nic(struct efx_nic *efx) |
2919 | { | 1551 | { |
2920 | efx_oword_t temp; | 1552 | efx_oword_t temp; |
2921 | unsigned thresh; | ||
2922 | int rc; | 1553 | int rc; |
2923 | 1554 | ||
2924 | /* Use on-chip SRAM */ | 1555 | /* Use on-chip SRAM */ |
2925 | falcon_read(efx, &temp, NIC_STAT_REG); | 1556 | efx_reado(efx, &temp, FR_AB_NIC_STAT); |
2926 | EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); | 1557 | EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); |
2927 | falcon_write(efx, &temp, NIC_STAT_REG); | 1558 | efx_writeo(efx, &temp, FR_AB_NIC_STAT); |
2928 | 1559 | ||
2929 | /* Set the source of the GMAC clock */ | 1560 | /* Set the source of the GMAC clock */ |
2930 | if (falcon_rev(efx) == FALCON_REV_B0) { | 1561 | if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { |
2931 | falcon_read(efx, &temp, GPIO_CTL_REG_KER); | 1562 | efx_reado(efx, &temp, FR_AB_GPIO_CTL); |
2932 | EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true); | 1563 | EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true); |
2933 | falcon_write(efx, &temp, GPIO_CTL_REG_KER); | 1564 | efx_writeo(efx, &temp, FR_AB_GPIO_CTL); |
2934 | } | 1565 | } |
2935 | 1566 | ||
2936 | /* Set buffer table mode */ | 1567 | /* Select the correct MAC */ |
2937 | EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL); | 1568 | falcon_clock_mac(efx); |
2938 | falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER); | ||
2939 | 1569 | ||
2940 | rc = falcon_reset_sram(efx); | 1570 | rc = falcon_reset_sram(efx); |
2941 | if (rc) | 1571 | if (rc) |
2942 | return rc; | 1572 | return rc; |
2943 | 1573 | ||
2944 | /* Set positions of descriptor caches in SRAM. */ | ||
2945 | EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); | ||
2946 | falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); | ||
2947 | EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); | ||
2948 | falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); | ||
2949 | |||
2950 | /* Set TX descriptor cache size. */ | ||
2951 | BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); | ||
2952 | EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | ||
2953 | falcon_write(efx, &temp, TX_DC_CFG_REG_KER); | ||
2954 | |||
2955 | /* Set RX descriptor cache size. Set low watermark to size-8, as | ||
2956 | * this allows most efficient prefetching. | ||
2957 | */ | ||
2958 | BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); | ||
2959 | EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | ||
2960 | falcon_write(efx, &temp, RX_DC_CFG_REG_KER); | ||
2961 | EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | ||
2962 | falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER); | ||
2963 | |||
2964 | /* Clear the parity enables on the TX data fifos as | 1574 | /* Clear the parity enables on the TX data fifos as |
2965 | * they produce false parity errors because of timing issues | 1575 | * they produce false parity errors because of timing issues |
2966 | */ | 1576 | */ |
2967 | if (EFX_WORKAROUND_5129(efx)) { | 1577 | if (EFX_WORKAROUND_5129(efx)) { |
2968 | falcon_read(efx, &temp, SPARE_REG_KER); | 1578 | efx_reado(efx, &temp, FR_AZ_CSR_SPARE); |
2969 | EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); | 1579 | EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0); |
2970 | falcon_write(efx, &temp, SPARE_REG_KER); | 1580 | efx_writeo(efx, &temp, FR_AZ_CSR_SPARE); |
2971 | } | 1581 | } |
2972 | 1582 | ||
2973 | /* Enable all the genuinely fatal interrupts. (They are still | ||
2974 | * masked by the overall interrupt mask, controlled by | ||
2975 | * falcon_interrupts()). | ||
2976 | * | ||
2977 | * Note: All other fatal interrupts are enabled | ||
2978 | */ | ||
2979 | EFX_POPULATE_OWORD_3(temp, | ||
2980 | ILL_ADR_INT_KER_EN, 1, | ||
2981 | RBUF_OWN_INT_KER_EN, 1, | ||
2982 | TBUF_OWN_INT_KER_EN, 1); | ||
2983 | EFX_INVERT_OWORD(temp); | ||
2984 | falcon_write(efx, &temp, FATAL_INTR_REG_KER); | ||
2985 | |||
2986 | if (EFX_WORKAROUND_7244(efx)) { | 1583 | if (EFX_WORKAROUND_7244(efx)) { |
2987 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | 1584 | efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL); |
2988 | EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); | 1585 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8); |
2989 | EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); | 1586 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8); |
2990 | EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); | 1587 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8); |
2991 | EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); | 1588 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8); |
2992 | falcon_write(efx, &temp, RX_FILTER_CTL_REG); | 1589 | efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL); |
2993 | } | 1590 | } |
2994 | 1591 | ||
2995 | falcon_setup_rss_indir_table(efx); | 1592 | /* XXX This is documented only for Falcon A0/A1 */ |
2996 | |||
2997 | /* Setup RX. Wait for descriptor is broken and must | 1593 | /* Setup RX. Wait for descriptor is broken and must |
2998 | * be disabled. RXDP recovery shouldn't be needed, but is. | 1594 | * be disabled. RXDP recovery shouldn't be needed, but is. |
2999 | */ | 1595 | */ |
3000 | falcon_read(efx, &temp, RX_SELF_RST_REG_KER); | 1596 | efx_reado(efx, &temp, FR_AA_RX_SELF_RST); |
3001 | EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); | 1597 | EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1); |
3002 | EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); | 1598 | EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1); |
3003 | if (EFX_WORKAROUND_5583(efx)) | 1599 | if (EFX_WORKAROUND_5583(efx)) |
3004 | EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); | 1600 | EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1); |
3005 | falcon_write(efx, &temp, RX_SELF_RST_REG_KER); | 1601 | efx_writeo(efx, &temp, FR_AA_RX_SELF_RST); |
3006 | |||
3007 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | ||
3008 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | ||
3009 | */ | ||
3010 | falcon_read(efx, &temp, TX_CFG2_REG_KER); | ||
3011 | EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe); | ||
3012 | EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1); | ||
3013 | EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1); | ||
3014 | EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0); | ||
3015 | EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1); | ||
3016 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | ||
3017 | EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1); | ||
3018 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | ||
3019 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); | ||
3020 | /* Squash TX of packets of 16 bytes or less */ | ||
3021 | if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) | ||
3022 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); | ||
3023 | falcon_write(efx, &temp, TX_CFG2_REG_KER); | ||
3024 | 1602 | ||
3025 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 | 1603 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 |
3026 | * descriptors (which is bad). | 1604 | * descriptors (which is bad). |
3027 | */ | 1605 | */ |
3028 | falcon_read(efx, &temp, TX_CFG_REG_KER); | 1606 | efx_reado(efx, &temp, FR_AZ_TX_CFG); |
3029 | EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); | 1607 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); |
3030 | falcon_write(efx, &temp, TX_CFG_REG_KER); | 1608 | efx_writeo(efx, &temp, FR_AZ_TX_CFG); |
3031 | 1609 | ||
3032 | /* RX config */ | 1610 | falcon_init_rx_cfg(efx); |
3033 | falcon_read(efx, &temp, RX_CFG_REG_KER); | ||
3034 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0); | ||
3035 | if (EFX_WORKAROUND_7575(efx)) | ||
3036 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, | ||
3037 | (3 * 4096) / 32); | ||
3038 | if (falcon_rev(efx) >= FALCON_REV_B0) | ||
3039 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); | ||
3040 | |||
3041 | /* RX FIFO flow control thresholds */ | ||
3042 | thresh = ((rx_xon_thresh_bytes >= 0) ? | ||
3043 | rx_xon_thresh_bytes : efx->type->rx_xon_thresh); | ||
3044 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256); | ||
3045 | thresh = ((rx_xoff_thresh_bytes >= 0) ? | ||
3046 | rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh); | ||
3047 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256); | ||
3048 | /* RX control FIFO thresholds [32 entries] */ | ||
3049 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20); | ||
3050 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25); | ||
3051 | falcon_write(efx, &temp, RX_CFG_REG_KER); | ||
3052 | 1611 | ||
3053 | /* Set destination of both TX and RX Flush events */ | 1612 | /* Set destination of both TX and RX Flush events */ |
3054 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 1613 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
3055 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); | 1614 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); |
3056 | falcon_write(efx, &temp, DP_CTRL_REG); | 1615 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); |
3057 | } | 1616 | } |
3058 | 1617 | ||
1618 | efx_nic_init_common(efx); | ||
1619 | |||
3059 | return 0; | 1620 | return 0; |
3060 | } | 1621 | } |
3061 | 1622 | ||
3062 | void falcon_remove_nic(struct efx_nic *efx) | 1623 | static void falcon_remove_nic(struct efx_nic *efx) |
3063 | { | 1624 | { |
3064 | struct falcon_nic_data *nic_data = efx->nic_data; | 1625 | struct falcon_nic_data *nic_data = efx->nic_data; |
1626 | struct falcon_board *board = falcon_board(efx); | ||
3065 | int rc; | 1627 | int rc; |
3066 | 1628 | ||
1629 | board->type->fini(efx); | ||
1630 | |||
3067 | /* Remove I2C adapter and clear it in preparation for a retry */ | 1631 | /* Remove I2C adapter and clear it in preparation for a retry */ |
3068 | rc = i2c_del_adapter(&efx->i2c_adap); | 1632 | rc = i2c_del_adapter(&board->i2c_adap); |
3069 | BUG_ON(rc); | 1633 | BUG_ON(rc); |
3070 | memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap)); | 1634 | memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); |
3071 | 1635 | ||
3072 | falcon_remove_spi_devices(efx); | 1636 | falcon_remove_spi_devices(efx); |
3073 | falcon_free_buffer(efx, &efx->irq_status); | 1637 | efx_nic_free_buffer(efx, &efx->irq_status); |
3074 | 1638 | ||
3075 | falcon_reset_hw(efx, RESET_TYPE_ALL); | 1639 | falcon_reset_hw(efx, RESET_TYPE_ALL); |
3076 | 1640 | ||
@@ -3085,65 +1649,180 @@ void falcon_remove_nic(struct efx_nic *efx) | |||
3085 | efx->nic_data = NULL; | 1649 | efx->nic_data = NULL; |
3086 | } | 1650 | } |
3087 | 1651 | ||
3088 | void falcon_update_nic_stats(struct efx_nic *efx) | 1652 | static void falcon_update_nic_stats(struct efx_nic *efx) |
3089 | { | 1653 | { |
1654 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
3090 | efx_oword_t cnt; | 1655 | efx_oword_t cnt; |
3091 | 1656 | ||
3092 | falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); | 1657 | if (nic_data->stats_disable_count) |
3093 | efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); | 1658 | return; |
1659 | |||
1660 | efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); | ||
1661 | efx->n_rx_nodesc_drop_cnt += | ||
1662 | EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); | ||
1663 | |||
1664 | if (nic_data->stats_pending && | ||
1665 | *nic_data->stats_dma_done == FALCON_STATS_DONE) { | ||
1666 | nic_data->stats_pending = false; | ||
1667 | rmb(); /* read the done flag before the stats */ | ||
1668 | efx->mac_op->update_stats(efx); | ||
1669 | } | ||
1670 | } | ||
1671 | |||
1672 | void falcon_start_nic_stats(struct efx_nic *efx) | ||
1673 | { | ||
1674 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1675 | |||
1676 | spin_lock_bh(&efx->stats_lock); | ||
1677 | if (--nic_data->stats_disable_count == 0) | ||
1678 | falcon_stats_request(efx); | ||
1679 | spin_unlock_bh(&efx->stats_lock); | ||
1680 | } | ||
1681 | |||
1682 | void falcon_stop_nic_stats(struct efx_nic *efx) | ||
1683 | { | ||
1684 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1685 | int i; | ||
1686 | |||
1687 | might_sleep(); | ||
1688 | |||
1689 | spin_lock_bh(&efx->stats_lock); | ||
1690 | ++nic_data->stats_disable_count; | ||
1691 | spin_unlock_bh(&efx->stats_lock); | ||
1692 | |||
1693 | del_timer_sync(&nic_data->stats_timer); | ||
1694 | |||
1695 | /* Wait enough time for the most recent transfer to | ||
1696 | * complete. */ | ||
1697 | for (i = 0; i < 4 && nic_data->stats_pending; i++) { | ||
1698 | if (*nic_data->stats_dma_done == FALCON_STATS_DONE) | ||
1699 | break; | ||
1700 | msleep(1); | ||
1701 | } | ||
1702 | |||
1703 | spin_lock_bh(&efx->stats_lock); | ||
1704 | falcon_stats_complete(efx); | ||
1705 | spin_unlock_bh(&efx->stats_lock); | ||
1706 | } | ||
1707 | |||
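Stats gathering is now guarded by a nesting disable count: falcon_stop_nic_stats() bumps it and waits out any in-flight DMA, falcon_start_nic_stats() re-arms a request only when the count drops back to zero, and the update path consumes the buffer only after seeing the DMA'd done marker (rmb() orders the marker read before the stats read). A single-threaded sketch of that counting discipline, with the spinlock, timer and real DMA omitted and every name invented:

#include <stdio.h>
#include <stdbool.h>

#define STATS_DONE 0xffffffffu

struct stats_state {
    int disable_count;       /* >0 means collection is paused */
    bool pending;            /* a DMA request is outstanding */
    unsigned done_marker;    /* written by "hardware" when DMA completes */
};

static void stats_request(struct stats_state *s)
{
    s->done_marker = 0;
    s->pending = true;
    s->done_marker = STATS_DONE;   /* hardware DMAs stats, then sets marker */
}

static void stats_stop(struct stats_state *s)
{
    ++s->disable_count;
    s->pending = false;            /* real code briefly waits for the DMA */
}

static void stats_start(struct stats_state *s)
{
    if (--s->disable_count == 0)
        stats_request(s);
}

static void stats_poll(struct stats_state *s)
{
    if (s->disable_count)
        return;                    /* someone has stats paused */
    if (s->pending && s->done_marker == STATS_DONE) {
        s->pending = false;
        /* driver: rmb() here, then mac_op->update_stats() */
        printf("stats buffer consumed\n");
    }
}

int main(void)
{
    struct stats_state s = { .disable_count = 1 };  /* starts disabled, as after probe */

    stats_start(&s);   /* count -> 0: first request issued */
    stats_poll(&s);
    stats_stop(&s);    /* e.g. around a MAC switch */
    stats_start(&s);
    stats_poll(&s);
    return 0;
}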
1708 | static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
1709 | { | ||
1710 | falcon_board(efx)->type->set_id_led(efx, mode); | ||
1711 | } | ||
1712 | |||
1713 | /************************************************************************** | ||
1714 | * | ||
1715 | * Wake on LAN | ||
1716 | * | ||
1717 | ************************************************************************** | ||
1718 | */ | ||
1719 | |||
1720 | static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | ||
1721 | { | ||
1722 | wol->supported = 0; | ||
1723 | wol->wolopts = 0; | ||
1724 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
1725 | } | ||
1726 | |||
1727 | static int falcon_set_wol(struct efx_nic *efx, u32 type) | ||
1728 | { | ||
1729 | if (type != 0) | ||
1730 | return -EINVAL; | ||
1731 | return 0; | ||
3094 | } | 1732 | } |
3095 | 1733 | ||
3096 | /************************************************************************** | 1734 | /************************************************************************** |
3097 | * | 1735 | * |
3098 | * Revision-dependent attributes used by efx.c | 1736 | * Revision-dependent attributes used by efx.c and nic.c |
3099 | * | 1737 | * |
3100 | ************************************************************************** | 1738 | ************************************************************************** |
3101 | */ | 1739 | */ |
3102 | 1740 | ||
3103 | struct efx_nic_type falcon_a_nic_type = { | 1741 | struct efx_nic_type falcon_a1_nic_type = { |
3104 | .mem_bar = 2, | 1742 | .probe = falcon_probe_nic, |
1743 | .remove = falcon_remove_nic, | ||
1744 | .init = falcon_init_nic, | ||
1745 | .fini = efx_port_dummy_op_void, | ||
1746 | .monitor = falcon_monitor, | ||
1747 | .reset = falcon_reset_hw, | ||
1748 | .probe_port = falcon_probe_port, | ||
1749 | .remove_port = falcon_remove_port, | ||
1750 | .prepare_flush = falcon_prepare_flush, | ||
1751 | .update_stats = falcon_update_nic_stats, | ||
1752 | .start_stats = falcon_start_nic_stats, | ||
1753 | .stop_stats = falcon_stop_nic_stats, | ||
1754 | .set_id_led = falcon_set_id_led, | ||
1755 | .push_irq_moderation = falcon_push_irq_moderation, | ||
1756 | .push_multicast_hash = falcon_push_multicast_hash, | ||
1757 | .reconfigure_port = falcon_reconfigure_port, | ||
1758 | .get_wol = falcon_get_wol, | ||
1759 | .set_wol = falcon_set_wol, | ||
1760 | .resume_wol = efx_port_dummy_op_void, | ||
1761 | .test_nvram = falcon_test_nvram, | ||
1762 | .default_mac_ops = &falcon_xmac_operations, | ||
1763 | |||
1764 | .revision = EFX_REV_FALCON_A1, | ||
3105 | .mem_map_size = 0x20000, | 1765 | .mem_map_size = 0x20000, |
3106 | .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, | 1766 | .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, |
3107 | .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, | 1767 | .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, |
3108 | .buf_tbl_base = BUF_TBL_KER_A1, | 1768 | .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, |
3109 | .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, | 1769 | .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER, |
3110 | .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, | 1770 | .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, |
3111 | .txd_ring_mask = FALCON_TXD_RING_MASK, | 1771 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), |
3112 | .rxd_ring_mask = FALCON_RXD_RING_MASK, | ||
3113 | .evq_size = FALCON_EVQ_SIZE, | ||
3114 | .max_dma_mask = FALCON_DMA_MASK, | ||
3115 | .tx_dma_mask = FALCON_TX_DMA_MASK, | ||
3116 | .bug5391_mask = 0xf, | ||
3117 | .rx_xoff_thresh = 2048, | ||
3118 | .rx_xon_thresh = 512, | ||
3119 | .rx_buffer_padding = 0x24, | 1772 | .rx_buffer_padding = 0x24, |
3120 | .max_interrupt_mode = EFX_INT_MODE_MSI, | 1773 | .max_interrupt_mode = EFX_INT_MODE_MSI, |
3121 | .phys_addr_channels = 4, | 1774 | .phys_addr_channels = 4, |
1775 | .tx_dc_base = 0x130000, | ||
1776 | .rx_dc_base = 0x100000, | ||
1777 | .offload_features = NETIF_F_IP_CSUM, | ||
1778 | .reset_world_flags = ETH_RESET_IRQ, | ||
3122 | }; | 1779 | }; |
3123 | 1780 | ||
3124 | struct efx_nic_type falcon_b_nic_type = { | 1781 | struct efx_nic_type falcon_b0_nic_type = { |
3125 | .mem_bar = 2, | 1782 | .probe = falcon_probe_nic, |
1783 | .remove = falcon_remove_nic, | ||
1784 | .init = falcon_init_nic, | ||
1785 | .fini = efx_port_dummy_op_void, | ||
1786 | .monitor = falcon_monitor, | ||
1787 | .reset = falcon_reset_hw, | ||
1788 | .probe_port = falcon_probe_port, | ||
1789 | .remove_port = falcon_remove_port, | ||
1790 | .prepare_flush = falcon_prepare_flush, | ||
1791 | .update_stats = falcon_update_nic_stats, | ||
1792 | .start_stats = falcon_start_nic_stats, | ||
1793 | .stop_stats = falcon_stop_nic_stats, | ||
1794 | .set_id_led = falcon_set_id_led, | ||
1795 | .push_irq_moderation = falcon_push_irq_moderation, | ||
1796 | .push_multicast_hash = falcon_push_multicast_hash, | ||
1797 | .reconfigure_port = falcon_reconfigure_port, | ||
1798 | .get_wol = falcon_get_wol, | ||
1799 | .set_wol = falcon_set_wol, | ||
1800 | .resume_wol = efx_port_dummy_op_void, | ||
1801 | .test_registers = falcon_b0_test_registers, | ||
1802 | .test_nvram = falcon_test_nvram, | ||
1803 | .default_mac_ops = &falcon_xmac_operations, | ||
1804 | |||
1805 | .revision = EFX_REV_FALCON_B0, | ||
3126 | /* Map everything up to and including the RSS indirection | 1806 | /* Map everything up to and including the RSS indirection |
3127 | * table. Don't map MSI-X table, MSI-X PBA since Linux | 1807 | * table. Don't map MSI-X table, MSI-X PBA since Linux |
3128 | * requires that they not be mapped. */ | 1808 | * requires that they not be mapped. */ |
3129 | .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, | 1809 | .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL + |
3130 | .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, | 1810 | FR_BZ_RX_INDIRECTION_TBL_STEP * |
3131 | .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, | 1811 | FR_BZ_RX_INDIRECTION_TBL_ROWS), |
3132 | .buf_tbl_base = BUF_TBL_KER_B0, | 1812 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
3133 | .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, | 1813 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
3134 | .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, | 1814 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
3135 | .txd_ring_mask = FALCON_TXD_RING_MASK, | 1815 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, |
3136 | .rxd_ring_mask = FALCON_RXD_RING_MASK, | 1816 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, |
3137 | .evq_size = FALCON_EVQ_SIZE, | 1817 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), |
3138 | .max_dma_mask = FALCON_DMA_MASK, | ||
3139 | .tx_dma_mask = FALCON_TX_DMA_MASK, | ||
3140 | .bug5391_mask = 0, | ||
3141 | .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */ | ||
3142 | .rx_xon_thresh = 27648, /* ~3*max MTU */ | ||
3143 | .rx_buffer_padding = 0, | 1818 | .rx_buffer_padding = 0, |
3144 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | 1819 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
3145 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy | 1820 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy |
3146 | * interrupt handler only supports 32 | 1821 | * interrupt handler only supports 32 |
3147 | * channels */ | 1822 | * channels */ |
1823 | .tx_dc_base = 0x130000, | ||
1824 | .rx_dc_base = 0x100000, | ||
1825 | .offload_features = NETIF_F_IP_CSUM, | ||
1826 | .reset_world_flags = ETH_RESET_IRQ, | ||
3148 | }; | 1827 | }; |
3149 | 1828 | ||
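The falcon_a1_nic_type and falcon_b0_nic_type tables above are how efx.c and nic.c stay revision-agnostic: the generic code keeps a pointer to one table and only ever calls through it, so the A1 and B0 variants differ purely in the fields they fill in. What follows is a minimal, self-contained sketch of that ops-table pattern; the struct, function names and the B0 mem_map_size value are illustrative stand-ins, not the driver's real definitions.

#include <stdio.h>

/* Cut-down stand-in for struct efx_nic_type: generic code calls through
 * these pointers and never tests the hardware revision directly.
 * Names and values are illustrative only. */
struct nic_type {
	const char *name;
	int (*probe)(void);
	void (*start_stats)(void);
	unsigned int mem_map_size;
};

static int a1_probe(void) { puts("probe Falcon A1"); return 0; }
static int b0_probe(void) { puts("probe Falcon B0"); return 0; }
static void a1_start_stats(void) { puts("start A1 stats"); }
static void b0_start_stats(void) { puts("start B0 stats"); }

static const struct nic_type falcon_a1 = {
	.name = "Falcon A1",
	.probe = a1_probe,
	.start_stats = a1_start_stats,
	.mem_map_size = 0x20000,
};

static const struct nic_type falcon_b0 = {
	.name = "Falcon B0",
	.probe = b0_probe,
	.start_stats = b0_start_stats,
	.mem_map_size = 0x800000,	/* placeholder, not the real value */
};

/* Revision-agnostic "driver core", in the spirit of efx.c/nic.c. */
static int bring_up(const struct nic_type *type)
{
	if (type->probe())
		return -1;
	type->start_stats();
	printf("%s: mapping 0x%x bytes of BAR\n", type->name, type->mem_map_size);
	return 0;
}

int main(void)
{
	bring_up(&falcon_a1);
	bring_up(&falcon_b0);
	return 0;
}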
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h deleted file mode 100644 index 77f2e0db7ca1..000000000000 --- a/drivers/net/sfc/falcon.h +++ /dev/null | |||
@@ -1,145 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_FALCON_H | ||
12 | #define EFX_FALCON_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | #include "efx.h" | ||
16 | |||
17 | /* | ||
18 | * Falcon hardware control | ||
19 | */ | ||
20 | |||
21 | enum falcon_revision { | ||
22 | FALCON_REV_A0 = 0, | ||
23 | FALCON_REV_A1 = 1, | ||
24 | FALCON_REV_B0 = 2, | ||
25 | }; | ||
26 | |||
27 | static inline int falcon_rev(struct efx_nic *efx) | ||
28 | { | ||
29 | return efx->pci_dev->revision; | ||
30 | } | ||
31 | |||
32 | extern struct efx_nic_type falcon_a_nic_type; | ||
33 | extern struct efx_nic_type falcon_b_nic_type; | ||
34 | |||
35 | /************************************************************************** | ||
36 | * | ||
37 | * Externs | ||
38 | * | ||
39 | ************************************************************************** | ||
40 | */ | ||
41 | |||
42 | /* TX data path */ | ||
43 | extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); | ||
44 | extern void falcon_init_tx(struct efx_tx_queue *tx_queue); | ||
45 | extern void falcon_fini_tx(struct efx_tx_queue *tx_queue); | ||
46 | extern void falcon_remove_tx(struct efx_tx_queue *tx_queue); | ||
47 | extern void falcon_push_buffers(struct efx_tx_queue *tx_queue); | ||
48 | |||
49 | /* RX data path */ | ||
50 | extern int falcon_probe_rx(struct efx_rx_queue *rx_queue); | ||
51 | extern void falcon_init_rx(struct efx_rx_queue *rx_queue); | ||
52 | extern void falcon_fini_rx(struct efx_rx_queue *rx_queue); | ||
53 | extern void falcon_remove_rx(struct efx_rx_queue *rx_queue); | ||
54 | extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue); | ||
55 | |||
56 | /* Event data path */ | ||
57 | extern int falcon_probe_eventq(struct efx_channel *channel); | ||
58 | extern void falcon_init_eventq(struct efx_channel *channel); | ||
59 | extern void falcon_fini_eventq(struct efx_channel *channel); | ||
60 | extern void falcon_remove_eventq(struct efx_channel *channel); | ||
61 | extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota); | ||
62 | extern void falcon_eventq_read_ack(struct efx_channel *channel); | ||
63 | |||
64 | /* Ports */ | ||
65 | extern int falcon_probe_port(struct efx_nic *efx); | ||
66 | extern void falcon_remove_port(struct efx_nic *efx); | ||
67 | |||
68 | /* MAC/PHY */ | ||
69 | extern int falcon_switch_mac(struct efx_nic *efx); | ||
70 | extern bool falcon_xaui_link_ok(struct efx_nic *efx); | ||
71 | extern int falcon_dma_stats(struct efx_nic *efx, | ||
72 | unsigned int done_offset); | ||
73 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); | ||
74 | extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); | ||
75 | extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); | ||
76 | |||
77 | /* Interrupts and test events */ | ||
78 | extern int falcon_init_interrupt(struct efx_nic *efx); | ||
79 | extern void falcon_enable_interrupts(struct efx_nic *efx); | ||
80 | extern void falcon_generate_test_event(struct efx_channel *channel, | ||
81 | unsigned int magic); | ||
82 | extern void falcon_sim_phy_event(struct efx_nic *efx); | ||
83 | extern void falcon_generate_interrupt(struct efx_nic *efx); | ||
84 | extern void falcon_set_int_moderation(struct efx_channel *channel); | ||
85 | extern void falcon_disable_interrupts(struct efx_nic *efx); | ||
86 | extern void falcon_fini_interrupt(struct efx_nic *efx); | ||
87 | |||
88 | #define FALCON_IRQ_MOD_RESOLUTION 5 | ||
89 | |||
90 | /* Global Resources */ | ||
91 | extern int falcon_probe_nic(struct efx_nic *efx); | ||
92 | extern int falcon_probe_resources(struct efx_nic *efx); | ||
93 | extern int falcon_init_nic(struct efx_nic *efx); | ||
94 | extern int falcon_flush_queues(struct efx_nic *efx); | ||
95 | extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); | ||
96 | extern void falcon_remove_resources(struct efx_nic *efx); | ||
97 | extern void falcon_remove_nic(struct efx_nic *efx); | ||
98 | extern void falcon_update_nic_stats(struct efx_nic *efx); | ||
99 | extern void falcon_set_multicast_hash(struct efx_nic *efx); | ||
100 | extern int falcon_reset_xaui(struct efx_nic *efx); | ||
101 | |||
102 | /* Tests */ | ||
103 | struct falcon_nvconfig; | ||
104 | extern int falcon_read_nvram(struct efx_nic *efx, | ||
105 | struct falcon_nvconfig *nvconfig); | ||
106 | extern int falcon_test_registers(struct efx_nic *efx); | ||
107 | |||
108 | /************************************************************************** | ||
109 | * | ||
110 | * Falcon MAC stats | ||
111 | * | ||
112 | ************************************************************************** | ||
113 | */ | ||
114 | |||
115 | #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) | ||
116 | #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) | ||
117 | |||
118 | /* Retrieve statistic from statistics block */ | ||
119 | #define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ | ||
120 | if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ | ||
121 | (efx)->mac_stats.efx_stat += le16_to_cpu( \ | ||
122 | *((__force __le16 *) \ | ||
123 | (efx->stats_buffer.addr + \ | ||
124 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
125 | else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ | ||
126 | (efx)->mac_stats.efx_stat += le32_to_cpu( \ | ||
127 | *((__force __le32 *) \ | ||
128 | (efx->stats_buffer.addr + \ | ||
129 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
130 | else \ | ||
131 | (efx)->mac_stats.efx_stat += le64_to_cpu( \ | ||
132 | *((__force __le64 *) \ | ||
133 | (efx->stats_buffer.addr + \ | ||
134 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
135 | } while (0) | ||
136 | |||
137 | #define FALCON_MAC_STATS_SIZE 0x100 | ||
138 | |||
139 | #define MAC_DATA_LBN 0 | ||
140 | #define MAC_DATA_WIDTH 32 | ||
141 | |||
142 | extern void falcon_generate_event(struct efx_channel *channel, | ||
143 | efx_qword_t *event); | ||
144 | |||
145 | #endif /* EFX_FALCON_H */ | ||
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c new file mode 100644 index 000000000000..c7a933a3292e --- /dev/null +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -0,0 +1,754 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/rtnetlink.h> | ||
11 | |||
12 | #include "net_driver.h" | ||
13 | #include "phy.h" | ||
14 | #include "efx.h" | ||
15 | #include "nic.h" | ||
16 | #include "regs.h" | ||
17 | #include "io.h" | ||
18 | #include "workarounds.h" | ||
19 | |||
20 | /* Macros for unpacking the board revision */ | ||
21 | /* The revision info is in host byte order. */ | ||
22 | #define FALCON_BOARD_TYPE(_rev) (_rev >> 8) | ||
23 | #define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) | ||
24 | #define FALCON_BOARD_MINOR(_rev) (_rev & 0xf) | ||
25 | |||
26 | /* Board types */ | ||
27 | #define FALCON_BOARD_SFE4001 0x01 | ||
28 | #define FALCON_BOARD_SFE4002 0x02 | ||
29 | #define FALCON_BOARD_SFN4111T 0x51 | ||
30 | #define FALCON_BOARD_SFN4112F 0x52 | ||
31 | |||
32 | /* Board temperature is about 15°C above ambient when air flow is | ||
33 | * limited. */ | ||
34 | #define FALCON_BOARD_TEMP_BIAS 15 | ||
35 | |||
36 | /* SFC4000 datasheet says: 'The maximum permitted junction temperature | ||
37 | * is 125°C; the thermal design of the environment for the SFC4000 | ||
38 | * should aim to keep this well below 100°C.' */ | ||
39 | #define FALCON_JUNC_TEMP_MAX 90 | ||
40 | |||
41 | /***************************************************************************** | ||
42 | * Support for LM87 sensor chip used on several boards | ||
43 | */ | ||
44 | #define LM87_REG_ALARMS1 0x41 | ||
45 | #define LM87_REG_ALARMS2 0x42 | ||
46 | #define LM87_IN_LIMITS(nr, _min, _max) \ | ||
47 | 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min | ||
48 | #define LM87_AIN_LIMITS(nr, _min, _max) \ | ||
49 | 0x3B + (nr), _max, 0x1A + (nr), _min | ||
50 | #define LM87_TEMP_INT_LIMITS(_min, _max) \ | ||
51 | 0x39, _max, 0x3A, _min | ||
52 | #define LM87_TEMP_EXT1_LIMITS(_min, _max) \ | ||
53 | 0x37, _max, 0x38, _min | ||
54 | |||
55 | #define LM87_ALARM_TEMP_INT 0x10 | ||
56 | #define LM87_ALARM_TEMP_EXT1 0x20 | ||
57 | |||
58 | #if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) | ||
59 | |||
60 | static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, | ||
61 | const u8 *reg_values) | ||
62 | { | ||
63 | struct falcon_board *board = falcon_board(efx); | ||
64 | struct i2c_client *client = i2c_new_device(&board->i2c_adap, info); | ||
65 | int rc; | ||
66 | |||
67 | if (!client) | ||
68 | return -EIO; | ||
69 | |||
70 | while (*reg_values) { | ||
71 | u8 reg = *reg_values++; | ||
72 | u8 value = *reg_values++; | ||
73 | rc = i2c_smbus_write_byte_data(client, reg, value); | ||
74 | if (rc) | ||
75 | goto err; | ||
76 | } | ||
77 | |||
78 | board->hwmon_client = client; | ||
79 | return 0; | ||
80 | |||
81 | err: | ||
82 | i2c_unregister_device(client); | ||
83 | return rc; | ||
84 | } | ||
85 | |||
86 | static void efx_fini_lm87(struct efx_nic *efx) | ||
87 | { | ||
88 | i2c_unregister_device(falcon_board(efx)->hwmon_client); | ||
89 | } | ||
90 | |||
91 | static int efx_check_lm87(struct efx_nic *efx, unsigned mask) | ||
92 | { | ||
93 | struct i2c_client *client = falcon_board(efx)->hwmon_client; | ||
94 | s32 alarms1, alarms2; | ||
95 | |||
96 | /* If link is up then do not monitor temperature */ | ||
97 | if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) | ||
98 | return 0; | ||
99 | |||
100 | alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); | ||
101 | alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); | ||
102 | if (alarms1 < 0) | ||
103 | return alarms1; | ||
104 | if (alarms2 < 0) | ||
105 | return alarms2; | ||
106 | alarms1 &= mask; | ||
107 | alarms2 &= mask >> 8; | ||
108 | if (alarms1 || alarms2) { | ||
109 | EFX_ERR(efx, | ||
110 | "LM87 detected a hardware failure (status %02x:%02x)" | ||
111 | "%s%s\n", | ||
112 | alarms1, alarms2, | ||
113 | (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "", | ||
114 | (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : ""); | ||
115 | return -ERANGE; | ||
116 | } | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | #else /* !CONFIG_SENSORS_LM87 */ | ||
122 | |||
123 | static inline int | ||
124 | efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, | ||
125 | const u8 *reg_values) | ||
126 | { | ||
127 | return 0; | ||
128 | } | ||
129 | static inline void efx_fini_lm87(struct efx_nic *efx) | ||
130 | { | ||
131 | } | ||
132 | static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask) | ||
133 | { | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | #endif /* CONFIG_SENSORS_LM87 */ | ||
138 | |||
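efx_init_lm87() above consumes reg_values as a flat list of (register, value) byte pairs terminated by a zero register, which is why LM87_IN_LIMITS() and the related macros each expand to "high-limit register, max, low-limit register, min". Here is a self-contained sketch of that table-driven initialisation; write_byte() is a hypothetical stand-in for i2c_smbus_write_byte_data(), and the two limits are copied from the SFE4002 table further down.

#include <stdio.h>
#include <stdint.h>

/* Same packing convention as LM87_IN_LIMITS() above: each limit expands to
 * "high-limit register, max, low-limit register, min", and the whole table
 * is a flat list of (register, value) byte pairs ended by a 0 register. */
#define IN_LIMITS(nr, _min, _max) \
	0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min

static const uint8_t reg_values[] = {
	IN_LIMITS(0, 0x7c, 0x99),	/* 2.5V rail: 1.8V +/- 10% */
	IN_LIMITS(1, 0x4c, 0x5e),	/* Vccp1:     1.2V +/- 10% */
	0
};

/* Stand-in for i2c_smbus_write_byte_data(); just prints the transaction. */
static int write_byte(uint8_t reg, uint8_t value)
{
	printf("write reg 0x%02x = 0x%02x\n", reg, value);
	return 0;
}

int main(void)
{
	const uint8_t *p = reg_values;

	/* Walk the table the same way efx_init_lm87() does. */
	while (*p) {
		uint8_t reg = *p++;
		uint8_t value = *p++;

		if (write_byte(reg, value))
			return 1;
	}
	return 0;
}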
139 | /***************************************************************************** | ||
140 | * Support for the SFE4001 and SFN4111T NICs. | ||
141 | * | ||
142 | * The SFE4001 does not power-up fully at reset due to its high power | ||
143 | * consumption. We control its power via a PCA9539 I/O expander. | ||
144 | * Both boards have a MAX6647 temperature monitor which we expose to | ||
145 | * the lm90 driver. | ||
146 | * | ||
147 | * This also provides minimal support for reflashing the PHY, which is | ||
148 | * initiated by resetting it with the FLASH_CFG_1 pin pulled down. | ||
149 | * On SFE4001 rev A2 and later this is connected to the 3V3X output of | ||
150 | * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3. | ||
151 | * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually | ||
152 | * exclusive with the network device being open. | ||
153 | */ | ||
154 | |||
155 | /************************************************************************** | ||
156 | * Support for I2C IO Expander device on SFE4001 | ||
157 | */ | ||
158 | #define PCA9539 0x74 | ||
159 | |||
160 | #define P0_IN 0x00 | ||
161 | #define P0_OUT 0x02 | ||
162 | #define P0_INVERT 0x04 | ||
163 | #define P0_CONFIG 0x06 | ||
164 | |||
165 | #define P0_EN_1V0X_LBN 0 | ||
166 | #define P0_EN_1V0X_WIDTH 1 | ||
167 | #define P0_EN_1V2_LBN 1 | ||
168 | #define P0_EN_1V2_WIDTH 1 | ||
169 | #define P0_EN_2V5_LBN 2 | ||
170 | #define P0_EN_2V5_WIDTH 1 | ||
171 | #define P0_EN_3V3X_LBN 3 | ||
172 | #define P0_EN_3V3X_WIDTH 1 | ||
173 | #define P0_EN_5V_LBN 4 | ||
174 | #define P0_EN_5V_WIDTH 1 | ||
175 | #define P0_SHORTEN_JTAG_LBN 5 | ||
176 | #define P0_SHORTEN_JTAG_WIDTH 1 | ||
177 | #define P0_X_TRST_LBN 6 | ||
178 | #define P0_X_TRST_WIDTH 1 | ||
179 | #define P0_DSP_RESET_LBN 7 | ||
180 | #define P0_DSP_RESET_WIDTH 1 | ||
181 | |||
182 | #define P1_IN 0x01 | ||
183 | #define P1_OUT 0x03 | ||
184 | #define P1_INVERT 0x05 | ||
185 | #define P1_CONFIG 0x07 | ||
186 | |||
187 | #define P1_AFE_PWD_LBN 0 | ||
188 | #define P1_AFE_PWD_WIDTH 1 | ||
189 | #define P1_DSP_PWD25_LBN 1 | ||
190 | #define P1_DSP_PWD25_WIDTH 1 | ||
191 | #define P1_RESERVED_LBN 2 | ||
192 | #define P1_RESERVED_WIDTH 2 | ||
193 | #define P1_SPARE_LBN 4 | ||
194 | #define P1_SPARE_WIDTH 4 | ||
195 | |||
196 | /* Temperature Sensor */ | ||
197 | #define MAX664X_REG_RSL 0x02 | ||
198 | #define MAX664X_REG_WLHO 0x0B | ||
199 | |||
200 | static void sfe4001_poweroff(struct efx_nic *efx) | ||
201 | { | ||
202 | struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; | ||
203 | struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; | ||
204 | |||
205 | /* Turn off all power rails and disable outputs */ | ||
206 | i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff); | ||
207 | i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff); | ||
208 | i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff); | ||
209 | |||
210 | /* Clear any over-temperature alert */ | ||
211 | i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); | ||
212 | } | ||
213 | |||
214 | static int sfe4001_poweron(struct efx_nic *efx) | ||
215 | { | ||
216 | struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; | ||
217 | struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; | ||
218 | unsigned int i, j; | ||
219 | int rc; | ||
220 | u8 out; | ||
221 | |||
222 | /* Clear any previous over-temperature alert */ | ||
223 | rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); | ||
224 | if (rc < 0) | ||
225 | return rc; | ||
226 | |||
227 | /* Enable port 0 and port 1 outputs on IO expander */ | ||
228 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00); | ||
229 | if (rc) | ||
230 | return rc; | ||
231 | rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, | ||
232 | 0xff & ~(1 << P1_SPARE_LBN)); | ||
233 | if (rc) | ||
234 | goto fail_on; | ||
235 | |||
236 | /* If PHY power is on, turn it all off and wait 1 second to | ||
237 | * ensure a full reset. | ||
238 | */ | ||
239 | rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT); | ||
240 | if (rc < 0) | ||
241 | goto fail_on; | ||
242 | out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | | ||
243 | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | | ||
244 | (0 << P0_EN_1V0X_LBN)); | ||
245 | if (rc != out) { | ||
246 | EFX_INFO(efx, "power-cycling PHY\n"); | ||
247 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | ||
248 | if (rc) | ||
249 | goto fail_on; | ||
250 | schedule_timeout_uninterruptible(HZ); | ||
251 | } | ||
252 | |||
253 | for (i = 0; i < 20; ++i) { | ||
254 | /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */ | ||
255 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | | ||
256 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | | ||
257 | (1 << P0_X_TRST_LBN)); | ||
258 | if (efx->phy_mode & PHY_MODE_SPECIAL) | ||
259 | out |= 1 << P0_EN_3V3X_LBN; | ||
260 | |||
261 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | ||
262 | if (rc) | ||
263 | goto fail_on; | ||
264 | msleep(10); | ||
265 | |||
266 | /* Turn on 1V power rail */ | ||
267 | out &= ~(1 << P0_EN_1V0X_LBN); | ||
268 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | ||
269 | if (rc) | ||
270 | goto fail_on; | ||
271 | |||
272 | EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i); | ||
273 | |||
274 | /* In flash config mode, DSP does not turn on AFE, so | ||
275 | * just wait 1 second. | ||
276 | */ | ||
277 | if (efx->phy_mode & PHY_MODE_SPECIAL) { | ||
278 | schedule_timeout_uninterruptible(HZ); | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | for (j = 0; j < 10; ++j) { | ||
283 | msleep(100); | ||
284 | |||
285 | /* Check DSP has asserted AFE power line */ | ||
286 | rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN); | ||
287 | if (rc < 0) | ||
288 | goto fail_on; | ||
289 | if (rc & (1 << P1_AFE_PWD_LBN)) | ||
290 | return 0; | ||
291 | } | ||
292 | } | ||
293 | |||
294 | EFX_INFO(efx, "timed out waiting for DSP boot\n"); | ||
295 | rc = -ETIMEDOUT; | ||
296 | fail_on: | ||
297 | sfe4001_poweroff(efx); | ||
298 | return rc; | ||
299 | } | ||
300 | |||
301 | static int sfn4111t_reset(struct efx_nic *efx) | ||
302 | { | ||
303 | struct falcon_board *board = falcon_board(efx); | ||
304 | efx_oword_t reg; | ||
305 | |||
306 | /* GPIO 3 and the GPIO register are shared with I2C, so block that */ | ||
307 | i2c_lock_adapter(&board->i2c_adap); | ||
308 | |||
309 | /* Pull RST_N (GPIO 2) low then let it up again, setting the | ||
310 | * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the | ||
311 | * output enables; the output levels should always be 0 (low) | ||
312 | * and we rely on external pull-ups. */ | ||
313 | efx_reado(efx, ®, FR_AB_GPIO_CTL); | ||
314 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true); | ||
315 | efx_writeo(efx, ®, FR_AB_GPIO_CTL); | ||
316 | msleep(1000); | ||
317 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false); | ||
318 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, | ||
319 | !!(efx->phy_mode & PHY_MODE_SPECIAL)); | ||
320 | efx_writeo(efx, ®, FR_AB_GPIO_CTL); | ||
321 | msleep(1); | ||
322 | |||
323 | i2c_unlock_adapter(&board->i2c_adap); | ||
324 | |||
325 | ssleep(1); | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static ssize_t show_phy_flash_cfg(struct device *dev, | ||
330 | struct device_attribute *attr, char *buf) | ||
331 | { | ||
332 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
333 | return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); | ||
334 | } | ||
335 | |||
336 | static ssize_t set_phy_flash_cfg(struct device *dev, | ||
337 | struct device_attribute *attr, | ||
338 | const char *buf, size_t count) | ||
339 | { | ||
340 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
341 | enum efx_phy_mode old_mode, new_mode; | ||
342 | int err; | ||
343 | |||
344 | rtnl_lock(); | ||
345 | old_mode = efx->phy_mode; | ||
346 | if (count == 0 || *buf == '0') | ||
347 | new_mode = old_mode & ~PHY_MODE_SPECIAL; | ||
348 | else | ||
349 | new_mode = PHY_MODE_SPECIAL; | ||
350 | if (old_mode == new_mode) { | ||
351 | err = 0; | ||
352 | } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { | ||
353 | err = -EBUSY; | ||
354 | } else { | ||
355 | /* Reset the PHY, reconfigure the MAC and enable/disable | ||
356 | * MAC stats accordingly. */ | ||
357 | efx->phy_mode = new_mode; | ||
358 | if (new_mode & PHY_MODE_SPECIAL) | ||
359 | falcon_stop_nic_stats(efx); | ||
360 | if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001) | ||
361 | err = sfe4001_poweron(efx); | ||
362 | else | ||
363 | err = sfn4111t_reset(efx); | ||
364 | if (!err) | ||
365 | err = efx_reconfigure_port(efx); | ||
366 | if (!(new_mode & PHY_MODE_SPECIAL)) | ||
367 | falcon_start_nic_stats(efx); | ||
368 | } | ||
369 | rtnl_unlock(); | ||
370 | |||
371 | return err ? err : count; | ||
372 | } | ||
373 | |||
374 | static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg); | ||
375 | |||
376 | static void sfe4001_fini(struct efx_nic *efx) | ||
377 | { | ||
378 | struct falcon_board *board = falcon_board(efx); | ||
379 | |||
380 | EFX_INFO(efx, "%s\n", __func__); | ||
381 | |||
382 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
383 | sfe4001_poweroff(efx); | ||
384 | i2c_unregister_device(board->ioexp_client); | ||
385 | i2c_unregister_device(board->hwmon_client); | ||
386 | } | ||
387 | |||
388 | static int sfe4001_check_hw(struct efx_nic *efx) | ||
389 | { | ||
390 | s32 status; | ||
391 | |||
392 | /* If XAUI link is up then do not monitor */ | ||
393 | if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required) | ||
394 | return 0; | ||
395 | |||
396 | /* Check the powered status of the PHY. Lack of power implies that | ||
397 | * the MAX6647 has shut down power to it, probably due to a temp. | ||
398 | * alarm. We read the power status rather than the MAX6647 status | ||
399 | * directly because the latter is read-to-clear and would thus | ||
400 | * start to power up the PHY again when polled, causing us to blip | ||
401 | * the power undesirably. | ||
402 | * We know we can read from the IO expander because we did | ||
403 | * it during power-on. Assume failure now is bad news. */ | ||
404 | status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN); | ||
405 | if (status >= 0 && | ||
406 | (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0) | ||
407 | return 0; | ||
408 | |||
409 | /* Use board power control, not PHY power control */ | ||
410 | sfe4001_poweroff(efx); | ||
411 | efx->phy_mode = PHY_MODE_OFF; | ||
412 | |||
413 | return (status < 0) ? -EIO : -ERANGE; | ||
414 | } | ||
415 | |||
416 | static struct i2c_board_info sfe4001_hwmon_info = { | ||
417 | I2C_BOARD_INFO("max6647", 0x4e), | ||
418 | }; | ||
419 | |||
420 | /* This board uses an I2C expander to provide power to the PHY, which needs to | ||
421 | * be turned on before the PHY can be used. | ||
422 | * Context: Process context, rtnl lock held | ||
423 | */ | ||
424 | static int sfe4001_init(struct efx_nic *efx) | ||
425 | { | ||
426 | struct falcon_board *board = falcon_board(efx); | ||
427 | int rc; | ||
428 | |||
429 | #if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE) | ||
430 | board->hwmon_client = | ||
431 | i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info); | ||
432 | #else | ||
433 | board->hwmon_client = | ||
434 | i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr); | ||
435 | #endif | ||
436 | if (!board->hwmon_client) | ||
437 | return -EIO; | ||
438 | |||
439 | /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ | ||
440 | rc = i2c_smbus_write_byte_data(board->hwmon_client, | ||
441 | MAX664X_REG_WLHO, 90); | ||
442 | if (rc) | ||
443 | goto fail_hwmon; | ||
444 | |||
445 | board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539); | ||
446 | if (!board->ioexp_client) { | ||
447 | rc = -EIO; | ||
448 | goto fail_hwmon; | ||
449 | } | ||
450 | |||
451 | if (efx->phy_mode & PHY_MODE_SPECIAL) { | ||
452 | /* PHY won't generate a 156.25 MHz clock and MAC stats fetch | ||
453 | * will fail. */ | ||
454 | falcon_stop_nic_stats(efx); | ||
455 | } | ||
456 | rc = sfe4001_poweron(efx); | ||
457 | if (rc) | ||
458 | goto fail_ioexp; | ||
459 | |||
460 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
461 | if (rc) | ||
462 | goto fail_on; | ||
463 | |||
464 | EFX_INFO(efx, "PHY is powered on\n"); | ||
465 | return 0; | ||
466 | |||
467 | fail_on: | ||
468 | sfe4001_poweroff(efx); | ||
469 | fail_ioexp: | ||
470 | i2c_unregister_device(board->ioexp_client); | ||
471 | fail_hwmon: | ||
472 | i2c_unregister_device(board->hwmon_client); | ||
473 | return rc; | ||
474 | } | ||
475 | |||
476 | static int sfn4111t_check_hw(struct efx_nic *efx) | ||
477 | { | ||
478 | s32 status; | ||
479 | |||
480 | /* If XAUI link is up then do not monitor */ | ||
481 | if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required) | ||
482 | return 0; | ||
483 | |||
484 | /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */ | ||
485 | status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client, | ||
486 | MAX664X_REG_RSL); | ||
487 | if (status < 0) | ||
488 | return -EIO; | ||
489 | if (status & 0x57) | ||
490 | return -ERANGE; | ||
491 | return 0; | ||
492 | } | ||
493 | |||
494 | static void sfn4111t_fini(struct efx_nic *efx) | ||
495 | { | ||
496 | EFX_INFO(efx, "%s\n", __func__); | ||
497 | |||
498 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
499 | i2c_unregister_device(falcon_board(efx)->hwmon_client); | ||
500 | } | ||
501 | |||
502 | static struct i2c_board_info sfn4111t_a0_hwmon_info = { | ||
503 | I2C_BOARD_INFO("max6647", 0x4e), | ||
504 | }; | ||
505 | |||
506 | static struct i2c_board_info sfn4111t_r5_hwmon_info = { | ||
507 | I2C_BOARD_INFO("max6646", 0x4d), | ||
508 | }; | ||
509 | |||
510 | static void sfn4111t_init_phy(struct efx_nic *efx) | ||
511 | { | ||
512 | if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { | ||
513 | if (sft9001_wait_boot(efx) != -EINVAL) | ||
514 | return; | ||
515 | |||
516 | efx->phy_mode = PHY_MODE_SPECIAL; | ||
517 | falcon_stop_nic_stats(efx); | ||
518 | } | ||
519 | |||
520 | sfn4111t_reset(efx); | ||
521 | sft9001_wait_boot(efx); | ||
522 | } | ||
523 | |||
524 | static int sfn4111t_init(struct efx_nic *efx) | ||
525 | { | ||
526 | struct falcon_board *board = falcon_board(efx); | ||
527 | int rc; | ||
528 | |||
529 | board->hwmon_client = | ||
530 | i2c_new_device(&board->i2c_adap, | ||
531 | (board->minor < 5) ? | ||
532 | &sfn4111t_a0_hwmon_info : | ||
533 | &sfn4111t_r5_hwmon_info); | ||
534 | if (!board->hwmon_client) | ||
535 | return -EIO; | ||
536 | |||
537 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
538 | if (rc) | ||
539 | goto fail_hwmon; | ||
540 | |||
541 | if (efx->phy_mode & PHY_MODE_SPECIAL) | ||
542 | /* PHY may not generate a 156.25 MHz clock and MAC | ||
543 | * stats fetch will fail. */ | ||
544 | falcon_stop_nic_stats(efx); | ||
545 | |||
546 | return 0; | ||
547 | |||
548 | fail_hwmon: | ||
549 | i2c_unregister_device(board->hwmon_client); | ||
550 | return rc; | ||
551 | } | ||
552 | |||
553 | /***************************************************************************** | ||
554 | * Support for the SFE4002 | ||
555 | * | ||
556 | */ | ||
557 | static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ | ||
558 | |||
559 | static const u8 sfe4002_lm87_regs[] = { | ||
560 | LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ | ||
561 | LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ | ||
562 | LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ | ||
563 | LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ | ||
564 | LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ | ||
565 | LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ | ||
566 | LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ | ||
567 | LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ | ||
568 | LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), | ||
569 | LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), | ||
570 | 0 | ||
571 | }; | ||
572 | |||
573 | static struct i2c_board_info sfe4002_hwmon_info = { | ||
574 | I2C_BOARD_INFO("lm87", 0x2e), | ||
575 | .platform_data = &sfe4002_lm87_channel, | ||
576 | }; | ||
577 | |||
578 | /****************************************************************************/ | ||
579 | /* LED allocations. Note that on rev A0 boards the schematic and the reality | ||
580 | * differ: red and green are swapped. Below is the fixed (A1) layout (there | ||
581 | * are only 3 A0 boards in existence, so no real reason to make this | ||
582 | * conditional). | ||
583 | */ | ||
584 | #define SFE4002_FAULT_LED (2) /* Red */ | ||
585 | #define SFE4002_RX_LED (0) /* Green */ | ||
586 | #define SFE4002_TX_LED (1) /* Amber */ | ||
587 | |||
588 | static void sfe4002_init_phy(struct efx_nic *efx) | ||
589 | { | ||
590 | /* Set the TX and RX LEDs to reflect status and activity, and the | ||
591 | * fault LED off */ | ||
592 | falcon_qt202x_set_led(efx, SFE4002_TX_LED, | ||
593 | QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT); | ||
594 | falcon_qt202x_set_led(efx, SFE4002_RX_LED, | ||
595 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT); | ||
596 | falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF); | ||
597 | } | ||
598 | |||
599 | static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
600 | { | ||
601 | falcon_qt202x_set_led( | ||
602 | efx, SFE4002_FAULT_LED, | ||
603 | (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF); | ||
604 | } | ||
605 | |||
606 | static int sfe4002_check_hw(struct efx_nic *efx) | ||
607 | { | ||
608 | struct falcon_board *board = falcon_board(efx); | ||
609 | |||
610 | /* Rev. A0 4002 boards report a temperature fault the whole time | ||
611 | * (bad sensor), so we mask it out. */ | ||
612 | unsigned alarm_mask = | ||
613 | (board->major == 0 && board->minor == 0) ? | ||
614 | ~LM87_ALARM_TEMP_EXT1 : ~0; | ||
615 | |||
616 | return efx_check_lm87(efx, alarm_mask); | ||
617 | } | ||
618 | |||
619 | static int sfe4002_init(struct efx_nic *efx) | ||
620 | { | ||
621 | return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs); | ||
622 | } | ||
623 | |||
624 | /***************************************************************************** | ||
625 | * Support for the SFN4112F | ||
626 | * | ||
627 | */ | ||
628 | static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ | ||
629 | |||
630 | static const u8 sfn4112f_lm87_regs[] = { | ||
631 | LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ | ||
632 | LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ | ||
633 | LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ | ||
634 | LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ | ||
635 | LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ | ||
636 | LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ | ||
637 | LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), | ||
638 | LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), | ||
639 | 0 | ||
640 | }; | ||
641 | |||
642 | static struct i2c_board_info sfn4112f_hwmon_info = { | ||
643 | I2C_BOARD_INFO("lm87", 0x2e), | ||
644 | .platform_data = &sfn4112f_lm87_channel, | ||
645 | }; | ||
646 | |||
647 | #define SFN4112F_ACT_LED 0 | ||
648 | #define SFN4112F_LINK_LED 1 | ||
649 | |||
650 | static void sfn4112f_init_phy(struct efx_nic *efx) | ||
651 | { | ||
652 | falcon_qt202x_set_led(efx, SFN4112F_ACT_LED, | ||
653 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT); | ||
654 | falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, | ||
655 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT); | ||
656 | } | ||
657 | |||
658 | static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
659 | { | ||
660 | int reg; | ||
661 | |||
662 | switch (mode) { | ||
663 | case EFX_LED_OFF: | ||
664 | reg = QUAKE_LED_OFF; | ||
665 | break; | ||
666 | case EFX_LED_ON: | ||
667 | reg = QUAKE_LED_ON; | ||
668 | break; | ||
669 | default: | ||
670 | reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT; | ||
671 | break; | ||
672 | } | ||
673 | |||
674 | falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg); | ||
675 | } | ||
676 | |||
677 | static int sfn4112f_check_hw(struct efx_nic *efx) | ||
678 | { | ||
679 | /* Mask out unused sensors */ | ||
680 | return efx_check_lm87(efx, ~0x48); | ||
681 | } | ||
682 | |||
683 | static int sfn4112f_init(struct efx_nic *efx) | ||
684 | { | ||
685 | return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); | ||
686 | } | ||
687 | |||
688 | static const struct falcon_board_type board_types[] = { | ||
689 | { | ||
690 | .id = FALCON_BOARD_SFE4001, | ||
691 | .ref_model = "SFE4001", | ||
692 | .gen_type = "10GBASE-T adapter", | ||
693 | .init = sfe4001_init, | ||
694 | .init_phy = efx_port_dummy_op_void, | ||
695 | .fini = sfe4001_fini, | ||
696 | .set_id_led = tenxpress_set_id_led, | ||
697 | .monitor = sfe4001_check_hw, | ||
698 | }, | ||
699 | { | ||
700 | .id = FALCON_BOARD_SFE4002, | ||
701 | .ref_model = "SFE4002", | ||
702 | .gen_type = "XFP adapter", | ||
703 | .init = sfe4002_init, | ||
704 | .init_phy = sfe4002_init_phy, | ||
705 | .fini = efx_fini_lm87, | ||
706 | .set_id_led = sfe4002_set_id_led, | ||
707 | .monitor = sfe4002_check_hw, | ||
708 | }, | ||
709 | { | ||
710 | .id = FALCON_BOARD_SFN4111T, | ||
711 | .ref_model = "SFN4111T", | ||
712 | .gen_type = "100/1000/10GBASE-T adapter", | ||
713 | .init = sfn4111t_init, | ||
714 | .init_phy = sfn4111t_init_phy, | ||
715 | .fini = sfn4111t_fini, | ||
716 | .set_id_led = tenxpress_set_id_led, | ||
717 | .monitor = sfn4111t_check_hw, | ||
718 | }, | ||
719 | { | ||
720 | .id = FALCON_BOARD_SFN4112F, | ||
721 | .ref_model = "SFN4112F", | ||
722 | .gen_type = "SFP+ adapter", | ||
723 | .init = sfn4112f_init, | ||
724 | .init_phy = sfn4112f_init_phy, | ||
725 | .fini = efx_fini_lm87, | ||
726 | .set_id_led = sfn4112f_set_id_led, | ||
727 | .monitor = sfn4112f_check_hw, | ||
728 | }, | ||
729 | }; | ||
730 | |||
731 | int falcon_probe_board(struct efx_nic *efx, u16 revision_info) | ||
732 | { | ||
733 | struct falcon_board *board = falcon_board(efx); | ||
734 | u8 type_id = FALCON_BOARD_TYPE(revision_info); | ||
735 | int i; | ||
736 | |||
737 | board->major = FALCON_BOARD_MAJOR(revision_info); | ||
738 | board->minor = FALCON_BOARD_MINOR(revision_info); | ||
739 | |||
740 | for (i = 0; i < ARRAY_SIZE(board_types); i++) | ||
741 | if (board_types[i].id == type_id) | ||
742 | board->type = &board_types[i]; | ||
743 | |||
744 | if (board->type) { | ||
745 | EFX_INFO(efx, "board is %s rev %c%d\n", | ||
746 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | ||
747 | ? board->type->ref_model : board->type->gen_type, | ||
748 | 'A' + board->major, board->minor); | ||
749 | return 0; | ||
750 | } else { | ||
751 | EFX_ERR(efx, "unknown board type %d\n", type_id); | ||
752 | return -ENODEV; | ||
753 | } | ||
754 | } | ||
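falcon_probe_board() above unpacks a 16-bit revision word with the FALCON_BOARD_* macros: board type in bits 15:8, major (letter) revision in bits 7:4 and minor revision in bits 3:0, then reports it as "rev %c%d" using 'A' + major. A self-contained worked example of that unpacking, using a made-up revision word:

#include <stdio.h>
#include <stdint.h>

/* Same unpacking as the FALCON_BOARD_* macros above. */
#define FALCON_BOARD_TYPE(_rev)  ((_rev) >> 8)
#define FALCON_BOARD_MAJOR(_rev) (((_rev) >> 4) & 0xf)
#define FALCON_BOARD_MINOR(_rev) ((_rev) & 0xf)

int main(void)
{
	uint16_t revision_info = 0x0213;	/* hypothetical: type 0x02, rev B3 */

	printf("board type 0x%02x rev %c%d\n",
	       FALCON_BOARD_TYPE(revision_info),
	       'A' + FALCON_BOARD_MAJOR(revision_info),
	       FALCON_BOARD_MINOR(revision_info));
	return 0;		/* prints: board type 0x02 rev B3 */
}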
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c index 8865eae20ac5..7dadfcbd6ce7 100644 --- a/drivers/net/sfc/falcon_gmac.c +++ b/drivers/net/sfc/falcon_gmac.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -11,11 +11,10 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include "net_driver.h" | 12 | #include "net_driver.h" |
13 | #include "efx.h" | 13 | #include "efx.h" |
14 | #include "falcon.h" | 14 | #include "nic.h" |
15 | #include "mac.h" | 15 | #include "mac.h" |
16 | #include "falcon_hwdefs.h" | 16 | #include "regs.h" |
17 | #include "falcon_io.h" | 17 | #include "io.h" |
18 | #include "gmii.h" | ||
19 | 18 | ||
20 | /************************************************************************** | 19 | /************************************************************************** |
21 | * | 20 | * |
@@ -23,106 +22,109 @@ | |||
23 | * | 22 | * |
24 | *************************************************************************/ | 23 | *************************************************************************/ |
25 | 24 | ||
26 | static void falcon_reconfigure_gmac(struct efx_nic *efx) | 25 | static int falcon_reconfigure_gmac(struct efx_nic *efx) |
27 | { | 26 | { |
27 | struct efx_link_state *link_state = &efx->link_state; | ||
28 | bool loopback, tx_fc, rx_fc, bytemode; | 28 | bool loopback, tx_fc, rx_fc, bytemode; |
29 | int if_mode; | 29 | int if_mode; |
30 | unsigned int max_frame_len; | 30 | unsigned int max_frame_len; |
31 | efx_oword_t reg; | 31 | efx_oword_t reg; |
32 | 32 | ||
33 | /* Configuration register 1 */ | 33 | /* Configuration register 1 */ |
34 | tx_fc = (efx->link_fc & EFX_FC_TX) || !efx->link_fd; | 34 | tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd; |
35 | rx_fc = !!(efx->link_fc & EFX_FC_RX); | 35 | rx_fc = !!(link_state->fc & EFX_FC_RX); |
36 | loopback = (efx->loopback_mode == LOOPBACK_GMAC); | 36 | loopback = (efx->loopback_mode == LOOPBACK_GMAC); |
37 | bytemode = (efx->link_speed == 1000); | 37 | bytemode = (link_state->speed == 1000); |
38 | 38 | ||
39 | EFX_POPULATE_OWORD_5(reg, | 39 | EFX_POPULATE_OWORD_5(reg, |
40 | GM_LOOP, loopback, | 40 | FRF_AB_GM_LOOP, loopback, |
41 | GM_TX_EN, 1, | 41 | FRF_AB_GM_TX_EN, 1, |
42 | GM_TX_FC_EN, tx_fc, | 42 | FRF_AB_GM_TX_FC_EN, tx_fc, |
43 | GM_RX_EN, 1, | 43 | FRF_AB_GM_RX_EN, 1, |
44 | GM_RX_FC_EN, rx_fc); | 44 | FRF_AB_GM_RX_FC_EN, rx_fc); |
45 | falcon_write(efx, ®, GM_CFG1_REG); | 45 | efx_writeo(efx, ®, FR_AB_GM_CFG1); |
46 | udelay(10); | 46 | udelay(10); |
47 | 47 | ||
48 | /* Configuration register 2 */ | 48 | /* Configuration register 2 */ |
49 | if_mode = (bytemode) ? 2 : 1; | 49 | if_mode = (bytemode) ? 2 : 1; |
50 | EFX_POPULATE_OWORD_5(reg, | 50 | EFX_POPULATE_OWORD_5(reg, |
51 | GM_IF_MODE, if_mode, | 51 | FRF_AB_GM_IF_MODE, if_mode, |
52 | GM_PAD_CRC_EN, 1, | 52 | FRF_AB_GM_PAD_CRC_EN, 1, |
53 | GM_LEN_CHK, 1, | 53 | FRF_AB_GM_LEN_CHK, 1, |
54 | GM_FD, efx->link_fd, | 54 | FRF_AB_GM_FD, link_state->fd, |
55 | GM_PAMBL_LEN, 0x7/*datasheet recommended */); | 55 | FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */); |
56 | 56 | ||
57 | falcon_write(efx, ®, GM_CFG2_REG); | 57 | efx_writeo(efx, ®, FR_AB_GM_CFG2); |
58 | udelay(10); | 58 | udelay(10); |
59 | 59 | ||
60 | /* Max frame len register */ | 60 | /* Max frame len register */ |
61 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); | 61 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); |
62 | EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len); | 62 | EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len); |
63 | falcon_write(efx, ®, GM_MAX_FLEN_REG); | 63 | efx_writeo(efx, ®, FR_AB_GM_MAX_FLEN); |
64 | udelay(10); | 64 | udelay(10); |
65 | 65 | ||
66 | /* FIFO configuration register 0 */ | 66 | /* FIFO configuration register 0 */ |
67 | EFX_POPULATE_OWORD_5(reg, | 67 | EFX_POPULATE_OWORD_5(reg, |
68 | GMF_FTFENREQ, 1, | 68 | FRF_AB_GMF_FTFENREQ, 1, |
69 | GMF_STFENREQ, 1, | 69 | FRF_AB_GMF_STFENREQ, 1, |
70 | GMF_FRFENREQ, 1, | 70 | FRF_AB_GMF_FRFENREQ, 1, |
71 | GMF_SRFENREQ, 1, | 71 | FRF_AB_GMF_SRFENREQ, 1, |
72 | GMF_WTMENREQ, 1); | 72 | FRF_AB_GMF_WTMENREQ, 1); |
73 | falcon_write(efx, ®, GMF_CFG0_REG); | 73 | efx_writeo(efx, ®, FR_AB_GMF_CFG0); |
74 | udelay(10); | 74 | udelay(10); |
75 | 75 | ||
76 | /* FIFO configuration register 1 */ | 76 | /* FIFO configuration register 1 */ |
77 | EFX_POPULATE_OWORD_2(reg, | 77 | EFX_POPULATE_OWORD_2(reg, |
78 | GMF_CFGFRTH, 0x12, | 78 | FRF_AB_GMF_CFGFRTH, 0x12, |
79 | GMF_CFGXOFFRTX, 0xffff); | 79 | FRF_AB_GMF_CFGXOFFRTX, 0xffff); |
80 | falcon_write(efx, ®, GMF_CFG1_REG); | 80 | efx_writeo(efx, ®, FR_AB_GMF_CFG1); |
81 | udelay(10); | 81 | udelay(10); |
82 | 82 | ||
83 | /* FIFO configuration register 2 */ | 83 | /* FIFO configuration register 2 */ |
84 | EFX_POPULATE_OWORD_2(reg, | 84 | EFX_POPULATE_OWORD_2(reg, |
85 | GMF_CFGHWM, 0x3f, | 85 | FRF_AB_GMF_CFGHWM, 0x3f, |
86 | GMF_CFGLWM, 0xa); | 86 | FRF_AB_GMF_CFGLWM, 0xa); |
87 | falcon_write(efx, ®, GMF_CFG2_REG); | 87 | efx_writeo(efx, ®, FR_AB_GMF_CFG2); |
88 | udelay(10); | 88 | udelay(10); |
89 | 89 | ||
90 | /* FIFO configuration register 3 */ | 90 | /* FIFO configuration register 3 */ |
91 | EFX_POPULATE_OWORD_2(reg, | 91 | EFX_POPULATE_OWORD_2(reg, |
92 | GMF_CFGHWMFT, 0x1c, | 92 | FRF_AB_GMF_CFGHWMFT, 0x1c, |
93 | GMF_CFGFTTH, 0x08); | 93 | FRF_AB_GMF_CFGFTTH, 0x08); |
94 | falcon_write(efx, ®, GMF_CFG3_REG); | 94 | efx_writeo(efx, ®, FR_AB_GMF_CFG3); |
95 | udelay(10); | 95 | udelay(10); |
96 | 96 | ||
97 | /* FIFO configuration register 4 */ | 97 | /* FIFO configuration register 4 */ |
98 | EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1); | 98 | EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1); |
99 | falcon_write(efx, ®, GMF_CFG4_REG); | 99 | efx_writeo(efx, ®, FR_AB_GMF_CFG4); |
100 | udelay(10); | 100 | udelay(10); |
101 | 101 | ||
102 | /* FIFO configuration register 5 */ | 102 | /* FIFO configuration register 5 */ |
103 | falcon_read(efx, ®, GMF_CFG5_REG); | 103 | efx_reado(efx, ®, FR_AB_GMF_CFG5); |
104 | EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode); | 104 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode); |
105 | EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd); | 105 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd); |
106 | EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd); | 106 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd); |
107 | EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0); | 107 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0); |
108 | falcon_write(efx, ®, GMF_CFG5_REG); | 108 | efx_writeo(efx, ®, FR_AB_GMF_CFG5); |
109 | udelay(10); | 109 | udelay(10); |
110 | 110 | ||
111 | /* MAC address */ | 111 | /* MAC address */ |
112 | EFX_POPULATE_OWORD_4(reg, | 112 | EFX_POPULATE_OWORD_4(reg, |
113 | GM_HWADDR_5, efx->net_dev->dev_addr[5], | 113 | FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5], |
114 | GM_HWADDR_4, efx->net_dev->dev_addr[4], | 114 | FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4], |
115 | GM_HWADDR_3, efx->net_dev->dev_addr[3], | 115 | FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3], |
116 | GM_HWADDR_2, efx->net_dev->dev_addr[2]); | 116 | FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]); |
117 | falcon_write(efx, ®, GM_ADR1_REG); | 117 | efx_writeo(efx, ®, FR_AB_GM_ADR1); |
118 | udelay(10); | 118 | udelay(10); |
119 | EFX_POPULATE_OWORD_2(reg, | 119 | EFX_POPULATE_OWORD_2(reg, |
120 | GM_HWADDR_1, efx->net_dev->dev_addr[1], | 120 | FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1], |
121 | GM_HWADDR_0, efx->net_dev->dev_addr[0]); | 121 | FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]); |
122 | falcon_write(efx, ®, GM_ADR2_REG); | 122 | efx_writeo(efx, ®, FR_AB_GM_ADR2); |
123 | udelay(10); | 123 | udelay(10); |
124 | 124 | ||
125 | falcon_reconfigure_mac_wrapper(efx); | 125 | falcon_reconfigure_mac_wrapper(efx); |
126 | |||
127 | return 0; | ||
126 | } | 128 | } |
127 | 129 | ||
128 | static void falcon_update_stats_gmac(struct efx_nic *efx) | 130 | static void falcon_update_stats_gmac(struct efx_nic *efx) |
@@ -130,11 +132,6 @@ static void falcon_update_stats_gmac(struct efx_nic *efx) | |||
130 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 132 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
131 | unsigned long old_rx_pause, old_tx_pause; | 133 | unsigned long old_rx_pause, old_tx_pause; |
132 | unsigned long new_rx_pause, new_tx_pause; | 134 | unsigned long new_rx_pause, new_tx_pause; |
133 | int rc; | ||
134 | |||
135 | rc = falcon_dma_stats(efx, GDmaDone_offset); | ||
136 | if (rc) | ||
137 | return; | ||
138 | 135 | ||
139 | /* Pause frames are erroneously counted as errors (SFC bug 3269) */ | 136 | /* Pause frames are erroneously counted as errors (SFC bug 3269) */ |
140 | old_rx_pause = mac_stats->rx_pause; | 137 | old_rx_pause = mac_stats->rx_pause; |
@@ -221,9 +218,13 @@ static void falcon_update_stats_gmac(struct efx_nic *efx) | |||
221 | mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64; | 218 | mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64; |
222 | } | 219 | } |
223 | 220 | ||
221 | static bool falcon_gmac_check_fault(struct efx_nic *efx) | ||
222 | { | ||
223 | return false; | ||
224 | } | ||
225 | |||
224 | struct efx_mac_operations falcon_gmac_operations = { | 226 | struct efx_mac_operations falcon_gmac_operations = { |
225 | .reconfigure = falcon_reconfigure_gmac, | 227 | .reconfigure = falcon_reconfigure_gmac, |
226 | .update_stats = falcon_update_stats_gmac, | 228 | .update_stats = falcon_update_stats_gmac, |
227 | .irq = efx_port_dummy_op_void, | 229 | .check_fault = falcon_gmac_check_fault, |
228 | .poll = efx_port_dummy_op_void, | ||
229 | }; | 230 | }; |
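falcon_reconfigure_gmac() builds each GMAC register image from (field, value) pairs with EFX_POPULATE_OWORD_n() and writes it with a single efx_writeo(); the FRF_AB_* names resolve to LBN (lowest bit number) and WIDTH constants in regs.h. Below is a self-contained sketch of that LBN/WIDTH packing idea, reduced to a 32-bit register; the field layout is invented for illustration and is not the real FR_AB_GM_CFG1 layout (the driver packs 128-bit octwords via bitfield.h).

#include <stdint.h>
#include <stdio.h>

/* Illustrative field definitions in the LBN/WIDTH style used by regs.h. */
#define GM_LOOP_LBN        0
#define GM_LOOP_WIDTH      1
#define GM_TX_EN_LBN       1
#define GM_TX_EN_WIDTH     1
#define GM_TX_FC_EN_LBN    2
#define GM_TX_FC_EN_WIDTH  1

#define FIELD_MASK(width) \
	((width) == 32 ? 0xffffffffu : ((1u << (width)) - 1))
/* Clear the field's bits, then OR in the new value shifted to its LBN. */
#define SET_FIELD(reg, name, value) \
	((reg) = ((reg) & ~(FIELD_MASK(name##_WIDTH) << name##_LBN)) | \
		 (((uint32_t)(value) & FIELD_MASK(name##_WIDTH)) << name##_LBN))

int main(void)
{
	uint32_t reg = 0;

	SET_FIELD(reg, GM_LOOP, 0);
	SET_FIELD(reg, GM_TX_EN, 1);
	SET_FIELD(reg, GM_TX_FC_EN, 1);
	printf("register image: 0x%08x\n", reg);	/* 0x00000006 */
	return 0;
}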
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h deleted file mode 100644 index 2d2261117ace..000000000000 --- a/drivers/net/sfc/falcon_hwdefs.h +++ /dev/null | |||
@@ -1,1333 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_FALCON_HWDEFS_H | ||
12 | #define EFX_FALCON_HWDEFS_H | ||
13 | |||
14 | /* | ||
15 | * Falcon hardware value definitions. | ||
16 | * Falcon is the internal codename for the SFC4000 controller that is | ||
17 | * present in SFE400X evaluation boards | ||
18 | */ | ||
19 | |||
20 | /************************************************************************** | ||
21 | * | ||
22 | * Falcon registers | ||
23 | * | ||
24 | ************************************************************************** | ||
25 | */ | ||
26 | |||
27 | /* Address region register */ | ||
28 | #define ADR_REGION_REG_KER 0x00 | ||
29 | #define ADR_REGION0_LBN 0 | ||
30 | #define ADR_REGION0_WIDTH 18 | ||
31 | #define ADR_REGION1_LBN 32 | ||
32 | #define ADR_REGION1_WIDTH 18 | ||
33 | #define ADR_REGION2_LBN 64 | ||
34 | #define ADR_REGION2_WIDTH 18 | ||
35 | #define ADR_REGION3_LBN 96 | ||
36 | #define ADR_REGION3_WIDTH 18 | ||
37 | |||
38 | /* Interrupt enable register */ | ||
39 | #define INT_EN_REG_KER 0x0010 | ||
40 | #define KER_INT_KER_LBN 3 | ||
41 | #define KER_INT_KER_WIDTH 1 | ||
42 | #define DRV_INT_EN_KER_LBN 0 | ||
43 | #define DRV_INT_EN_KER_WIDTH 1 | ||
44 | |||
45 | /* Interrupt status address register */ | ||
46 | #define INT_ADR_REG_KER 0x0030 | ||
47 | #define NORM_INT_VEC_DIS_KER_LBN 64 | ||
48 | #define NORM_INT_VEC_DIS_KER_WIDTH 1 | ||
49 | #define INT_ADR_KER_LBN 0 | ||
50 | #define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */ | ||
51 | |||
52 | /* Interrupt status register (B0 only) */ | ||
53 | #define INT_ISR0_B0 0x90 | ||
54 | #define INT_ISR1_B0 0xA0 | ||
55 | |||
56 | /* Interrupt acknowledge register (A0/A1 only) */ | ||
57 | #define INT_ACK_REG_KER_A1 0x0050 | ||
58 | #define INT_ACK_DUMMY_DATA_LBN 0 | ||
59 | #define INT_ACK_DUMMY_DATA_WIDTH 32 | ||
60 | |||
61 | /* Interrupt acknowledge work-around register (A0/A1 only )*/ | ||
62 | #define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070 | ||
63 | |||
64 | /* SPI host command register */ | ||
65 | #define EE_SPI_HCMD_REG_KER 0x0100 | ||
66 | #define EE_SPI_HCMD_CMD_EN_LBN 31 | ||
67 | #define EE_SPI_HCMD_CMD_EN_WIDTH 1 | ||
68 | #define EE_WR_TIMER_ACTIVE_LBN 28 | ||
69 | #define EE_WR_TIMER_ACTIVE_WIDTH 1 | ||
70 | #define EE_SPI_HCMD_SF_SEL_LBN 24 | ||
71 | #define EE_SPI_HCMD_SF_SEL_WIDTH 1 | ||
72 | #define EE_SPI_EEPROM 0 | ||
73 | #define EE_SPI_FLASH 1 | ||
74 | #define EE_SPI_HCMD_DABCNT_LBN 16 | ||
75 | #define EE_SPI_HCMD_DABCNT_WIDTH 5 | ||
76 | #define EE_SPI_HCMD_READ_LBN 15 | ||
77 | #define EE_SPI_HCMD_READ_WIDTH 1 | ||
78 | #define EE_SPI_READ 1 | ||
79 | #define EE_SPI_WRITE 0 | ||
80 | #define EE_SPI_HCMD_DUBCNT_LBN 12 | ||
81 | #define EE_SPI_HCMD_DUBCNT_WIDTH 2 | ||
82 | #define EE_SPI_HCMD_ADBCNT_LBN 8 | ||
83 | #define EE_SPI_HCMD_ADBCNT_WIDTH 2 | ||
84 | #define EE_SPI_HCMD_ENC_LBN 0 | ||
85 | #define EE_SPI_HCMD_ENC_WIDTH 8 | ||
86 | |||
87 | /* SPI host address register */ | ||
88 | #define EE_SPI_HADR_REG_KER 0x0110 | ||
89 | #define EE_SPI_HADR_ADR_LBN 0 | ||
90 | #define EE_SPI_HADR_ADR_WIDTH 24 | ||
91 | |||
92 | /* SPI host data register */ | ||
93 | #define EE_SPI_HDATA_REG_KER 0x0120 | ||
94 | |||
95 | /* SPI/VPD config register */ | ||
96 | #define EE_VPD_CFG_REG_KER 0x0140 | ||
97 | #define EE_VPD_EN_LBN 0 | ||
98 | #define EE_VPD_EN_WIDTH 1 | ||
99 | #define EE_VPD_EN_AD9_MODE_LBN 1 | ||
100 | #define EE_VPD_EN_AD9_MODE_WIDTH 1 | ||
101 | #define EE_EE_CLOCK_DIV_LBN 112 | ||
102 | #define EE_EE_CLOCK_DIV_WIDTH 7 | ||
103 | #define EE_SF_CLOCK_DIV_LBN 120 | ||
104 | #define EE_SF_CLOCK_DIV_WIDTH 7 | ||
105 | |||
106 | /* PCIE CORE ACCESS REG */ | ||
107 | #define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68 | ||
108 | #define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70 | ||
109 | #define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700 | ||
110 | #define PCIE_CORE_ADDR_ACK_FREQ 0x70C | ||
111 | |||
112 | /* NIC status register */ | ||
113 | #define NIC_STAT_REG 0x0200 | ||
114 | #define EE_STRAP_EN_LBN 31 | ||
115 | #define EE_STRAP_EN_WIDTH 1 | ||
116 | #define EE_STRAP_OVR_LBN 24 | ||
117 | #define EE_STRAP_OVR_WIDTH 4 | ||
118 | #define ONCHIP_SRAM_LBN 16 | ||
119 | #define ONCHIP_SRAM_WIDTH 1 | ||
120 | #define SF_PRST_LBN 9 | ||
121 | #define SF_PRST_WIDTH 1 | ||
122 | #define EE_PRST_LBN 8 | ||
123 | #define EE_PRST_WIDTH 1 | ||
124 | #define STRAP_PINS_LBN 0 | ||
125 | #define STRAP_PINS_WIDTH 3 | ||
126 | /* These bit definitions are extrapolated from the list of numerical | ||
127 | * values for STRAP_PINS. | ||
128 | */ | ||
129 | #define STRAP_10G_LBN 2 | ||
130 | #define STRAP_10G_WIDTH 1 | ||
131 | #define STRAP_PCIE_LBN 0 | ||
132 | #define STRAP_PCIE_WIDTH 1 | ||
133 | |||
134 | #define BOOTED_USING_NVDEVICE_LBN 3 | ||
135 | #define BOOTED_USING_NVDEVICE_WIDTH 1 | ||
136 | |||
137 | /* GPIO control register */ | ||
138 | #define GPIO_CTL_REG_KER 0x0210 | ||
139 | #define GPIO_USE_NIC_CLK_LBN (30) | ||
140 | #define GPIO_USE_NIC_CLK_WIDTH (1) | ||
141 | #define GPIO_OUTPUTS_LBN (16) | ||
142 | #define GPIO_OUTPUTS_WIDTH (4) | ||
143 | #define GPIO_INPUTS_LBN (8) | ||
144 | #define GPIO_DIRECTION_LBN (24) | ||
145 | #define GPIO_DIRECTION_WIDTH (4) | ||
146 | #define GPIO_DIRECTION_OUT (1) | ||
147 | #define GPIO_SRAM_SLEEP (1 << 1) | ||
148 | |||
149 | #define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3) | ||
150 | #define GPIO3_OEN_WIDTH 1 | ||
151 | #define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2) | ||
152 | #define GPIO2_OEN_WIDTH 1 | ||
153 | #define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1) | ||
154 | #define GPIO1_OEN_WIDTH 1 | ||
155 | #define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0) | ||
156 | #define GPIO0_OEN_WIDTH 1 | ||
157 | |||
158 | #define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3) | ||
159 | #define GPIO3_OUT_WIDTH 1 | ||
160 | #define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2) | ||
161 | #define GPIO2_OUT_WIDTH 1 | ||
162 | #define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1) | ||
163 | #define GPIO1_OUT_WIDTH 1 | ||
164 | #define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0) | ||
165 | #define GPIO0_OUT_WIDTH 1 | ||
166 | |||
167 | #define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3) | ||
168 | #define GPIO3_IN_WIDTH 1 | ||
169 | #define GPIO2_IN_WIDTH 1 | ||
170 | #define GPIO1_IN_WIDTH 1 | ||
171 | #define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0) | ||
172 | #define GPIO0_IN_WIDTH 1 | ||
173 | |||
174 | /* Global control register */ | ||
175 | #define GLB_CTL_REG_KER 0x0220 | ||
176 | #define EXT_PHY_RST_CTL_LBN 63 | ||
177 | #define EXT_PHY_RST_CTL_WIDTH 1 | ||
178 | #define PCIE_SD_RST_CTL_LBN 61 | ||
179 | #define PCIE_SD_RST_CTL_WIDTH 1 | ||
180 | |||
181 | #define PCIE_NSTCK_RST_CTL_LBN 58 | ||
182 | #define PCIE_NSTCK_RST_CTL_WIDTH 1 | ||
183 | #define PCIE_CORE_RST_CTL_LBN 57 | ||
184 | #define PCIE_CORE_RST_CTL_WIDTH 1 | ||
185 | #define EE_RST_CTL_LBN 49 | ||
186 | #define EE_RST_CTL_WIDTH 1 | ||
187 | #define RST_XGRX_LBN 24 | ||
188 | #define RST_XGRX_WIDTH 1 | ||
189 | #define RST_XGTX_LBN 23 | ||
190 | #define RST_XGTX_WIDTH 1 | ||
191 | #define RST_EM_LBN 22 | ||
192 | #define RST_EM_WIDTH 1 | ||
193 | #define EXT_PHY_RST_DUR_LBN 1 | ||
194 | #define EXT_PHY_RST_DUR_WIDTH 3 | ||
195 | #define SWRST_LBN 0 | ||
196 | #define SWRST_WIDTH 1 | ||
197 | #define INCLUDE_IN_RESET 0 | ||
198 | #define EXCLUDE_FROM_RESET 1 | ||
199 | |||
200 | /* Fatal interrupt register */ | ||
201 | #define FATAL_INTR_REG_KER 0x0230 | ||
202 | #define RBUF_OWN_INT_KER_EN_LBN 39 | ||
203 | #define RBUF_OWN_INT_KER_EN_WIDTH 1 | ||
204 | #define TBUF_OWN_INT_KER_EN_LBN 38 | ||
205 | #define TBUF_OWN_INT_KER_EN_WIDTH 1 | ||
206 | #define ILL_ADR_INT_KER_EN_LBN 33 | ||
207 | #define ILL_ADR_INT_KER_EN_WIDTH 1 | ||
208 | #define MEM_PERR_INT_KER_LBN 8 | ||
209 | #define MEM_PERR_INT_KER_WIDTH 1 | ||
210 | #define INT_KER_ERROR_LBN 0 | ||
211 | #define INT_KER_ERROR_WIDTH 12 | ||
212 | |||
213 | #define DP_CTRL_REG 0x250 | ||
214 | #define FLS_EVQ_ID_LBN 0 | ||
215 | #define FLS_EVQ_ID_WIDTH 11 | ||
216 | |||
217 | #define MEM_STAT_REG_KER 0x260 | ||
218 | |||
219 | /* Debug probe register */ | ||
220 | #define DEBUG_BLK_SEL_MISC 7 | ||
221 | #define DEBUG_BLK_SEL_SERDES 6 | ||
222 | #define DEBUG_BLK_SEL_EM 5 | ||
223 | #define DEBUG_BLK_SEL_SR 4 | ||
224 | #define DEBUG_BLK_SEL_EV 3 | ||
225 | #define DEBUG_BLK_SEL_RX 2 | ||
226 | #define DEBUG_BLK_SEL_TX 1 | ||
227 | #define DEBUG_BLK_SEL_BIU 0 | ||
228 | |||
229 | /* FPGA build version */ | ||
230 | #define ALTERA_BUILD_REG_KER 0x0300 | ||
231 | #define VER_ALL_LBN 0 | ||
232 | #define VER_ALL_WIDTH 32 | ||
233 | |||
234 | /* Spare EEPROM bits register (flash 0x390) */ | ||
235 | #define SPARE_REG_KER 0x310 | ||
236 | #define MEM_PERR_EN_TX_DATA_LBN 72 | ||
237 | #define MEM_PERR_EN_TX_DATA_WIDTH 2 | ||
238 | |||
239 | /* Timer table for kernel access */ | ||
240 | #define TIMER_CMD_REG_KER 0x420 | ||
241 | #define TIMER_MODE_LBN 12 | ||
242 | #define TIMER_MODE_WIDTH 2 | ||
243 | #define TIMER_MODE_DIS 0 | ||
244 | #define TIMER_MODE_INT_HLDOFF 2 | ||
245 | #define TIMER_VAL_LBN 0 | ||
246 | #define TIMER_VAL_WIDTH 12 | ||
247 | |||
248 | /* Driver generated event register */ | ||
249 | #define DRV_EV_REG_KER 0x440 | ||
250 | #define DRV_EV_QID_LBN 64 | ||
251 | #define DRV_EV_QID_WIDTH 12 | ||
252 | #define DRV_EV_DATA_LBN 0 | ||
253 | #define DRV_EV_DATA_WIDTH 64 | ||
254 | |||
255 | /* Buffer table configuration register */ | ||
256 | #define BUF_TBL_CFG_REG_KER 0x600 | ||
257 | #define BUF_TBL_MODE_LBN 3 | ||
258 | #define BUF_TBL_MODE_WIDTH 1 | ||
259 | #define BUF_TBL_MODE_HALF 0 | ||
260 | #define BUF_TBL_MODE_FULL 1 | ||
261 | |||
262 | /* SRAM receive descriptor cache configuration register */ | ||
263 | #define SRM_RX_DC_CFG_REG_KER 0x610 | ||
264 | #define SRM_RX_DC_BASE_ADR_LBN 0 | ||
265 | #define SRM_RX_DC_BASE_ADR_WIDTH 21 | ||
266 | |||
267 | /* SRAM transmit descriptor cache configuration register */ | ||
268 | #define SRM_TX_DC_CFG_REG_KER 0x620 | ||
269 | #define SRM_TX_DC_BASE_ADR_LBN 0 | ||
270 | #define SRM_TX_DC_BASE_ADR_WIDTH 21 | ||
271 | |||
272 | /* SRAM configuration register */ | ||
273 | #define SRM_CFG_REG_KER 0x630 | ||
274 | #define SRAM_OOB_BT_INIT_EN_LBN 3 | ||
275 | #define SRAM_OOB_BT_INIT_EN_WIDTH 1 | ||
276 | #define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0 | ||
277 | #define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3 | ||
278 | #define SRM_NB_BSZ_1BANKS_2M 0 | ||
279 | #define SRM_NB_BSZ_1BANKS_4M 1 | ||
280 | #define SRM_NB_BSZ_1BANKS_8M 2 | ||
281 | #define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */ | ||
282 | #define SRM_NB_BSZ_2BANKS_4M 4 | ||
283 | #define SRM_NB_BSZ_2BANKS_8M 5 | ||
284 | #define SRM_NB_BSZ_2BANKS_16M 6 | ||
285 | #define SRM_NB_BSZ_RESERVED 7 | ||
286 | |||
287 | /* Special buffer table update register */ | ||
288 | #define BUF_TBL_UPD_REG_KER 0x0650 | ||
289 | #define BUF_UPD_CMD_LBN 63 | ||
290 | #define BUF_UPD_CMD_WIDTH 1 | ||
291 | #define BUF_CLR_CMD_LBN 62 | ||
292 | #define BUF_CLR_CMD_WIDTH 1 | ||
293 | #define BUF_CLR_END_ID_LBN 32 | ||
294 | #define BUF_CLR_END_ID_WIDTH 20 | ||
295 | #define BUF_CLR_START_ID_LBN 0 | ||
296 | #define BUF_CLR_START_ID_WIDTH 20 | ||
297 | |||
298 | /* Receive configuration register */ | ||
299 | #define RX_CFG_REG_KER 0x800 | ||
300 | |||
301 | /* B0 */ | ||
302 | #define RX_INGR_EN_B0_LBN 47 | ||
303 | #define RX_INGR_EN_B0_WIDTH 1 | ||
304 | #define RX_DESC_PUSH_EN_B0_LBN 43 | ||
305 | #define RX_DESC_PUSH_EN_B0_WIDTH 1 | ||
306 | #define RX_XON_TX_TH_B0_LBN 33 | ||
307 | #define RX_XON_TX_TH_B0_WIDTH 5 | ||
308 | #define RX_XOFF_TX_TH_B0_LBN 28 | ||
309 | #define RX_XOFF_TX_TH_B0_WIDTH 5 | ||
310 | #define RX_USR_BUF_SIZE_B0_LBN 19 | ||
311 | #define RX_USR_BUF_SIZE_B0_WIDTH 9 | ||
312 | #define RX_XON_MAC_TH_B0_LBN 10 | ||
313 | #define RX_XON_MAC_TH_B0_WIDTH 9 | ||
314 | #define RX_XOFF_MAC_TH_B0_LBN 1 | ||
315 | #define RX_XOFF_MAC_TH_B0_WIDTH 9 | ||
316 | #define RX_XOFF_MAC_EN_B0_LBN 0 | ||
317 | #define RX_XOFF_MAC_EN_B0_WIDTH 1 | ||
318 | |||
319 | /* A1 */ | ||
320 | #define RX_DESC_PUSH_EN_A1_LBN 35 | ||
321 | #define RX_DESC_PUSH_EN_A1_WIDTH 1 | ||
322 | #define RX_XON_TX_TH_A1_LBN 25 | ||
323 | #define RX_XON_TX_TH_A1_WIDTH 5 | ||
324 | #define RX_XOFF_TX_TH_A1_LBN 20 | ||
325 | #define RX_XOFF_TX_TH_A1_WIDTH 5 | ||
326 | #define RX_USR_BUF_SIZE_A1_LBN 11 | ||
327 | #define RX_USR_BUF_SIZE_A1_WIDTH 9 | ||
328 | #define RX_XON_MAC_TH_A1_LBN 6 | ||
329 | #define RX_XON_MAC_TH_A1_WIDTH 5 | ||
330 | #define RX_XOFF_MAC_TH_A1_LBN 1 | ||
331 | #define RX_XOFF_MAC_TH_A1_WIDTH 5 | ||
332 | #define RX_XOFF_MAC_EN_A1_LBN 0 | ||
333 | #define RX_XOFF_MAC_EN_A1_WIDTH 1 | ||
334 | |||
335 | /* Receive filter control register */ | ||
336 | #define RX_FILTER_CTL_REG 0x810 | ||
337 | #define UDP_FULL_SRCH_LIMIT_LBN 32 | ||
338 | #define UDP_FULL_SRCH_LIMIT_WIDTH 8 | ||
339 | #define NUM_KER_LBN 24 | ||
340 | #define NUM_KER_WIDTH 2 | ||
341 | #define UDP_WILD_SRCH_LIMIT_LBN 16 | ||
342 | #define UDP_WILD_SRCH_LIMIT_WIDTH 8 | ||
343 | #define TCP_WILD_SRCH_LIMIT_LBN 8 | ||
344 | #define TCP_WILD_SRCH_LIMIT_WIDTH 8 | ||
345 | #define TCP_FULL_SRCH_LIMIT_LBN 0 | ||
346 | #define TCP_FULL_SRCH_LIMIT_WIDTH 8 | ||
347 | |||
348 | /* RX queue flush register */ | ||
349 | #define RX_FLUSH_DESCQ_REG_KER 0x0820 | ||
350 | #define RX_FLUSH_DESCQ_CMD_LBN 24 | ||
351 | #define RX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
352 | #define RX_FLUSH_DESCQ_LBN 0 | ||
353 | #define RX_FLUSH_DESCQ_WIDTH 12 | ||
354 | |||
355 | /* Receive descriptor update register */ | ||
356 | #define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12) | ||
357 | #define RX_DESC_WPTR_DWORD_LBN 0 | ||
358 | #define RX_DESC_WPTR_DWORD_WIDTH 12 | ||
359 | |||
360 | /* Receive descriptor cache configuration register */ | ||
361 | #define RX_DC_CFG_REG_KER 0x840 | ||
362 | #define RX_DC_SIZE_LBN 0 | ||
363 | #define RX_DC_SIZE_WIDTH 2 | ||
364 | |||
365 | #define RX_DC_PF_WM_REG_KER 0x850 | ||
366 | #define RX_DC_PF_LWM_LBN 0 | ||
367 | #define RX_DC_PF_LWM_WIDTH 6 | ||
368 | |||
369 | /* RX no descriptor drop counter */ | ||
370 | #define RX_NODESC_DROP_REG_KER 0x880 | ||
371 | #define RX_NODESC_DROP_CNT_LBN 0 | ||
372 | #define RX_NODESC_DROP_CNT_WIDTH 16 | ||
373 | |||
374 | /* RX black magic register */ | ||
375 | #define RX_SELF_RST_REG_KER 0x890 | ||
376 | #define RX_ISCSI_DIS_LBN 17 | ||
377 | #define RX_ISCSI_DIS_WIDTH 1 | ||
378 | #define RX_NODESC_WAIT_DIS_LBN 9 | ||
379 | #define RX_NODESC_WAIT_DIS_WIDTH 1 | ||
380 | #define RX_RECOVERY_EN_LBN 8 | ||
381 | #define RX_RECOVERY_EN_WIDTH 1 | ||
382 | |||
383 | /* TX queue flush register */ | ||
384 | #define TX_FLUSH_DESCQ_REG_KER 0x0a00 | ||
385 | #define TX_FLUSH_DESCQ_CMD_LBN 12 | ||
386 | #define TX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
387 | #define TX_FLUSH_DESCQ_LBN 0 | ||
388 | #define TX_FLUSH_DESCQ_WIDTH 12 | ||
389 | |||
390 | /* Transmit descriptor update register */ | ||
391 | #define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12) | ||
392 | #define TX_DESC_WPTR_DWORD_LBN 0 | ||
393 | #define TX_DESC_WPTR_DWORD_WIDTH 12 | ||
394 | |||
395 | /* Transmit descriptor cache configuration register */ | ||
396 | #define TX_DC_CFG_REG_KER 0xa20 | ||
397 | #define TX_DC_SIZE_LBN 0 | ||
398 | #define TX_DC_SIZE_WIDTH 2 | ||
399 | |||
400 | /* Transmit checksum configuration register (A0/A1 only) */ | ||
401 | #define TX_CHKSM_CFG_REG_KER_A1 0xa30 | ||
402 | |||
403 | /* Transmit configuration register */ | ||
404 | #define TX_CFG_REG_KER 0xa50 | ||
405 | #define TX_NO_EOP_DISC_EN_LBN 5 | ||
406 | #define TX_NO_EOP_DISC_EN_WIDTH 1 | ||
407 | |||
408 | /* Transmit configuration register 2 */ | ||
409 | #define TX_CFG2_REG_KER 0xa80 | ||
410 | #define TX_CSR_PUSH_EN_LBN 89 | ||
411 | #define TX_CSR_PUSH_EN_WIDTH 1 | ||
412 | #define TX_RX_SPACER_LBN 64 | ||
413 | #define TX_RX_SPACER_WIDTH 8 | ||
414 | #define TX_SW_EV_EN_LBN 59 | ||
415 | #define TX_SW_EV_EN_WIDTH 1 | ||
416 | #define TX_RX_SPACER_EN_LBN 57 | ||
417 | #define TX_RX_SPACER_EN_WIDTH 1 | ||
418 | #define TX_PREF_THRESHOLD_LBN 19 | ||
419 | #define TX_PREF_THRESHOLD_WIDTH 2 | ||
420 | #define TX_ONE_PKT_PER_Q_LBN 18 | ||
421 | #define TX_ONE_PKT_PER_Q_WIDTH 1 | ||
422 | #define TX_DIS_NON_IP_EV_LBN 17 | ||
423 | #define TX_DIS_NON_IP_EV_WIDTH 1 | ||
424 | #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7 | ||
425 | #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1 | ||
426 | |||
427 | /* PHY management transmit data register */ | ||
428 | #define MD_TXD_REG_KER 0xc00 | ||
429 | #define MD_TXD_LBN 0 | ||
430 | #define MD_TXD_WIDTH 16 | ||
431 | |||
432 | /* PHY management receive data register */ | ||
433 | #define MD_RXD_REG_KER 0xc10 | ||
434 | #define MD_RXD_LBN 0 | ||
435 | #define MD_RXD_WIDTH 16 | ||
436 | |||
437 | /* PHY management configuration & status register */ | ||
438 | #define MD_CS_REG_KER 0xc20 | ||
439 | #define MD_GC_LBN 4 | ||
440 | #define MD_GC_WIDTH 1 | ||
441 | #define MD_RIC_LBN 2 | ||
442 | #define MD_RIC_WIDTH 1 | ||
443 | #define MD_RDC_LBN 1 | ||
444 | #define MD_RDC_WIDTH 1 | ||
445 | #define MD_WRC_LBN 0 | ||
446 | #define MD_WRC_WIDTH 1 | ||
447 | |||
448 | /* PHY management PHY address register */ | ||
449 | #define MD_PHY_ADR_REG_KER 0xc30 | ||
450 | #define MD_PHY_ADR_LBN 0 | ||
451 | #define MD_PHY_ADR_WIDTH 16 | ||
452 | |||
453 | /* PHY management ID register */ | ||
454 | #define MD_ID_REG_KER 0xc40 | ||
455 | #define MD_PRT_ADR_LBN 11 | ||
456 | #define MD_PRT_ADR_WIDTH 5 | ||
457 | #define MD_DEV_ADR_LBN 6 | ||
458 | #define MD_DEV_ADR_WIDTH 5 | ||
459 | |||
460 | /* PHY management status & mask register (DWORD read only) */ | ||
461 | #define MD_STAT_REG_KER 0xc50 | ||
462 | #define MD_BSERR_LBN 2 | ||
463 | #define MD_BSERR_WIDTH 1 | ||
464 | #define MD_LNFL_LBN 1 | ||
465 | #define MD_LNFL_WIDTH 1 | ||
466 | #define MD_BSY_LBN 0 | ||
467 | #define MD_BSY_WIDTH 1 | ||
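The MD_* registers above form Falcon's MDIO (clause 45) host interface. Purely as an illustrative sketch of how they fit together — not the driver's exact falcon_mdio_read(), and assuming the falcon_read()/falcon_write() helpers from falcon_io.h later in this patch — a register read supplies the in-MMD address, selects the port/device, kicks off the cycle and polls MD_BSY (a real caller would bound the poll):

	/* Illustrative sketch: one MDIO clause-45 read via the MD_* registers */
	static int example_mdio_read(struct efx_nic *efx, int prtad, int devad, u16 addr)
	{
		efx_oword_t reg;

		EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);	/* address within the MMD */
		falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

		EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
		falcon_write(efx, &reg, MD_ID_REG_KER);

		EFX_POPULATE_OWORD_1(reg, MD_RDC, 1);		/* start the read cycle */
		falcon_write(efx, &reg, MD_CS_REG_KER);

		do {						/* wait for completion */
			falcon_read(efx, &reg, MD_STAT_REG_KER);
		} while (EFX_OWORD_FIELD(reg, MD_BSY));

		falcon_read(efx, &reg, MD_RXD_REG_KER);
		return EFX_OWORD_FIELD(reg, MD_RXD);
	}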
468 | |||
469 | /* Port 0 and 1 MAC stats registers */ | ||
470 | #define MAC0_STAT_DMA_REG_KER 0xc60 | ||
471 | #define MAC_STAT_DMA_CMD_LBN 48 | ||
472 | #define MAC_STAT_DMA_CMD_WIDTH 1 | ||
473 | #define MAC_STAT_DMA_ADR_LBN 0 | ||
474 | #define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46) | ||
475 | |||
476 | /* Port 0 and 1 MAC control registers */ | ||
477 | #define MAC0_CTRL_REG_KER 0xc80 | ||
478 | #define MAC_XOFF_VAL_LBN 16 | ||
479 | #define MAC_XOFF_VAL_WIDTH 16 | ||
480 | #define TXFIFO_DRAIN_EN_B0_LBN 7 | ||
481 | #define TXFIFO_DRAIN_EN_B0_WIDTH 1 | ||
482 | #define MAC_BCAD_ACPT_LBN 4 | ||
483 | #define MAC_BCAD_ACPT_WIDTH 1 | ||
484 | #define MAC_UC_PROM_LBN 3 | ||
485 | #define MAC_UC_PROM_WIDTH 1 | ||
486 | #define MAC_LINK_STATUS_LBN 2 | ||
487 | #define MAC_LINK_STATUS_WIDTH 1 | ||
488 | #define MAC_SPEED_LBN 0 | ||
489 | #define MAC_SPEED_WIDTH 2 | ||
490 | |||
491 | /* 10G XAUI XGXS default values */ | ||
492 | #define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */ | ||
493 | #define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */ | ||
494 | #define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */ | ||
495 | |||
496 | /* Multicast address hash table */ | ||
497 | #define MAC_MCAST_HASH_REG0_KER 0xca0 | ||
498 | #define MAC_MCAST_HASH_REG1_KER 0xcb0 | ||
499 | |||
500 | /* GMAC configuration register 1 */ | ||
501 | #define GM_CFG1_REG 0xe00 | ||
502 | #define GM_SW_RST_LBN 31 | ||
503 | #define GM_SW_RST_WIDTH 1 | ||
504 | #define GM_LOOP_LBN 8 | ||
505 | #define GM_LOOP_WIDTH 1 | ||
506 | #define GM_RX_FC_EN_LBN 5 | ||
507 | #define GM_RX_FC_EN_WIDTH 1 | ||
508 | #define GM_TX_FC_EN_LBN 4 | ||
509 | #define GM_TX_FC_EN_WIDTH 1 | ||
510 | #define GM_RX_EN_LBN 2 | ||
511 | #define GM_RX_EN_WIDTH 1 | ||
512 | #define GM_TX_EN_LBN 0 | ||
513 | #define GM_TX_EN_WIDTH 1 | ||
514 | |||
515 | /* GMAC configuration register 2 */ | ||
516 | #define GM_CFG2_REG 0xe10 | ||
517 | #define GM_PAMBL_LEN_LBN 12 | ||
518 | #define GM_PAMBL_LEN_WIDTH 4 | ||
519 | #define GM_IF_MODE_LBN 8 | ||
520 | #define GM_IF_MODE_WIDTH 2 | ||
521 | #define GM_LEN_CHK_LBN 4 | ||
522 | #define GM_LEN_CHK_WIDTH 1 | ||
523 | #define GM_PAD_CRC_EN_LBN 2 | ||
524 | #define GM_PAD_CRC_EN_WIDTH 1 | ||
525 | #define GM_FD_LBN 0 | ||
526 | #define GM_FD_WIDTH 1 | ||
527 | |||
528 | /* GMAC maximum frame length register */ | ||
529 | #define GM_MAX_FLEN_REG 0xe40 | ||
530 | #define GM_MAX_FLEN_LBN 0 | ||
531 | #define GM_MAX_FLEN_WIDTH 16 | ||
532 | |||
533 | /* GMAC station address register 1 */ | ||
534 | #define GM_ADR1_REG 0xf00 | ||
535 | #define GM_HWADDR_5_LBN 24 | ||
536 | #define GM_HWADDR_5_WIDTH 8 | ||
537 | #define GM_HWADDR_4_LBN 16 | ||
538 | #define GM_HWADDR_4_WIDTH 8 | ||
539 | #define GM_HWADDR_3_LBN 8 | ||
540 | #define GM_HWADDR_3_WIDTH 8 | ||
541 | #define GM_HWADDR_2_LBN 0 | ||
542 | #define GM_HWADDR_2_WIDTH 8 | ||
543 | |||
544 | /* GMAC station address register 2 */ | ||
545 | #define GM_ADR2_REG 0xf10 | ||
546 | #define GM_HWADDR_1_LBN 24 | ||
547 | #define GM_HWADDR_1_WIDTH 8 | ||
548 | #define GM_HWADDR_0_LBN 16 | ||
549 | #define GM_HWADDR_0_WIDTH 8 | ||
550 | |||
551 | /* GMAC FIFO configuration register 0 */ | ||
552 | #define GMF_CFG0_REG 0xf20 | ||
553 | #define GMF_FTFENREQ_LBN 12 | ||
554 | #define GMF_FTFENREQ_WIDTH 1 | ||
555 | #define GMF_STFENREQ_LBN 11 | ||
556 | #define GMF_STFENREQ_WIDTH 1 | ||
557 | #define GMF_FRFENREQ_LBN 10 | ||
558 | #define GMF_FRFENREQ_WIDTH 1 | ||
559 | #define GMF_SRFENREQ_LBN 9 | ||
560 | #define GMF_SRFENREQ_WIDTH 1 | ||
561 | #define GMF_WTMENREQ_LBN 8 | ||
562 | #define GMF_WTMENREQ_WIDTH 1 | ||
563 | |||
564 | /* GMAC FIFO configuration register 1 */ | ||
565 | #define GMF_CFG1_REG 0xf30 | ||
566 | #define GMF_CFGFRTH_LBN 16 | ||
567 | #define GMF_CFGFRTH_WIDTH 5 | ||
568 | #define GMF_CFGXOFFRTX_LBN 0 | ||
569 | #define GMF_CFGXOFFRTX_WIDTH 16 | ||
570 | |||
571 | /* GMAC FIFO configuration register 2 */ | ||
572 | #define GMF_CFG2_REG 0xf40 | ||
573 | #define GMF_CFGHWM_LBN 16 | ||
574 | #define GMF_CFGHWM_WIDTH 6 | ||
575 | #define GMF_CFGLWM_LBN 0 | ||
576 | #define GMF_CFGLWM_WIDTH 6 | ||
577 | |||
578 | /* GMAC FIFO configuration register 3 */ | ||
579 | #define GMF_CFG3_REG 0xf50 | ||
580 | #define GMF_CFGHWMFT_LBN 16 | ||
581 | #define GMF_CFGHWMFT_WIDTH 6 | ||
582 | #define GMF_CFGFTTH_LBN 0 | ||
583 | #define GMF_CFGFTTH_WIDTH 6 | ||
584 | |||
585 | /* GMAC FIFO configuration register 4 */ | ||
586 | #define GMF_CFG4_REG 0xf60 | ||
587 | #define GMF_HSTFLTRFRM_PAUSE_LBN 12 | ||
588 | #define GMF_HSTFLTRFRM_PAUSE_WIDTH 12 | ||
589 | |||
590 | /* GMAC FIFO configuration register 5 */ | ||
591 | #define GMF_CFG5_REG 0xf70 | ||
592 | #define GMF_CFGHDPLX_LBN 22 | ||
593 | #define GMF_CFGHDPLX_WIDTH 1 | ||
594 | #define GMF_CFGBYTMODE_LBN 19 | ||
595 | #define GMF_CFGBYTMODE_WIDTH 1 | ||
596 | #define GMF_HSTDRPLT64_LBN 18 | ||
597 | #define GMF_HSTDRPLT64_WIDTH 1 | ||
598 | #define GMF_HSTFLTRFRMDC_PAUSE_LBN 12 | ||
599 | #define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 | ||
600 | |||
601 | /* XGMAC address register low */ | ||
602 | #define XM_ADR_LO_REG 0x1200 | ||
603 | #define XM_ADR_3_LBN 24 | ||
604 | #define XM_ADR_3_WIDTH 8 | ||
605 | #define XM_ADR_2_LBN 16 | ||
606 | #define XM_ADR_2_WIDTH 8 | ||
607 | #define XM_ADR_1_LBN 8 | ||
608 | #define XM_ADR_1_WIDTH 8 | ||
609 | #define XM_ADR_0_LBN 0 | ||
610 | #define XM_ADR_0_WIDTH 8 | ||
611 | |||
612 | /* XGMAC address register high */ | ||
613 | #define XM_ADR_HI_REG 0x1210 | ||
614 | #define XM_ADR_5_LBN 8 | ||
615 | #define XM_ADR_5_WIDTH 8 | ||
616 | #define XM_ADR_4_LBN 0 | ||
617 | #define XM_ADR_4_WIDTH 8 | ||
618 | |||
619 | /* XGMAC global configuration */ | ||
620 | #define XM_GLB_CFG_REG 0x1220 | ||
621 | #define XM_RX_STAT_EN_LBN 11 | ||
622 | #define XM_RX_STAT_EN_WIDTH 1 | ||
623 | #define XM_TX_STAT_EN_LBN 10 | ||
624 | #define XM_TX_STAT_EN_WIDTH 1 | ||
625 | #define XM_RX_JUMBO_MODE_LBN 6 | ||
626 | #define XM_RX_JUMBO_MODE_WIDTH 1 | ||
627 | #define XM_INTCLR_MODE_LBN 3 | ||
628 | #define XM_INTCLR_MODE_WIDTH 1 | ||
629 | #define XM_CORE_RST_LBN 0 | ||
630 | #define XM_CORE_RST_WIDTH 1 | ||
631 | |||
632 | /* XGMAC transmit configuration */ | ||
633 | #define XM_TX_CFG_REG 0x1230 | ||
634 | #define XM_IPG_LBN 16 | ||
635 | #define XM_IPG_WIDTH 4 | ||
636 | #define XM_FCNTL_LBN 10 | ||
637 | #define XM_FCNTL_WIDTH 1 | ||
638 | #define XM_TXCRC_LBN 8 | ||
639 | #define XM_TXCRC_WIDTH 1 | ||
640 | #define XM_AUTO_PAD_LBN 5 | ||
641 | #define XM_AUTO_PAD_WIDTH 1 | ||
642 | #define XM_TX_PRMBL_LBN 2 | ||
643 | #define XM_TX_PRMBL_WIDTH 1 | ||
644 | #define XM_TXEN_LBN 1 | ||
645 | #define XM_TXEN_WIDTH 1 | ||
646 | |||
647 | /* XGMAC receive configuration */ | ||
648 | #define XM_RX_CFG_REG 0x1240 | ||
649 | #define XM_PASS_CRC_ERR_LBN 25 | ||
650 | #define XM_PASS_CRC_ERR_WIDTH 1 | ||
651 | #define XM_ACPT_ALL_MCAST_LBN 11 | ||
652 | #define XM_ACPT_ALL_MCAST_WIDTH 1 | ||
653 | #define XM_ACPT_ALL_UCAST_LBN 9 | ||
654 | #define XM_ACPT_ALL_UCAST_WIDTH 1 | ||
655 | #define XM_AUTO_DEPAD_LBN 8 | ||
656 | #define XM_AUTO_DEPAD_WIDTH 1 | ||
657 | #define XM_RXEN_LBN 1 | ||
658 | #define XM_RXEN_WIDTH 1 | ||
659 | |||
660 | /* XGMAC management interrupt mask register */ | ||
661 | #define XM_MGT_INT_MSK_REG_B0 0x1250 | ||
662 | #define XM_MSK_PRMBLE_ERR_LBN 2 | ||
663 | #define XM_MSK_PRMBLE_ERR_WIDTH 1 | ||
664 | #define XM_MSK_RMTFLT_LBN 1 | ||
665 | #define XM_MSK_RMTFLT_WIDTH 1 | ||
666 | #define XM_MSK_LCLFLT_LBN 0 | ||
667 | #define XM_MSK_LCLFLT_WIDTH 1 | ||
668 | |||
669 | /* XGMAC flow control register */ | ||
670 | #define XM_FC_REG 0x1270 | ||
671 | #define XM_PAUSE_TIME_LBN 16 | ||
672 | #define XM_PAUSE_TIME_WIDTH 16 | ||
673 | #define XM_DIS_FCNTL_LBN 0 | ||
674 | #define XM_DIS_FCNTL_WIDTH 1 | ||
675 | |||
676 | /* XGMAC pause time count register */ | ||
677 | #define XM_PAUSE_TIME_REG 0x1290 | ||
678 | |||
679 | /* XGMAC transmit parameter register */ | ||
680 | #define XM_TX_PARAM_REG 0x012d0 | ||
681 | #define XM_TX_JUMBO_MODE_LBN 31 | ||
682 | #define XM_TX_JUMBO_MODE_WIDTH 1 | ||
683 | #define XM_MAX_TX_FRM_SIZE_LBN 16 | ||
684 | #define XM_MAX_TX_FRM_SIZE_WIDTH 14 | ||
685 | |||
686 | /* XGMAC receive parameter register */ | ||
687 | #define XM_RX_PARAM_REG 0x12e0 | ||
688 | #define XM_MAX_RX_FRM_SIZE_LBN 0 | ||
689 | #define XM_MAX_RX_FRM_SIZE_WIDTH 14 | ||
690 | |||
691 | /* XGMAC management interrupt status register */ | ||
692 | #define XM_MGT_INT_REG_B0 0x12f0 | ||
693 | #define XM_PRMBLE_ERR 2 | ||
694 | #define XM_PRMBLE_WIDTH 1 | ||
695 | #define XM_RMTFLT_LBN 1 | ||
696 | #define XM_RMTFLT_WIDTH 1 | ||
697 | #define XM_LCLFLT_LBN 0 | ||
698 | #define XM_LCLFLT_WIDTH 1 | ||
699 | |||
700 | /* XGXS/XAUI powerdown/reset register */ | ||
701 | #define XX_PWR_RST_REG 0x1300 | ||
702 | |||
703 | #define XX_SD_RST_ACT_LBN 16 | ||
704 | #define XX_SD_RST_ACT_WIDTH 1 | ||
705 | #define XX_PWRDND_EN_LBN 15 | ||
706 | #define XX_PWRDND_EN_WIDTH 1 | ||
707 | #define XX_PWRDNC_EN_LBN 14 | ||
708 | #define XX_PWRDNC_EN_WIDTH 1 | ||
709 | #define XX_PWRDNB_EN_LBN 13 | ||
710 | #define XX_PWRDNB_EN_WIDTH 1 | ||
711 | #define XX_PWRDNA_EN_LBN 12 | ||
712 | #define XX_PWRDNA_EN_WIDTH 1 | ||
713 | #define XX_RSTPLLCD_EN_LBN 9 | ||
714 | #define XX_RSTPLLCD_EN_WIDTH 1 | ||
715 | #define XX_RSTPLLAB_EN_LBN 8 | ||
716 | #define XX_RSTPLLAB_EN_WIDTH 1 | ||
717 | #define XX_RESETD_EN_LBN 7 | ||
718 | #define XX_RESETD_EN_WIDTH 1 | ||
719 | #define XX_RESETC_EN_LBN 6 | ||
720 | #define XX_RESETC_EN_WIDTH 1 | ||
721 | #define XX_RESETB_EN_LBN 5 | ||
722 | #define XX_RESETB_EN_WIDTH 1 | ||
723 | #define XX_RESETA_EN_LBN 4 | ||
724 | #define XX_RESETA_EN_WIDTH 1 | ||
725 | #define XX_RSTXGXSRX_EN_LBN 2 | ||
726 | #define XX_RSTXGXSRX_EN_WIDTH 1 | ||
727 | #define XX_RSTXGXSTX_EN_LBN 1 | ||
728 | #define XX_RSTXGXSTX_EN_WIDTH 1 | ||
729 | #define XX_RST_XX_EN_LBN 0 | ||
730 | #define XX_RST_XX_EN_WIDTH 1 | ||
731 | |||
732 | /* XGXS/XAUI powerdown/reset control register */ | ||
733 | #define XX_SD_CTL_REG 0x1310 | ||
734 | #define XX_HIDRVD_LBN 15 | ||
735 | #define XX_HIDRVD_WIDTH 1 | ||
736 | #define XX_LODRVD_LBN 14 | ||
737 | #define XX_LODRVD_WIDTH 1 | ||
738 | #define XX_HIDRVC_LBN 13 | ||
739 | #define XX_HIDRVC_WIDTH 1 | ||
740 | #define XX_LODRVC_LBN 12 | ||
741 | #define XX_LODRVC_WIDTH 1 | ||
742 | #define XX_HIDRVB_LBN 11 | ||
743 | #define XX_HIDRVB_WIDTH 1 | ||
744 | #define XX_LODRVB_LBN 10 | ||
745 | #define XX_LODRVB_WIDTH 1 | ||
746 | #define XX_HIDRVA_LBN 9 | ||
747 | #define XX_HIDRVA_WIDTH 1 | ||
748 | #define XX_LODRVA_LBN 8 | ||
749 | #define XX_LODRVA_WIDTH 1 | ||
750 | #define XX_LPBKD_LBN 3 | ||
751 | #define XX_LPBKD_WIDTH 1 | ||
752 | #define XX_LPBKC_LBN 2 | ||
753 | #define XX_LPBKC_WIDTH 1 | ||
754 | #define XX_LPBKB_LBN 1 | ||
755 | #define XX_LPBKB_WIDTH 1 | ||
756 | #define XX_LPBKA_LBN 0 | ||
757 | #define XX_LPBKA_WIDTH 1 | ||
758 | |||
759 | #define XX_TXDRV_CTL_REG 0x1320 | ||
760 | #define XX_DEQD_LBN 28 | ||
761 | #define XX_DEQD_WIDTH 4 | ||
762 | #define XX_DEQC_LBN 24 | ||
763 | #define XX_DEQC_WIDTH 4 | ||
764 | #define XX_DEQB_LBN 20 | ||
765 | #define XX_DEQB_WIDTH 4 | ||
766 | #define XX_DEQA_LBN 16 | ||
767 | #define XX_DEQA_WIDTH 4 | ||
768 | #define XX_DTXD_LBN 12 | ||
769 | #define XX_DTXD_WIDTH 4 | ||
770 | #define XX_DTXC_LBN 8 | ||
771 | #define XX_DTXC_WIDTH 4 | ||
772 | #define XX_DTXB_LBN 4 | ||
773 | #define XX_DTXB_WIDTH 4 | ||
774 | #define XX_DTXA_LBN 0 | ||
775 | #define XX_DTXA_WIDTH 4 | ||
776 | |||
777 | /* XAUI XGXS core status register */ | ||
778 | #define XX_CORE_STAT_REG 0x1360 | ||
779 | #define XX_FORCE_SIG_LBN 24 | ||
780 | #define XX_FORCE_SIG_WIDTH 8 | ||
781 | #define XX_FORCE_SIG_DECODE_FORCED 0xff | ||
782 | #define XX_XGXS_LB_EN_LBN 23 | ||
783 | #define XX_XGXS_LB_EN_WIDTH 1 | ||
784 | #define XX_XGMII_LB_EN_LBN 22 | ||
785 | #define XX_XGMII_LB_EN_WIDTH 1 | ||
786 | #define XX_ALIGN_DONE_LBN 20 | ||
787 | #define XX_ALIGN_DONE_WIDTH 1 | ||
788 | #define XX_SYNC_STAT_LBN 16 | ||
789 | #define XX_SYNC_STAT_WIDTH 4 | ||
790 | #define XX_SYNC_STAT_DECODE_SYNCED 0xf | ||
791 | #define XX_COMMA_DET_LBN 12 | ||
792 | #define XX_COMMA_DET_WIDTH 4 | ||
793 | #define XX_COMMA_DET_DECODE_DETECTED 0xf | ||
794 | #define XX_COMMA_DET_RESET 0xf | ||
795 | #define XX_CHARERR_LBN 4 | ||
796 | #define XX_CHARERR_WIDTH 4 | ||
797 | #define XX_CHARERR_RESET 0xf | ||
798 | #define XX_DISPERR_LBN 0 | ||
799 | #define XX_DISPERR_WIDTH 4 | ||
800 | #define XX_DISPERR_RESET 0xf | ||
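The XX_CORE_STAT_REG decodes above are what link-state polling keys off: all four lanes must report sync and lane deskew must have completed. A hedged sketch, not verbatim from falcon_xmac.c:

	/* Illustrative sketch: is the XAUI side of the XGXS core usable? */
	static bool example_xaui_link_ok(struct efx_nic *efx)
	{
		efx_oword_t reg;
		bool align_done, sync_ok;

		falcon_read(efx, &reg, XX_CORE_STAT_REG);
		align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE) != 0;
		sync_ok = (EFX_OWORD_FIELD(reg, XX_SYNC_STAT) ==
			   XX_SYNC_STAT_DECODE_SYNCED);

		/* writing the *_RESET patterns clears the latched error flags */
		EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
		EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
		EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
		falcon_write(efx, &reg, XX_CORE_STAT_REG);

		return align_done && sync_ok;
	}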
801 | |||
802 | /* Receive filter table */ | ||
803 | #define RX_FILTER_TBL0 0xF00000 | ||
804 | |||
805 | /* Receive descriptor pointer table */ | ||
806 | #define RX_DESC_PTR_TBL_KER_A1 0x11800 | ||
807 | #define RX_DESC_PTR_TBL_KER_B0 0xF40000 | ||
808 | #define RX_DESC_PTR_TBL_KER_P0 0x900 | ||
809 | #define RX_ISCSI_DDIG_EN_LBN 88 | ||
810 | #define RX_ISCSI_DDIG_EN_WIDTH 1 | ||
811 | #define RX_ISCSI_HDIG_EN_LBN 87 | ||
812 | #define RX_ISCSI_HDIG_EN_WIDTH 1 | ||
813 | #define RX_DESCQ_BUF_BASE_ID_LBN 36 | ||
814 | #define RX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
815 | #define RX_DESCQ_EVQ_ID_LBN 24 | ||
816 | #define RX_DESCQ_EVQ_ID_WIDTH 12 | ||
817 | #define RX_DESCQ_OWNER_ID_LBN 10 | ||
818 | #define RX_DESCQ_OWNER_ID_WIDTH 14 | ||
819 | #define RX_DESCQ_LABEL_LBN 5 | ||
820 | #define RX_DESCQ_LABEL_WIDTH 5 | ||
821 | #define RX_DESCQ_SIZE_LBN 3 | ||
822 | #define RX_DESCQ_SIZE_WIDTH 2 | ||
823 | #define RX_DESCQ_SIZE_4K 3 | ||
824 | #define RX_DESCQ_SIZE_2K 2 | ||
825 | #define RX_DESCQ_SIZE_1K 1 | ||
826 | #define RX_DESCQ_SIZE_512 0 | ||
827 | #define RX_DESCQ_TYPE_LBN 2 | ||
828 | #define RX_DESCQ_TYPE_WIDTH 1 | ||
829 | #define RX_DESCQ_JUMBO_LBN 1 | ||
830 | #define RX_DESCQ_JUMBO_WIDTH 1 | ||
831 | #define RX_DESCQ_EN_LBN 0 | ||
832 | #define RX_DESCQ_EN_WIDTH 1 | ||
833 | |||
834 | /* Transmit descriptor pointer table */ | ||
835 | #define TX_DESC_PTR_TBL_KER_A1 0x11900 | ||
836 | #define TX_DESC_PTR_TBL_KER_B0 0xF50000 | ||
837 | #define TX_DESC_PTR_TBL_KER_P0 0xa40 | ||
838 | #define TX_NON_IP_DROP_DIS_B0_LBN 91 | ||
839 | #define TX_NON_IP_DROP_DIS_B0_WIDTH 1 | ||
840 | #define TX_IP_CHKSM_DIS_B0_LBN 90 | ||
841 | #define TX_IP_CHKSM_DIS_B0_WIDTH 1 | ||
842 | #define TX_TCP_CHKSM_DIS_B0_LBN 89 | ||
843 | #define TX_TCP_CHKSM_DIS_B0_WIDTH 1 | ||
844 | #define TX_DESCQ_EN_LBN 88 | ||
845 | #define TX_DESCQ_EN_WIDTH 1 | ||
846 | #define TX_ISCSI_DDIG_EN_LBN 87 | ||
847 | #define TX_ISCSI_DDIG_EN_WIDTH 1 | ||
848 | #define TX_ISCSI_HDIG_EN_LBN 86 | ||
849 | #define TX_ISCSI_HDIG_EN_WIDTH 1 | ||
850 | #define TX_DESCQ_BUF_BASE_ID_LBN 36 | ||
851 | #define TX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
852 | #define TX_DESCQ_EVQ_ID_LBN 24 | ||
853 | #define TX_DESCQ_EVQ_ID_WIDTH 12 | ||
854 | #define TX_DESCQ_OWNER_ID_LBN 10 | ||
855 | #define TX_DESCQ_OWNER_ID_WIDTH 14 | ||
856 | #define TX_DESCQ_LABEL_LBN 5 | ||
857 | #define TX_DESCQ_LABEL_WIDTH 5 | ||
858 | #define TX_DESCQ_SIZE_LBN 3 | ||
859 | #define TX_DESCQ_SIZE_WIDTH 2 | ||
860 | #define TX_DESCQ_SIZE_4K 3 | ||
861 | #define TX_DESCQ_SIZE_2K 2 | ||
862 | #define TX_DESCQ_SIZE_1K 1 | ||
863 | #define TX_DESCQ_SIZE_512 0 | ||
864 | #define TX_DESCQ_TYPE_LBN 1 | ||
865 | #define TX_DESCQ_TYPE_WIDTH 2 | ||
866 | |||
867 | /* Event queue pointer */ | ||
868 | #define EVQ_PTR_TBL_KER_A1 0x11a00 | ||
869 | #define EVQ_PTR_TBL_KER_B0 0xf60000 | ||
870 | #define EVQ_PTR_TBL_KER_P0 0x500 | ||
871 | #define EVQ_EN_LBN 23 | ||
872 | #define EVQ_EN_WIDTH 1 | ||
873 | #define EVQ_SIZE_LBN 20 | ||
874 | #define EVQ_SIZE_WIDTH 3 | ||
875 | #define EVQ_SIZE_32K 6 | ||
876 | #define EVQ_SIZE_16K 5 | ||
877 | #define EVQ_SIZE_8K 4 | ||
878 | #define EVQ_SIZE_4K 3 | ||
879 | #define EVQ_SIZE_2K 2 | ||
880 | #define EVQ_SIZE_1K 1 | ||
881 | #define EVQ_SIZE_512 0 | ||
882 | #define EVQ_BUF_BASE_ID_LBN 0 | ||
883 | #define EVQ_BUF_BASE_ID_WIDTH 20 | ||
884 | |||
885 | /* Event queue read pointer */ | ||
886 | #define EVQ_RPTR_REG_KER_A1 0x11b00 | ||
887 | #define EVQ_RPTR_REG_KER_B0 0xfa0000 | ||
888 | #define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0) | ||
889 | #define EVQ_RPTR_DWORD_LBN 0 | ||
890 | #define EVQ_RPTR_DWORD_WIDTH 14 | ||
891 | |||
892 | /* RSS indirection table */ | ||
893 | #define RX_RSS_INDIR_TBL_B0 0xFB0000 | ||
894 | #define RX_RSS_INDIR_ENT_B0_LBN 0 | ||
895 | #define RX_RSS_INDIR_ENT_B0_WIDTH 6 | ||
896 | |||
897 | /* Special buffer descriptors (full-mode) */ | ||
898 | #define BUF_FULL_TBL_KER_A1 0x8000 | ||
899 | #define BUF_FULL_TBL_KER_B0 0x800000 | ||
900 | #define IP_DAT_BUF_SIZE_LBN 50 | ||
901 | #define IP_DAT_BUF_SIZE_WIDTH 1 | ||
902 | #define IP_DAT_BUF_SIZE_8K 1 | ||
903 | #define IP_DAT_BUF_SIZE_4K 0 | ||
904 | #define BUF_ADR_REGION_LBN 48 | ||
905 | #define BUF_ADR_REGION_WIDTH 2 | ||
906 | #define BUF_ADR_FBUF_LBN 14 | ||
907 | #define BUF_ADR_FBUF_WIDTH 34 | ||
908 | #define BUF_OWNER_ID_FBUF_LBN 0 | ||
909 | #define BUF_OWNER_ID_FBUF_WIDTH 14 | ||
910 | |||
911 | /* Transmit descriptor */ | ||
912 | #define TX_KER_PORT_LBN 63 | ||
913 | #define TX_KER_PORT_WIDTH 1 | ||
914 | #define TX_KER_CONT_LBN 62 | ||
915 | #define TX_KER_CONT_WIDTH 1 | ||
916 | #define TX_KER_BYTE_CNT_LBN 48 | ||
917 | #define TX_KER_BYTE_CNT_WIDTH 14 | ||
918 | #define TX_KER_BUF_REGION_LBN 46 | ||
919 | #define TX_KER_BUF_REGION_WIDTH 2 | ||
920 | #define TX_KER_BUF_REGION0_DECODE 0 | ||
921 | #define TX_KER_BUF_REGION1_DECODE 1 | ||
922 | #define TX_KER_BUF_REGION2_DECODE 2 | ||
923 | #define TX_KER_BUF_REGION3_DECODE 3 | ||
924 | #define TX_KER_BUF_ADR_LBN 0 | ||
925 | #define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46) | ||
926 | |||
927 | /* Receive descriptor */ | ||
928 | #define RX_KER_BUF_SIZE_LBN 48 | ||
929 | #define RX_KER_BUF_SIZE_WIDTH 14 | ||
930 | #define RX_KER_BUF_REGION_LBN 46 | ||
931 | #define RX_KER_BUF_REGION_WIDTH 2 | ||
932 | #define RX_KER_BUF_REGION0_DECODE 0 | ||
933 | #define RX_KER_BUF_REGION1_DECODE 1 | ||
934 | #define RX_KER_BUF_REGION2_DECODE 2 | ||
935 | #define RX_KER_BUF_REGION3_DECODE 3 | ||
936 | #define RX_KER_BUF_ADR_LBN 0 | ||
937 | #define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46) | ||
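Both descriptor formats are packed straight into the ring, assuming the EFX_POPULATE_QWORD_*() helpers from the driver's bitfield.h. A sketch of a physical-mode TX descriptor (len, dma_addr and is_last are placeholders; an RX descriptor is filled in analogously from the RX_KER_* fields):

	/* Illustrative sketch: pack one physical-address TX descriptor */
	efx_qword_t txd;

	EFX_POPULATE_QWORD_5(txd,
			     TX_KER_PORT, 0,
			     TX_KER_CONT, is_last ? 0 : 1,	/* more fragments follow? */
			     TX_KER_BYTE_CNT, len,
			     TX_KER_BUF_REGION, TX_KER_BUF_REGION0_DECODE,
			     TX_KER_BUF_ADR, dma_addr);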
938 | |||
939 | /************************************************************************** | ||
940 | * | ||
941 | * Falcon events | ||
942 | * | ||
943 | ************************************************************************** | ||
944 | */ | ||
945 | |||
946 | /* Event queue entries */ | ||
947 | #define EV_CODE_LBN 60 | ||
948 | #define EV_CODE_WIDTH 4 | ||
949 | #define RX_IP_EV_DECODE 0 | ||
950 | #define TX_IP_EV_DECODE 2 | ||
951 | #define DRIVER_EV_DECODE 5 | ||
952 | #define GLOBAL_EV_DECODE 6 | ||
953 | #define DRV_GEN_EV_DECODE 7 | ||
954 | #define WHOLE_EVENT_LBN 0 | ||
955 | #define WHOLE_EVENT_WIDTH 64 | ||
956 | |||
957 | /* Receive events */ | ||
958 | #define RX_EV_PKT_OK_LBN 56 | ||
959 | #define RX_EV_PKT_OK_WIDTH 1 | ||
960 | #define RX_EV_PAUSE_FRM_ERR_LBN 55 | ||
961 | #define RX_EV_PAUSE_FRM_ERR_WIDTH 1 | ||
962 | #define RX_EV_BUF_OWNER_ID_ERR_LBN 54 | ||
963 | #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 | ||
964 | #define RX_EV_IF_FRAG_ERR_LBN 53 | ||
965 | #define RX_EV_IF_FRAG_ERR_WIDTH 1 | ||
966 | #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 | ||
967 | #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 | ||
968 | #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 | ||
969 | #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 | ||
970 | #define RX_EV_ETH_CRC_ERR_LBN 50 | ||
971 | #define RX_EV_ETH_CRC_ERR_WIDTH 1 | ||
972 | #define RX_EV_FRM_TRUNC_LBN 49 | ||
973 | #define RX_EV_FRM_TRUNC_WIDTH 1 | ||
974 | #define RX_EV_DRIB_NIB_LBN 48 | ||
975 | #define RX_EV_DRIB_NIB_WIDTH 1 | ||
976 | #define RX_EV_TOBE_DISC_LBN 47 | ||
977 | #define RX_EV_TOBE_DISC_WIDTH 1 | ||
978 | #define RX_EV_PKT_TYPE_LBN 44 | ||
979 | #define RX_EV_PKT_TYPE_WIDTH 3 | ||
980 | #define RX_EV_PKT_TYPE_ETH_DECODE 0 | ||
981 | #define RX_EV_PKT_TYPE_LLC_DECODE 1 | ||
982 | #define RX_EV_PKT_TYPE_JUMBO_DECODE 2 | ||
983 | #define RX_EV_PKT_TYPE_VLAN_DECODE 3 | ||
984 | #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4 | ||
985 | #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5 | ||
986 | #define RX_EV_HDR_TYPE_LBN 42 | ||
987 | #define RX_EV_HDR_TYPE_WIDTH 2 | ||
988 | #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0 | ||
989 | #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1 | ||
990 | #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2 | ||
991 | #define RX_EV_HDR_TYPE_NON_IP_DECODE 3 | ||
992 | #define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \ | ||
993 | ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE) | ||
994 | #define RX_EV_MCAST_HASH_MATCH_LBN 40 | ||
995 | #define RX_EV_MCAST_HASH_MATCH_WIDTH 1 | ||
996 | #define RX_EV_MCAST_PKT_LBN 39 | ||
997 | #define RX_EV_MCAST_PKT_WIDTH 1 | ||
998 | #define RX_EV_Q_LABEL_LBN 32 | ||
999 | #define RX_EV_Q_LABEL_WIDTH 5 | ||
1000 | #define RX_EV_JUMBO_CONT_LBN 31 | ||
1001 | #define RX_EV_JUMBO_CONT_WIDTH 1 | ||
1002 | #define RX_EV_BYTE_CNT_LBN 16 | ||
1003 | #define RX_EV_BYTE_CNT_WIDTH 14 | ||
1004 | #define RX_EV_SOP_LBN 15 | ||
1005 | #define RX_EV_SOP_WIDTH 1 | ||
1006 | #define RX_EV_DESC_PTR_LBN 0 | ||
1007 | #define RX_EV_DESC_PTR_WIDTH 12 | ||
1008 | |||
1009 | /* Transmit events */ | ||
1010 | #define TX_EV_PKT_ERR_LBN 38 | ||
1011 | #define TX_EV_PKT_ERR_WIDTH 1 | ||
1012 | #define TX_EV_Q_LABEL_LBN 32 | ||
1013 | #define TX_EV_Q_LABEL_WIDTH 5 | ||
1014 | #define TX_EV_WQ_FF_FULL_LBN 15 | ||
1015 | #define TX_EV_WQ_FF_FULL_WIDTH 1 | ||
1016 | #define TX_EV_COMP_LBN 12 | ||
1017 | #define TX_EV_COMP_WIDTH 1 | ||
1018 | #define TX_EV_DESC_PTR_LBN 0 | ||
1019 | #define TX_EV_DESC_PTR_WIDTH 12 | ||
1020 | |||
1021 | /* Driver events */ | ||
1022 | #define DRIVER_EV_SUB_CODE_LBN 56 | ||
1023 | #define DRIVER_EV_SUB_CODE_WIDTH 4 | ||
1024 | #define DRIVER_EV_SUB_DATA_LBN 0 | ||
1025 | #define DRIVER_EV_SUB_DATA_WIDTH 14 | ||
1026 | #define TX_DESCQ_FLS_DONE_EV_DECODE 0 | ||
1027 | #define RX_DESCQ_FLS_DONE_EV_DECODE 1 | ||
1028 | #define EVQ_INIT_DONE_EV_DECODE 2 | ||
1029 | #define EVQ_NOT_EN_EV_DECODE 3 | ||
1030 | #define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4 | ||
1031 | #define SRM_UPD_DONE_EV_DECODE 5 | ||
1032 | #define WAKE_UP_EV_DECODE 6 | ||
1033 | #define TX_PKT_NON_TCP_UDP_DECODE 9 | ||
1034 | #define TIMER_EV_DECODE 10 | ||
1035 | #define RX_RECOVERY_EV_DECODE 11 | ||
1036 | #define RX_DSC_ERROR_EV_DECODE 14 | ||
1037 | #define TX_DSC_ERROR_EV_DECODE 15 | ||
1038 | #define DRIVER_EV_TX_DESCQ_ID_LBN 0 | ||
1039 | #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12 | ||
1040 | #define DRIVER_EV_RX_FLUSH_FAIL_LBN 12 | ||
1041 | #define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 | ||
1042 | #define DRIVER_EV_RX_DESCQ_ID_LBN 0 | ||
1043 | #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12 | ||
1044 | #define SRM_CLR_EV_DECODE 0 | ||
1045 | #define SRM_UPD_EV_DECODE 1 | ||
1046 | #define SRM_ILLCLR_EV_DECODE 2 | ||
1047 | |||
1048 | /* Global events */ | ||
1049 | #define RX_RECOVERY_B0_LBN 12 | ||
1050 | #define RX_RECOVERY_B0_WIDTH 1 | ||
1051 | #define XG_MNT_INTR_B0_LBN 11 | ||
1052 | #define XG_MNT_INTR_B0_WIDTH 1 | ||
1053 | #define RX_RECOVERY_A1_LBN 11 | ||
1054 | #define RX_RECOVERY_A1_WIDTH 1 | ||
1055 | #define XFP_PHY_INTR_LBN 10 | ||
1056 | #define XFP_PHY_INTR_WIDTH 1 | ||
1057 | #define XG_PHY_INTR_LBN 9 | ||
1058 | #define XG_PHY_INTR_WIDTH 1 | ||
1059 | #define G_PHY1_INTR_LBN 8 | ||
1060 | #define G_PHY1_INTR_WIDTH 1 | ||
1061 | #define G_PHY0_INTR_LBN 7 | ||
1062 | #define G_PHY0_INTR_WIDTH 1 | ||
1063 | |||
1064 | /* Driver-generated test events */ | ||
1065 | #define EVQ_MAGIC_LBN 0 | ||
1066 | #define EVQ_MAGIC_WIDTH 32 | ||
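Event processing starts from EV_CODE; the decode values above select which family of fields applies to the rest of the 64-bit entry. In outline (a sketch, with event pointing at one entry read from the queue):

	/* Illustrative sketch: top-level dispatch on one event queue entry */
	switch (EFX_QWORD_FIELD(*event, EV_CODE)) {
	case RX_IP_EV_DECODE:		/* RX completion: RX_EV_* fields apply */
		break;
	case TX_IP_EV_DECODE:		/* TX completion: TX_EV_* fields apply */
		break;
	case DRIVER_EV_DECODE:		/* sub-coded by DRIVER_EV_SUB_CODE */
		break;
	case GLOBAL_EV_DECODE:		/* PHY/MAC interrupt summary bits */
		break;
	case DRV_GEN_EV_DECODE:		/* driver-generated test event (EVQ_MAGIC) */
		break;
	default:
		break;
	}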
1067 | |||
1068 | /************************************************************************** | ||
1069 | * | ||
1070 | * Falcon MAC stats | ||
1071 | * | ||
1072 | ************************************************************************** | ||
1073 | * | ||
1074 | */ | ||
1075 | |||
1076 | #define GRxGoodOct_offset 0x0 | ||
1077 | #define GRxGoodOct_WIDTH 48 | ||
1078 | #define GRxBadOct_offset 0x8 | ||
1079 | #define GRxBadOct_WIDTH 48 | ||
1080 | #define GRxMissPkt_offset 0x10 | ||
1081 | #define GRxMissPkt_WIDTH 32 | ||
1082 | #define GRxFalseCRS_offset 0x14 | ||
1083 | #define GRxFalseCRS_WIDTH 32 | ||
1084 | #define GRxPausePkt_offset 0x18 | ||
1085 | #define GRxPausePkt_WIDTH 32 | ||
1086 | #define GRxBadPkt_offset 0x1C | ||
1087 | #define GRxBadPkt_WIDTH 32 | ||
1088 | #define GRxUcastPkt_offset 0x20 | ||
1089 | #define GRxUcastPkt_WIDTH 32 | ||
1090 | #define GRxMcastPkt_offset 0x24 | ||
1091 | #define GRxMcastPkt_WIDTH 32 | ||
1092 | #define GRxBcastPkt_offset 0x28 | ||
1093 | #define GRxBcastPkt_WIDTH 32 | ||
1094 | #define GRxGoodLt64Pkt_offset 0x2C | ||
1095 | #define GRxGoodLt64Pkt_WIDTH 32 | ||
1096 | #define GRxBadLt64Pkt_offset 0x30 | ||
1097 | #define GRxBadLt64Pkt_WIDTH 32 | ||
1098 | #define GRx64Pkt_offset 0x34 | ||
1099 | #define GRx64Pkt_WIDTH 32 | ||
1100 | #define GRx65to127Pkt_offset 0x38 | ||
1101 | #define GRx65to127Pkt_WIDTH 32 | ||
1102 | #define GRx128to255Pkt_offset 0x3C | ||
1103 | #define GRx128to255Pkt_WIDTH 32 | ||
1104 | #define GRx256to511Pkt_offset 0x40 | ||
1105 | #define GRx256to511Pkt_WIDTH 32 | ||
1106 | #define GRx512to1023Pkt_offset 0x44 | ||
1107 | #define GRx512to1023Pkt_WIDTH 32 | ||
1108 | #define GRx1024to15xxPkt_offset 0x48 | ||
1109 | #define GRx1024to15xxPkt_WIDTH 32 | ||
1110 | #define GRx15xxtoJumboPkt_offset 0x4C | ||
1111 | #define GRx15xxtoJumboPkt_WIDTH 32 | ||
1112 | #define GRxGtJumboPkt_offset 0x50 | ||
1113 | #define GRxGtJumboPkt_WIDTH 32 | ||
1114 | #define GRxFcsErr64to15xxPkt_offset 0x54 | ||
1115 | #define GRxFcsErr64to15xxPkt_WIDTH 32 | ||
1116 | #define GRxFcsErr15xxtoJumboPkt_offset 0x58 | ||
1117 | #define GRxFcsErr15xxtoJumboPkt_WIDTH 32 | ||
1118 | #define GRxFcsErrGtJumboPkt_offset 0x5C | ||
1119 | #define GRxFcsErrGtJumboPkt_WIDTH 32 | ||
1120 | #define GTxGoodBadOct_offset 0x80 | ||
1121 | #define GTxGoodBadOct_WIDTH 48 | ||
1122 | #define GTxGoodOct_offset 0x88 | ||
1123 | #define GTxGoodOct_WIDTH 48 | ||
1124 | #define GTxSglColPkt_offset 0x90 | ||
1125 | #define GTxSglColPkt_WIDTH 32 | ||
1126 | #define GTxMultColPkt_offset 0x94 | ||
1127 | #define GTxMultColPkt_WIDTH 32 | ||
1128 | #define GTxExColPkt_offset 0x98 | ||
1129 | #define GTxExColPkt_WIDTH 32 | ||
1130 | #define GTxDefPkt_offset 0x9C | ||
1131 | #define GTxDefPkt_WIDTH 32 | ||
1132 | #define GTxLateCol_offset 0xA0 | ||
1133 | #define GTxLateCol_WIDTH 32 | ||
1134 | #define GTxExDefPkt_offset 0xA4 | ||
1135 | #define GTxExDefPkt_WIDTH 32 | ||
1136 | #define GTxPausePkt_offset 0xA8 | ||
1137 | #define GTxPausePkt_WIDTH 32 | ||
1138 | #define GTxBadPkt_offset 0xAC | ||
1139 | #define GTxBadPkt_WIDTH 32 | ||
1140 | #define GTxUcastPkt_offset 0xB0 | ||
1141 | #define GTxUcastPkt_WIDTH 32 | ||
1142 | #define GTxMcastPkt_offset 0xB4 | ||
1143 | #define GTxMcastPkt_WIDTH 32 | ||
1144 | #define GTxBcastPkt_offset 0xB8 | ||
1145 | #define GTxBcastPkt_WIDTH 32 | ||
1146 | #define GTxLt64Pkt_offset 0xBC | ||
1147 | #define GTxLt64Pkt_WIDTH 32 | ||
1148 | #define GTx64Pkt_offset 0xC0 | ||
1149 | #define GTx64Pkt_WIDTH 32 | ||
1150 | #define GTx65to127Pkt_offset 0xC4 | ||
1151 | #define GTx65to127Pkt_WIDTH 32 | ||
1152 | #define GTx128to255Pkt_offset 0xC8 | ||
1153 | #define GTx128to255Pkt_WIDTH 32 | ||
1154 | #define GTx256to511Pkt_offset 0xCC | ||
1155 | #define GTx256to511Pkt_WIDTH 32 | ||
1156 | #define GTx512to1023Pkt_offset 0xD0 | ||
1157 | #define GTx512to1023Pkt_WIDTH 32 | ||
1158 | #define GTx1024to15xxPkt_offset 0xD4 | ||
1159 | #define GTx1024to15xxPkt_WIDTH 32 | ||
1160 | #define GTx15xxtoJumboPkt_offset 0xD8 | ||
1161 | #define GTx15xxtoJumboPkt_WIDTH 32 | ||
1162 | #define GTxGtJumboPkt_offset 0xDC | ||
1163 | #define GTxGtJumboPkt_WIDTH 32 | ||
1164 | #define GTxNonTcpUdpPkt_offset 0xE0 | ||
1165 | #define GTxNonTcpUdpPkt_WIDTH 16 | ||
1166 | #define GTxMacSrcErrPkt_offset 0xE4 | ||
1167 | #define GTxMacSrcErrPkt_WIDTH 16 | ||
1168 | #define GTxIpSrcErrPkt_offset 0xE8 | ||
1169 | #define GTxIpSrcErrPkt_WIDTH 16 | ||
1170 | #define GDmaDone_offset 0xEC | ||
1171 | #define GDmaDone_WIDTH 32 | ||
1172 | |||
1173 | #define XgRxOctets_offset 0x0 | ||
1174 | #define XgRxOctets_WIDTH 48 | ||
1175 | #define XgRxOctetsOK_offset 0x8 | ||
1176 | #define XgRxOctetsOK_WIDTH 48 | ||
1177 | #define XgRxPkts_offset 0x10 | ||
1178 | #define XgRxPkts_WIDTH 32 | ||
1179 | #define XgRxPktsOK_offset 0x14 | ||
1180 | #define XgRxPktsOK_WIDTH 32 | ||
1181 | #define XgRxBroadcastPkts_offset 0x18 | ||
1182 | #define XgRxBroadcastPkts_WIDTH 32 | ||
1183 | #define XgRxMulticastPkts_offset 0x1C | ||
1184 | #define XgRxMulticastPkts_WIDTH 32 | ||
1185 | #define XgRxUnicastPkts_offset 0x20 | ||
1186 | #define XgRxUnicastPkts_WIDTH 32 | ||
1187 | #define XgRxUndersizePkts_offset 0x24 | ||
1188 | #define XgRxUndersizePkts_WIDTH 32 | ||
1189 | #define XgRxOversizePkts_offset 0x28 | ||
1190 | #define XgRxOversizePkts_WIDTH 32 | ||
1191 | #define XgRxJabberPkts_offset 0x2C | ||
1192 | #define XgRxJabberPkts_WIDTH 32 | ||
1193 | #define XgRxUndersizeFCSerrorPkts_offset 0x30 | ||
1194 | #define XgRxUndersizeFCSerrorPkts_WIDTH 32 | ||
1195 | #define XgRxDropEvents_offset 0x34 | ||
1196 | #define XgRxDropEvents_WIDTH 32 | ||
1197 | #define XgRxFCSerrorPkts_offset 0x38 | ||
1198 | #define XgRxFCSerrorPkts_WIDTH 32 | ||
1199 | #define XgRxAlignError_offset 0x3C | ||
1200 | #define XgRxAlignError_WIDTH 32 | ||
1201 | #define XgRxSymbolError_offset 0x40 | ||
1202 | #define XgRxSymbolError_WIDTH 32 | ||
1203 | #define XgRxInternalMACError_offset 0x44 | ||
1204 | #define XgRxInternalMACError_WIDTH 32 | ||
1205 | #define XgRxControlPkts_offset 0x48 | ||
1206 | #define XgRxControlPkts_WIDTH 32 | ||
1207 | #define XgRxPausePkts_offset 0x4C | ||
1208 | #define XgRxPausePkts_WIDTH 32 | ||
1209 | #define XgRxPkts64Octets_offset 0x50 | ||
1210 | #define XgRxPkts64Octets_WIDTH 32 | ||
1211 | #define XgRxPkts65to127Octets_offset 0x54 | ||
1212 | #define XgRxPkts65to127Octets_WIDTH 32 | ||
1213 | #define XgRxPkts128to255Octets_offset 0x58 | ||
1214 | #define XgRxPkts128to255Octets_WIDTH 32 | ||
1215 | #define XgRxPkts256to511Octets_offset 0x5C | ||
1216 | #define XgRxPkts256to511Octets_WIDTH 32 | ||
1217 | #define XgRxPkts512to1023Octets_offset 0x60 | ||
1218 | #define XgRxPkts512to1023Octets_WIDTH 32 | ||
1219 | #define XgRxPkts1024to15xxOctets_offset 0x64 | ||
1220 | #define XgRxPkts1024to15xxOctets_WIDTH 32 | ||
1221 | #define XgRxPkts15xxtoMaxOctets_offset 0x68 | ||
1222 | #define XgRxPkts15xxtoMaxOctets_WIDTH 32 | ||
1223 | #define XgRxLengthError_offset 0x6C | ||
1224 | #define XgRxLengthError_WIDTH 32 | ||
1225 | #define XgTxPkts_offset 0x80 | ||
1226 | #define XgTxPkts_WIDTH 32 | ||
1227 | #define XgTxOctets_offset 0x88 | ||
1228 | #define XgTxOctets_WIDTH 48 | ||
1229 | #define XgTxMulticastPkts_offset 0x90 | ||
1230 | #define XgTxMulticastPkts_WIDTH 32 | ||
1231 | #define XgTxBroadcastPkts_offset 0x94 | ||
1232 | #define XgTxBroadcastPkts_WIDTH 32 | ||
1233 | #define XgTxUnicastPkts_offset 0x98 | ||
1234 | #define XgTxUnicastPkts_WIDTH 32 | ||
1235 | #define XgTxControlPkts_offset 0x9C | ||
1236 | #define XgTxControlPkts_WIDTH 32 | ||
1237 | #define XgTxPausePkts_offset 0xA0 | ||
1238 | #define XgTxPausePkts_WIDTH 32 | ||
1239 | #define XgTxPkts64Octets_offset 0xA4 | ||
1240 | #define XgTxPkts64Octets_WIDTH 32 | ||
1241 | #define XgTxPkts65to127Octets_offset 0xA8 | ||
1242 | #define XgTxPkts65to127Octets_WIDTH 32 | ||
1243 | #define XgTxPkts128to255Octets_offset 0xAC | ||
1244 | #define XgTxPkts128to255Octets_WIDTH 32 | ||
1245 | #define XgTxPkts256to511Octets_offset 0xB0 | ||
1246 | #define XgTxPkts256to511Octets_WIDTH 32 | ||
1247 | #define XgTxPkts512to1023Octets_offset 0xB4 | ||
1248 | #define XgTxPkts512to1023Octets_WIDTH 32 | ||
1249 | #define XgTxPkts1024to15xxOctets_offset 0xB8 | ||
1250 | #define XgTxPkts1024to15xxOctets_WIDTH 32 | ||
1251 | #define XgTxPkts1519toMaxOctets_offset 0xBC | ||
1252 | #define XgTxPkts1519toMaxOctets_WIDTH 32 | ||
1253 | #define XgTxUndersizePkts_offset 0xC0 | ||
1254 | #define XgTxUndersizePkts_WIDTH 32 | ||
1255 | #define XgTxOversizePkts_offset 0xC4 | ||
1256 | #define XgTxOversizePkts_WIDTH 32 | ||
1257 | #define XgTxNonTcpUdpPkt_offset 0xC8 | ||
1258 | #define XgTxNonTcpUdpPkt_WIDTH 16 | ||
1259 | #define XgTxMacSrcErrPkt_offset 0xCC | ||
1260 | #define XgTxMacSrcErrPkt_WIDTH 16 | ||
1261 | #define XgTxIpSrcErrPkt_offset 0xD0 | ||
1262 | #define XgTxIpSrcErrPkt_WIDTH 16 | ||
1263 | #define XgDmaDone_offset 0xD4 | ||
1264 | |||
1265 | #define FALCON_STATS_NOT_DONE 0x00000000 | ||
1266 | #define FALCON_STATS_DONE 0xffffffff | ||
1267 | |||
1268 | /* Interrupt status register bits */ | ||
1269 | #define FATAL_INT_LBN 64 | ||
1270 | #define FATAL_INT_WIDTH 1 | ||
1271 | #define INT_EVQS_LBN 40 | ||
1272 | #define INT_EVQS_WIDTH 4 | ||
1273 | |||
1274 | /************************************************************************** | ||
1275 | * | ||
1276 | * Falcon non-volatile configuration | ||
1277 | * | ||
1278 | ************************************************************************** | ||
1279 | */ | ||
1280 | |||
1281 | /* Board configuration v2 (v1 is obsolete; later versions are compatible) */ | ||
1282 | struct falcon_nvconfig_board_v2 { | ||
1283 | __le16 nports; | ||
1284 | u8 port0_phy_addr; | ||
1285 | u8 port0_phy_type; | ||
1286 | u8 port1_phy_addr; | ||
1287 | u8 port1_phy_type; | ||
1288 | __le16 asic_sub_revision; | ||
1289 | __le16 board_revision; | ||
1290 | } __packed; | ||
1291 | |||
1292 | /* Board configuration v3 extra information */ | ||
1293 | struct falcon_nvconfig_board_v3 { | ||
1294 | __le32 spi_device_type[2]; | ||
1295 | } __packed; | ||
1296 | |||
1297 | /* Bit numbers for spi_device_type */ | ||
1298 | #define SPI_DEV_TYPE_SIZE_LBN 0 | ||
1299 | #define SPI_DEV_TYPE_SIZE_WIDTH 5 | ||
1300 | #define SPI_DEV_TYPE_ADDR_LEN_LBN 6 | ||
1301 | #define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2 | ||
1302 | #define SPI_DEV_TYPE_ERASE_CMD_LBN 8 | ||
1303 | #define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8 | ||
1304 | #define SPI_DEV_TYPE_ERASE_SIZE_LBN 16 | ||
1305 | #define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5 | ||
1306 | #define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24 | ||
1307 | #define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5 | ||
1308 | #define SPI_DEV_TYPE_FIELD(type, field) \ | ||
1309 | (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) | ||
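SPI_DEV_TYPE_FIELD() is the intended way to unpack one of the spi_device_type words carried in board v3; for instance (a sketch, using the field names defined above):

	/* Illustrative sketch: decode SPI flash geometry from a spi_device_type word */
	u32 spi_type = le32_to_cpu(board_v3->spi_device_type[0]);
	unsigned int size_log2  = SPI_DEV_TYPE_FIELD(spi_type, SPI_DEV_TYPE_SIZE);
	unsigned int addr_len   = SPI_DEV_TYPE_FIELD(spi_type, SPI_DEV_TYPE_ADDR_LEN);
	u8 erase_command        = SPI_DEV_TYPE_FIELD(spi_type, SPI_DEV_TYPE_ERASE_CMD);
	unsigned int erase_log2 = SPI_DEV_TYPE_FIELD(spi_type, SPI_DEV_TYPE_ERASE_SIZE);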
1310 | |||
1311 | #define NVCONFIG_OFFSET 0x300 | ||
1312 | |||
1313 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | ||
1314 | struct falcon_nvconfig { | ||
1315 | efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ | ||
1316 | u8 mac_address[2][8]; /* 0x310 */ | ||
1317 | efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */ | ||
1318 | efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */ | ||
1319 | efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */ | ||
1320 | efx_oword_t hw_init_reg; /* 0x350 */ | ||
1321 | efx_oword_t nic_stat_reg; /* 0x360 */ | ||
1322 | efx_oword_t glb_ctl_reg; /* 0x370 */ | ||
1323 | efx_oword_t srm_cfg_reg; /* 0x380 */ | ||
1324 | efx_oword_t spare_reg; /* 0x390 */ | ||
1325 | __le16 board_magic_num; /* 0x3A0 */ | ||
1326 | __le16 board_struct_ver; | ||
1327 | __le16 board_checksum; | ||
1328 | struct falcon_nvconfig_board_v2 board_v2; | ||
1329 | efx_oword_t ee_base_page_reg; /* 0x3B0 */ | ||
1330 | struct falcon_nvconfig_board_v3 board_v3; | ||
1331 | } __packed; | ||
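When this image is read back from flash at NVCONFIG_OFFSET it is sanity-checked before the board fields are trusted; roughly (a sketch — the driver additionally verifies board_checksum over the structure):

	/* Illustrative sketch: minimal validation of a freshly-read nvconfig image */
	static bool example_nvconfig_valid(const struct falcon_nvconfig *nvconfig)
	{
		if (le16_to_cpu(nvconfig->board_magic_num) != NVCONFIG_BOARD_MAGIC_NUM)
			return false;			/* never programmed, or corrupt */
		/* board_v2 (and later board_v3) only exist from struct version 2 on */
		return le16_to_cpu(nvconfig->board_struct_ver) >= 2;
	}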
1332 | |||
1333 | #endif /* EFX_FALCON_HWDEFS_H */ | ||
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h deleted file mode 100644 index 8883092dae97..000000000000 --- a/drivers/net/sfc/falcon_io.h +++ /dev/null | |||
@@ -1,258 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_FALCON_IO_H | ||
12 | #define EFX_FALCON_IO_H | ||
13 | |||
14 | #include <linux/io.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | |||
17 | /************************************************************************** | ||
18 | * | ||
19 | * Falcon hardware access | ||
20 | * | ||
21 | ************************************************************************** | ||
22 | * | ||
23 | * Notes on locking strategy: | ||
24 | * | ||
25 | * Most Falcon registers require 16-byte (or 8-byte, for SRAM | ||
26 | * registers) atomic writes which necessitates locking. | ||
27 | * Under normal operation few writes to the Falcon BAR are made and these | ||
28 | * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special | ||
29 | * cased to allow 4-byte (hence lockless) accesses. | ||
30 | * | ||
31 | * It *is* safe to write to these 4-byte registers in the middle of an | ||
32 | * access to an 8-byte or 16-byte register. We therefore use a | ||
33 | * spinlock to protect accesses to the larger registers, but no locks | ||
34 | * for the 4-byte registers. | ||
35 | * | ||
36 | * A write barrier is needed to ensure that DW3 is written after DW0/1/2 | ||
37 | * due to the way the 16byte registers are "collected" in the Falcon BIU | ||
38 | * | ||
39 | * We also lock when carrying out reads, to ensure consistency of the | ||
40 | * data (made possible since the BIU reads all 128 bits into a cache). | ||
41 | * Reads are very rare, so this isn't a significant performance | ||
42 | * impact. (Most data transferred from NIC to host is DMAed directly | ||
43 | * into host memory). | ||
44 | * | ||
45 | * I/O BAR access uses locks for both reads and writes (but is only provided | ||
46 | * for testing purposes). | ||
47 | */ | ||
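In caller terms: ordinary registers go through the locked falcon_read()/falcon_write() defined below, while the dword-capable doorbells use the lock-free falcon_writel() variants. A sketch of the contrast (wptr and queue are placeholders):

	/* Illustrative sketch: locked oword read-modify-write vs. lock-free doorbell */
	efx_oword_t rx_cfg;
	efx_dword_t wptr_reg;

	falcon_read(efx, &rx_cfg, RX_CFG_REG_KER);		/* takes biu_lock */
	EFX_SET_OWORD_FIELD(rx_cfg, RX_INGR_EN_B0, 1);
	falcon_write(efx, &rx_cfg, RX_CFG_REG_KER);		/* takes biu_lock */

	EFX_POPULATE_DWORD_1(wptr_reg, RX_DESC_WPTR_DWORD, wptr);
	falcon_writel_page(efx, &wptr_reg, RX_DESC_UPD_REG_KER_DWORD, queue);	/* no lock */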
48 | |||
49 | /* Special buffer descriptors (Falcon SRAM) */ | ||
50 | #define BUF_TBL_KER_A1 0x18000 | ||
51 | #define BUF_TBL_KER_B0 0x800000 | ||
52 | |||
53 | |||
54 | #if BITS_PER_LONG == 64 | ||
55 | #define FALCON_USE_QWORD_IO 1 | ||
56 | #endif | ||
57 | |||
58 | #ifdef FALCON_USE_QWORD_IO | ||
59 | static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, | ||
60 | unsigned int reg) | ||
61 | { | ||
62 | __raw_writeq((__force u64)value, efx->membase + reg); | ||
63 | } | ||
64 | static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) | ||
65 | { | ||
66 | return (__force __le64)__raw_readq(efx->membase + reg); | ||
67 | } | ||
68 | #endif | ||
69 | |||
70 | static inline void _falcon_writel(struct efx_nic *efx, __le32 value, | ||
71 | unsigned int reg) | ||
72 | { | ||
73 | __raw_writel((__force u32)value, efx->membase + reg); | ||
74 | } | ||
75 | static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) | ||
76 | { | ||
77 | return (__force __le32)__raw_readl(efx->membase + reg); | ||
78 | } | ||
79 | |||
80 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ | ||
81 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, | ||
82 | unsigned int reg) | ||
83 | { | ||
84 | unsigned long flags; | ||
85 | |||
86 | EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg, | ||
87 | EFX_OWORD_VAL(*value)); | ||
88 | |||
89 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
90 | #ifdef FALCON_USE_QWORD_IO | ||
91 | _falcon_writeq(efx, value->u64[0], reg + 0); | ||
92 | wmb(); | ||
93 | _falcon_writeq(efx, value->u64[1], reg + 8); | ||
94 | #else | ||
95 | _falcon_writel(efx, value->u32[0], reg + 0); | ||
96 | _falcon_writel(efx, value->u32[1], reg + 4); | ||
97 | _falcon_writel(efx, value->u32[2], reg + 8); | ||
98 | wmb(); | ||
99 | _falcon_writel(efx, value->u32[3], reg + 12); | ||
100 | #endif | ||
101 | mmiowb(); | ||
102 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
103 | } | ||
104 | |||
105 | /* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */ | ||
106 | static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value, | ||
107 | unsigned int index) | ||
108 | { | ||
109 | unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value)); | ||
110 | unsigned long flags; | ||
111 | |||
112 | EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n", | ||
113 | reg, EFX_QWORD_VAL(*value)); | ||
114 | |||
115 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
116 | #ifdef FALCON_USE_QWORD_IO | ||
117 | _falcon_writeq(efx, value->u64[0], reg + 0); | ||
118 | #else | ||
119 | _falcon_writel(efx, value->u32[0], reg + 0); | ||
120 | wmb(); | ||
121 | _falcon_writel(efx, value->u32[1], reg + 4); | ||
122 | #endif | ||
123 | mmiowb(); | ||
124 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
125 | } | ||
126 | |||
127 | /* Write dword to Falcon register that allows partial writes | ||
128 | * | ||
129 | * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and | ||
130 | * TX_DESC_UPD_REG) can be written to as a single dword. This allows | ||
131 | * for lockless writes. | ||
132 | */ | ||
133 | static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value, | ||
134 | unsigned int reg) | ||
135 | { | ||
136 | EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n", | ||
137 | reg, EFX_DWORD_VAL(*value)); | ||
138 | |||
139 | /* No lock required */ | ||
140 | _falcon_writel(efx, value->u32[0], reg); | ||
141 | } | ||
142 | |||
143 | /* Read from a Falcon register | ||
144 | * | ||
145 | * This reads an entire 16-byte Falcon register in one go, locking as | ||
146 | * appropriate. It is essential to read the first dword first, as this | ||
147 | * prompts Falcon to load the current value into the shadow register. | ||
148 | */ | ||
149 | static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value, | ||
150 | unsigned int reg) | ||
151 | { | ||
152 | unsigned long flags; | ||
153 | |||
154 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
155 | value->u32[0] = _falcon_readl(efx, reg + 0); | ||
156 | rmb(); | ||
157 | value->u32[1] = _falcon_readl(efx, reg + 4); | ||
158 | value->u32[2] = _falcon_readl(efx, reg + 8); | ||
159 | value->u32[3] = _falcon_readl(efx, reg + 12); | ||
160 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
161 | |||
162 | EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg, | ||
163 | EFX_OWORD_VAL(*value)); | ||
164 | } | ||
165 | |||
166 | /* This reads an 8-byte Falcon SRAM entry in one go. */ | ||
167 | static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value, | ||
168 | unsigned int index) | ||
169 | { | ||
170 | unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value)); | ||
171 | unsigned long flags; | ||
172 | |||
173 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
174 | #ifdef FALCON_USE_QWORD_IO | ||
175 | value->u64[0] = _falcon_readq(efx, reg + 0); | ||
176 | #else | ||
177 | value->u32[0] = _falcon_readl(efx, reg + 0); | ||
178 | rmb(); | ||
179 | value->u32[1] = _falcon_readl(efx, reg + 4); | ||
180 | #endif | ||
181 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
182 | |||
183 | EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n", | ||
184 | reg, EFX_QWORD_VAL(*value)); | ||
185 | } | ||
186 | |||
187 | /* Read dword from Falcon register that allows partial writes (sic) */ | ||
188 | static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value, | ||
189 | unsigned int reg) | ||
190 | { | ||
191 | value->u32[0] = _falcon_readl(efx, reg); | ||
192 | EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n", | ||
193 | reg, EFX_DWORD_VAL(*value)); | ||
194 | } | ||
195 | |||
196 | /* Write to a register forming part of a table */ | ||
197 | static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value, | ||
198 | unsigned int reg, unsigned int index) | ||
199 | { | ||
200 | falcon_write(efx, value, reg + index * sizeof(efx_oword_t)); | ||
201 | } | ||
202 | |||
203 | /* Read to a register forming part of a table */ | ||
204 | static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value, | ||
205 | unsigned int reg, unsigned int index) | ||
206 | { | ||
207 | falcon_read(efx, value, reg + index * sizeof(efx_oword_t)); | ||
208 | } | ||
209 | |||
210 | /* Write to a dword register forming part of a table */ | ||
211 | static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value, | ||
212 | unsigned int reg, unsigned int index) | ||
213 | { | ||
214 | falcon_writel(efx, value, reg + index * sizeof(efx_oword_t)); | ||
215 | } | ||
216 | |||
217 | /* Page-mapped register block size */ | ||
218 | #define FALCON_PAGE_BLOCK_SIZE 0x2000 | ||
219 | |||
220 | /* Calculate offset to page-mapped register block */ | ||
221 | #define FALCON_PAGED_REG(page, reg) \ | ||
222 | ((page) * FALCON_PAGE_BLOCK_SIZE + (reg)) | ||
223 | |||
224 | /* As for falcon_write(), but for a page-mapped register. */ | ||
225 | static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value, | ||
226 | unsigned int reg, unsigned int page) | ||
227 | { | ||
228 | falcon_write(efx, value, FALCON_PAGED_REG(page, reg)); | ||
229 | } | ||
230 | |||
231 | /* As for falcon_writel(), but for a page-mapped register. */ | ||
232 | static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value, | ||
233 | unsigned int reg, unsigned int page) | ||
234 | { | ||
235 | falcon_writel(efx, value, FALCON_PAGED_REG(page, reg)); | ||
236 | } | ||
237 | |||
238 | /* Write dword to Falcon page-mapped register with an extra lock. | ||
239 | * | ||
240 | * As for falcon_writel_page(), but for a register that suffers from | ||
241 | * SFC bug 3181. If writing to page 0, take out a lock so the BIU | ||
242 | * collector cannot be confused. | ||
243 | */ | ||
244 | static inline void falcon_writel_page_locked(struct efx_nic *efx, | ||
245 | efx_dword_t *value, | ||
246 | unsigned int reg, | ||
247 | unsigned int page) | ||
248 | { | ||
249 | unsigned long flags = 0; | ||
250 | |||
251 | if (page == 0) | ||
252 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
253 | falcon_writel(efx, value, FALCON_PAGED_REG(page, reg)); | ||
254 | if (page == 0) | ||
255 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
256 | } | ||
257 | |||
258 | #endif /* EFX_FALCON_IO_H */ | ||
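A short editorial sketch of the read-modify-write pattern these pre-rename helpers support, reusing the XX_SD_CTL_REG register and XX_LPBKA field that appear in the falcon_xmac.c hunk below; the wrapper function itself is hypothetical and not part of this patch:

static void example_set_xaui_lane_a_loopback(struct efx_nic *efx, bool enable)
{
        efx_oword_t reg;

        /* 16-byte read and write each take biu_lock internally */
        falcon_read(efx, &reg, XX_SD_CTL_REG);
        EFX_SET_OWORD_FIELD(reg, XX_LPBKA, enable);
        falcon_write(efx, &reg, XX_SD_CTL_REG);
}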
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index bec52ca37eee..8ccab2c67a20 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -11,13 +11,12 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include "net_driver.h" | 12 | #include "net_driver.h" |
13 | #include "efx.h" | 13 | #include "efx.h" |
14 | #include "falcon.h" | 14 | #include "nic.h" |
15 | #include "falcon_hwdefs.h" | 15 | #include "regs.h" |
16 | #include "falcon_io.h" | 16 | #include "io.h" |
17 | #include "mac.h" | 17 | #include "mac.h" |
18 | #include "mdio_10g.h" | 18 | #include "mdio_10g.h" |
19 | #include "phy.h" | 19 | #include "phy.h" |
20 | #include "boards.h" | ||
21 | #include "workarounds.h" | 20 | #include "workarounds.h" |
22 | 21 | ||
23 | /************************************************************************** | 22 | /************************************************************************** |
@@ -36,43 +35,47 @@ static void falcon_setup_xaui(struct efx_nic *efx) | |||
36 | if (efx->phy_type == PHY_TYPE_NONE) | 35 | if (efx->phy_type == PHY_TYPE_NONE) |
37 | return; | 36 | return; |
38 | 37 | ||
39 | falcon_read(efx, &sdctl, XX_SD_CTL_REG); | 38 | efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL); |
40 | EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); | 39 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF); |
41 | EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); | 40 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF); |
42 | EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); | 41 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF); |
43 | EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); | 42 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF); |
44 | EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); | 43 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF); |
45 | EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); | 44 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF); |
46 | EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); | 45 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF); |
47 | EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); | 46 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF); |
48 | falcon_write(efx, &sdctl, XX_SD_CTL_REG); | 47 | efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL); |
49 | 48 | ||
50 | EFX_POPULATE_OWORD_8(txdrv, | 49 | EFX_POPULATE_OWORD_8(txdrv, |
51 | XX_DEQD, XX_TXDRV_DEQ_DEFAULT, | 50 | FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF, |
52 | XX_DEQC, XX_TXDRV_DEQ_DEFAULT, | 51 | FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF, |
53 | XX_DEQB, XX_TXDRV_DEQ_DEFAULT, | 52 | FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF, |
54 | XX_DEQA, XX_TXDRV_DEQ_DEFAULT, | 53 | FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF, |
55 | XX_DTXD, XX_TXDRV_DTX_DEFAULT, | 54 | FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF, |
56 | XX_DTXC, XX_TXDRV_DTX_DEFAULT, | 55 | FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF, |
57 | XX_DTXB, XX_TXDRV_DTX_DEFAULT, | 56 | FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF, |
58 | XX_DTXA, XX_TXDRV_DTX_DEFAULT); | 57 | FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF); |
59 | falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); | 58 | efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL); |
60 | } | 59 | } |
61 | 60 | ||
62 | int falcon_reset_xaui(struct efx_nic *efx) | 61 | int falcon_reset_xaui(struct efx_nic *efx) |
63 | { | 62 | { |
63 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
64 | efx_oword_t reg; | 64 | efx_oword_t reg; |
65 | int count; | 65 | int count; |
66 | 66 | ||
67 | /* Don't fetch MAC statistics over an XMAC reset */ | ||
68 | WARN_ON(nic_data->stats_disable_count == 0); | ||
69 | |||
67 | /* Start reset sequence */ | 70 | /* Start reset sequence */ |
68 | EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); | 71 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1); |
69 | falcon_write(efx, ®, XX_PWR_RST_REG); | 72 | efx_writeo(efx, ®, FR_AB_XX_PWR_RST); |
70 | 73 | ||
71 | /* Wait up to 10 ms for completion, then reinitialise */ | 74 | /* Wait up to 10 ms for completion, then reinitialise */ |
72 | for (count = 0; count < 1000; count++) { | 75 | for (count = 0; count < 1000; count++) { |
73 | falcon_read(efx, ®, XX_PWR_RST_REG); | 76 | efx_reado(efx, ®, FR_AB_XX_PWR_RST); |
74 | if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && | 77 | if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 && |
75 | EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { | 78 | EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) { |
76 | falcon_setup_xaui(efx); | 79 | falcon_setup_xaui(efx); |
77 | return 0; | 80 | return 0; |
78 | } | 81 | } |
@@ -86,117 +89,118 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable) | |||
86 | { | 89 | { |
87 | efx_oword_t reg; | 90 | efx_oword_t reg; |
88 | 91 | ||
89 | if ((falcon_rev(efx) != FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) | 92 | if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) |
90 | return; | 93 | return; |
91 | 94 | ||
92 | /* We expect xgmii faults if the wireside link is up */ | 95 | /* We expect xgmii faults if the wireside link is up */ |
93 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_up) | 96 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) |
94 | return; | 97 | return; |
95 | 98 | ||
96 | /* We can only use this interrupt to signal the negative edge of | 99 | /* We can only use this interrupt to signal the negative edge of |
97 | * xaui_align [we have to poll the positive edge]. */ | 100 | * xaui_align [we have to poll the positive edge]. */ |
98 | if (!efx->mac_up) | 101 | if (efx->xmac_poll_required) |
99 | return; | 102 | return; |
100 | 103 | ||
101 | /* Flush the ISR */ | 104 | /* Flush the ISR */ |
102 | if (enable) | 105 | if (enable) |
103 | falcon_read(efx, ®, XM_MGT_INT_REG_B0); | 106 | efx_reado(efx, ®, FR_AB_XM_MGT_INT_MSK); |
104 | 107 | ||
105 | EFX_POPULATE_OWORD_2(reg, | 108 | EFX_POPULATE_OWORD_2(reg, |
106 | XM_MSK_RMTFLT, !enable, | 109 | FRF_AB_XM_MSK_RMTFLT, !enable, |
107 | XM_MSK_LCLFLT, !enable); | 110 | FRF_AB_XM_MSK_LCLFLT, !enable); |
108 | falcon_write(efx, ®, XM_MGT_INT_MSK_REG_B0); | 111 | efx_writeo(efx, ®, FR_AB_XM_MGT_INT_MASK); |
109 | } | 112 | } |
110 | 113 | ||
111 | /* Get status of XAUI link */ | 114 | static bool falcon_xgxs_link_ok(struct efx_nic *efx) |
112 | bool falcon_xaui_link_ok(struct efx_nic *efx) | ||
113 | { | 115 | { |
114 | efx_oword_t reg; | 116 | efx_oword_t reg; |
115 | bool align_done, link_ok = false; | 117 | bool align_done, link_ok = false; |
116 | int sync_status; | 118 | int sync_status; |
117 | 119 | ||
118 | if (LOOPBACK_INTERNAL(efx)) | ||
119 | return true; | ||
120 | |||
121 | /* Read link status */ | 120 | /* Read link status */ |
122 | falcon_read(efx, ®, XX_CORE_STAT_REG); | 121 | efx_reado(efx, ®, FR_AB_XX_CORE_STAT); |
123 | 122 | ||
124 | align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE); | 123 | align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); |
125 | sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT); | 124 | sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); |
126 | if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) | 125 | if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) |
127 | link_ok = true; | 126 | link_ok = true; |
128 | 127 | ||
129 | /* Clear link status ready for next read */ | 128 | /* Clear link status ready for next read */ |
130 | EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); | 129 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); |
131 | EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); | 130 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); |
132 | EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); | 131 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); |
133 | falcon_write(efx, ®, XX_CORE_STAT_REG); | 132 | efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); |
134 | |||
135 | /* If the link is up, then check the phy side of the xaui link */ | ||
136 | if (efx->link_up && link_ok) | ||
137 | if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS)) | ||
138 | link_ok = efx_mdio_phyxgxs_lane_sync(efx); | ||
139 | 133 | ||
140 | return link_ok; | 134 | return link_ok; |
141 | } | 135 | } |
142 | 136 | ||
143 | static void falcon_reconfigure_xmac_core(struct efx_nic *efx) | 137 | static bool falcon_xmac_link_ok(struct efx_nic *efx) |
138 | { | ||
139 | /* | ||
140 | * Check MAC's XGXS link status except when using XGMII loopback | ||
141 | * which bypasses the XGXS block. | ||
142 | * If possible, check PHY's XGXS link status except when using | ||
143 | * MAC loopback. | ||
144 | */ | ||
145 | return (efx->loopback_mode == LOOPBACK_XGMII || | ||
146 | falcon_xgxs_link_ok(efx)) && | ||
147 | (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || | ||
148 | LOOPBACK_INTERNAL(efx) || | ||
149 | efx_mdio_phyxgxs_lane_sync(efx)); | ||
150 | } | ||
151 | |||
152 | void falcon_reconfigure_xmac_core(struct efx_nic *efx) | ||
144 | { | 153 | { |
145 | unsigned int max_frame_len; | 154 | unsigned int max_frame_len; |
146 | efx_oword_t reg; | 155 | efx_oword_t reg; |
147 | bool rx_fc = !!(efx->link_fc & EFX_FC_RX); | 156 | bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX); |
157 | bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX); | ||
148 | 158 | ||
149 | /* Configure MAC - cut-thru mode is hard wired on */ | 159 | /* Configure MAC - cut-thru mode is hard wired on */ |
150 | EFX_POPULATE_DWORD_3(reg, | 160 | EFX_POPULATE_OWORD_3(reg, |
151 | XM_RX_JUMBO_MODE, 1, | 161 | FRF_AB_XM_RX_JUMBO_MODE, 1, |
152 | XM_TX_STAT_EN, 1, | 162 | FRF_AB_XM_TX_STAT_EN, 1, |
153 | XM_RX_STAT_EN, 1); | 163 | FRF_AB_XM_RX_STAT_EN, 1); |
154 | falcon_write(efx, ®, XM_GLB_CFG_REG); | 164 | efx_writeo(efx, ®, FR_AB_XM_GLB_CFG); |
155 | 165 | ||
156 | /* Configure TX */ | 166 | /* Configure TX */ |
157 | EFX_POPULATE_DWORD_6(reg, | 167 | EFX_POPULATE_OWORD_6(reg, |
158 | XM_TXEN, 1, | 168 | FRF_AB_XM_TXEN, 1, |
159 | XM_TX_PRMBL, 1, | 169 | FRF_AB_XM_TX_PRMBL, 1, |
160 | XM_AUTO_PAD, 1, | 170 | FRF_AB_XM_AUTO_PAD, 1, |
161 | XM_TXCRC, 1, | 171 | FRF_AB_XM_TXCRC, 1, |
162 | XM_FCNTL, 1, | 172 | FRF_AB_XM_FCNTL, tx_fc, |
163 | XM_IPG, 0x3); | 173 | FRF_AB_XM_IPG, 0x3); |
164 | falcon_write(efx, ®, XM_TX_CFG_REG); | 174 | efx_writeo(efx, ®, FR_AB_XM_TX_CFG); |
165 | 175 | ||
166 | /* Configure RX */ | 176 | /* Configure RX */ |
167 | EFX_POPULATE_DWORD_5(reg, | 177 | EFX_POPULATE_OWORD_5(reg, |
168 | XM_RXEN, 1, | 178 | FRF_AB_XM_RXEN, 1, |
169 | XM_AUTO_DEPAD, 0, | 179 | FRF_AB_XM_AUTO_DEPAD, 0, |
170 | XM_ACPT_ALL_MCAST, 1, | 180 | FRF_AB_XM_ACPT_ALL_MCAST, 1, |
171 | XM_ACPT_ALL_UCAST, efx->promiscuous, | 181 | FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous, |
172 | XM_PASS_CRC_ERR, 1); | 182 | FRF_AB_XM_PASS_CRC_ERR, 1); |
173 | falcon_write(efx, ®, XM_RX_CFG_REG); | 183 | efx_writeo(efx, ®, FR_AB_XM_RX_CFG); |
174 | 184 | ||
175 | /* Set frame length */ | 185 | /* Set frame length */ |
176 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); | 186 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); |
177 | EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); | 187 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len); |
178 | falcon_write(efx, ®, XM_RX_PARAM_REG); | 188 | efx_writeo(efx, ®, FR_AB_XM_RX_PARAM); |
179 | EFX_POPULATE_DWORD_2(reg, | 189 | EFX_POPULATE_OWORD_2(reg, |
180 | XM_MAX_TX_FRM_SIZE, max_frame_len, | 190 | FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len, |
181 | XM_TX_JUMBO_MODE, 1); | 191 | FRF_AB_XM_TX_JUMBO_MODE, 1); |
182 | falcon_write(efx, ®, XM_TX_PARAM_REG); | 192 | efx_writeo(efx, ®, FR_AB_XM_TX_PARAM); |
183 | 193 | ||
184 | EFX_POPULATE_DWORD_2(reg, | 194 | EFX_POPULATE_OWORD_2(reg, |
185 | XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ | 195 | FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ |
186 | XM_DIS_FCNTL, !rx_fc); | 196 | FRF_AB_XM_DIS_FCNTL, !rx_fc); |
187 | falcon_write(efx, ®, XM_FC_REG); | 197 | efx_writeo(efx, ®, FR_AB_XM_FC); |
188 | 198 | ||
189 | /* Set MAC address */ | 199 | /* Set MAC address */ |
190 | EFX_POPULATE_DWORD_4(reg, | 200 | memcpy(®, &efx->net_dev->dev_addr[0], 4); |
191 | XM_ADR_0, efx->net_dev->dev_addr[0], | 201 | efx_writeo(efx, ®, FR_AB_XM_ADR_LO); |
192 | XM_ADR_1, efx->net_dev->dev_addr[1], | 202 | memcpy(®, &efx->net_dev->dev_addr[4], 2); |
193 | XM_ADR_2, efx->net_dev->dev_addr[2], | 203 | efx_writeo(efx, ®, FR_AB_XM_ADR_HI); |
194 | XM_ADR_3, efx->net_dev->dev_addr[3]); | ||
195 | falcon_write(efx, ®, XM_ADR_LO_REG); | ||
196 | EFX_POPULATE_DWORD_2(reg, | ||
197 | XM_ADR_4, efx->net_dev->dev_addr[4], | ||
198 | XM_ADR_5, efx->net_dev->dev_addr[5]); | ||
199 | falcon_write(efx, ®, XM_ADR_HI_REG); | ||
200 | } | 204 | } |
201 | 205 | ||
202 | static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | 206 | static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) |
@@ -212,12 +216,13 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | |||
212 | bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; | 216 | bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; |
213 | bool reset_xgxs; | 217 | bool reset_xgxs; |
214 | 218 | ||
215 | falcon_read(efx, ®, XX_CORE_STAT_REG); | 219 | efx_reado(efx, ®, FR_AB_XX_CORE_STAT); |
216 | old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN); | 220 | old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); |
217 | old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN); | 221 | old_xgmii_loopback = |
222 | EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); | ||
218 | 223 | ||
219 | falcon_read(efx, ®, XX_SD_CTL_REG); | 224 | efx_reado(efx, ®, FR_AB_XX_SD_CTL); |
220 | old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA); | 225 | old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); |
221 | 226 | ||
222 | /* The PHY driver may have turned XAUI off */ | 227 | /* The PHY driver may have turned XAUI off */ |
223 | reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || | 228 | reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || |
@@ -228,45 +233,55 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | |||
228 | falcon_reset_xaui(efx); | 233 | falcon_reset_xaui(efx); |
229 | } | 234 | } |
230 | 235 | ||
231 | falcon_read(efx, ®, XX_CORE_STAT_REG); | 236 | efx_reado(efx, ®, FR_AB_XX_CORE_STAT); |
232 | EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG, | 237 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, |
233 | (xgxs_loopback || xaui_loopback) ? | 238 | (xgxs_loopback || xaui_loopback) ? |
234 | XX_FORCE_SIG_DECODE_FORCED : 0); | 239 | FFE_AB_XX_FORCE_SIG_ALL_LANES : 0); |
235 | EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); | 240 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback); |
236 | EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); | 241 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback); |
237 | falcon_write(efx, ®, XX_CORE_STAT_REG); | 242 | efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); |
238 | 243 | ||
239 | falcon_read(efx, ®, XX_SD_CTL_REG); | 244 | efx_reado(efx, ®, FR_AB_XX_SD_CTL); |
240 | EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback); | 245 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback); |
241 | EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback); | 246 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback); |
242 | EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback); | 247 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback); |
243 | EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback); | 248 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback); |
244 | falcon_write(efx, ®, XX_SD_CTL_REG); | 249 | efx_writeo(efx, ®, FR_AB_XX_SD_CTL); |
245 | } | 250 | } |
246 | 251 | ||
247 | 252 | ||
248 | /* Try and bring the Falcon side of the Falcon-Phy XAUI link fails | 253 | /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ |
249 | * to come back up. Bash it until it comes back up */ | 254 | static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) |
250 | static void falcon_check_xaui_link_up(struct efx_nic *efx, int tries) | ||
251 | { | 255 | { |
252 | efx->mac_up = falcon_xaui_link_ok(efx); | 256 | bool mac_up = falcon_xmac_link_ok(efx); |
253 | 257 | ||
254 | if ((efx->loopback_mode == LOOPBACK_NETWORK) || | 258 | if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || |
255 | efx_phy_mode_disabled(efx->phy_mode)) | 259 | efx_phy_mode_disabled(efx->phy_mode)) |
256 | /* XAUI link is expected to be down */ | 260 | /* XAUI link is expected to be down */ |
257 | return; | 261 | return mac_up; |
262 | |||
263 | falcon_stop_nic_stats(efx); | ||
258 | 264 | ||
259 | while (!efx->mac_up && tries) { | 265 | while (!mac_up && tries) { |
260 | EFX_LOG(efx, "bashing xaui\n"); | 266 | EFX_LOG(efx, "bashing xaui\n"); |
261 | falcon_reset_xaui(efx); | 267 | falcon_reset_xaui(efx); |
262 | udelay(200); | 268 | udelay(200); |
263 | 269 | ||
264 | efx->mac_up = falcon_xaui_link_ok(efx); | 270 | mac_up = falcon_xmac_link_ok(efx); |
265 | --tries; | 271 | --tries; |
266 | } | 272 | } |
273 | |||
274 | falcon_start_nic_stats(efx); | ||
275 | |||
276 | return mac_up; | ||
277 | } | ||
278 | |||
279 | static bool falcon_xmac_check_fault(struct efx_nic *efx) | ||
280 | { | ||
281 | return !falcon_xmac_link_ok_retry(efx, 5); | ||
267 | } | 282 | } |
268 | 283 | ||
269 | static void falcon_reconfigure_xmac(struct efx_nic *efx) | 284 | static int falcon_reconfigure_xmac(struct efx_nic *efx) |
270 | { | 285 | { |
271 | falcon_mask_status_intr(efx, false); | 286 | falcon_mask_status_intr(efx, false); |
272 | 287 | ||
@@ -275,18 +290,15 @@ static void falcon_reconfigure_xmac(struct efx_nic *efx) | |||
275 | 290 | ||
276 | falcon_reconfigure_mac_wrapper(efx); | 291 | falcon_reconfigure_mac_wrapper(efx); |
277 | 292 | ||
278 | falcon_check_xaui_link_up(efx, 5); | 293 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); |
279 | falcon_mask_status_intr(efx, true); | 294 | falcon_mask_status_intr(efx, true); |
295 | |||
296 | return 0; | ||
280 | } | 297 | } |
281 | 298 | ||
282 | static void falcon_update_stats_xmac(struct efx_nic *efx) | 299 | static void falcon_update_stats_xmac(struct efx_nic *efx) |
283 | { | 300 | { |
284 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 301 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
285 | int rc; | ||
286 | |||
287 | rc = falcon_dma_stats(efx, XgDmaDone_offset); | ||
288 | if (rc) | ||
289 | return; | ||
290 | 302 | ||
291 | /* Update MAC stats from DMAed values */ | 303 | /* Update MAC stats from DMAed values */ |
292 | FALCON_STAT(efx, XgRxOctets, rx_bytes); | 304 | FALCON_STAT(efx, XgRxOctets, rx_bytes); |
@@ -344,35 +356,19 @@ static void falcon_update_stats_xmac(struct efx_nic *efx) | |||
344 | mac_stats->rx_control * 64); | 356 | mac_stats->rx_control * 64); |
345 | } | 357 | } |
346 | 358 | ||
347 | static void falcon_xmac_irq(struct efx_nic *efx) | 359 | void falcon_poll_xmac(struct efx_nic *efx) |
348 | { | ||
349 | /* The XGMII link has a transient fault, which indicates either: | ||
350 | * - there's a transient xgmii fault | ||
351 | * - falcon's end of the xaui link may need a kick | ||
352 | * - the wire-side link may have gone down, but the lasi/poll() | ||
353 | * hasn't noticed yet. | ||
354 | * | ||
355 | * We only want to even bother polling XAUI if we're confident it's | ||
356 | * not (1) or (3). In both cases, the only reliable way to spot this | ||
357 | * is to wait a bit. We do this here by forcing the mac link state | ||
358 | * to down, and waiting for the mac poll to come round and check | ||
359 | */ | ||
360 | efx->mac_up = false; | ||
361 | } | ||
362 | |||
363 | static void falcon_poll_xmac(struct efx_nic *efx) | ||
364 | { | 360 | { |
365 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_up || efx->mac_up) | 361 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || |
362 | !efx->xmac_poll_required) | ||
366 | return; | 363 | return; |
367 | 364 | ||
368 | falcon_mask_status_intr(efx, false); | 365 | falcon_mask_status_intr(efx, false); |
369 | falcon_check_xaui_link_up(efx, 1); | 366 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); |
370 | falcon_mask_status_intr(efx, true); | 367 | falcon_mask_status_intr(efx, true); |
371 | } | 368 | } |
372 | 369 | ||
373 | struct efx_mac_operations falcon_xmac_operations = { | 370 | struct efx_mac_operations falcon_xmac_operations = { |
374 | .reconfigure = falcon_reconfigure_xmac, | 371 | .reconfigure = falcon_reconfigure_xmac, |
375 | .update_stats = falcon_update_stats_xmac, | 372 | .update_stats = falcon_update_stats_xmac, |
376 | .irq = falcon_xmac_irq, | 373 | .check_fault = falcon_xmac_check_fault, |
377 | .poll = falcon_poll_xmac, | ||
378 | }; | 374 | }; |
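The old .irq/.poll pair gives way to a .check_fault hook plus the xmac_poll_required flag. A hedged sketch of how a periodic monitor might consume the reworked interface; the monitor context and RESET_TYPE_MONITOR are assumptions for illustration, while falcon_poll_xmac(), falcon_xmac_operations.check_fault and efx_schedule_reset() do appear in this patch:

static void example_mac_monitor(struct efx_nic *efx)
{
        /* re-check (and if necessary bash) the XAUI link */
        falcon_poll_xmac(efx);

        /* check_fault retries the link up to 5 times before reporting */
        if (falcon_xmac_operations.check_fault(efx))
                efx_schedule_reset(efx, RESET_TYPE_MONITOR);    /* assumed reset type */
}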
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h deleted file mode 100644 index dfccaa7b573e..000000000000 --- a/drivers/net/sfc/gmii.h +++ /dev/null | |||
@@ -1,60 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_GMII_H | ||
12 | #define EFX_GMII_H | ||
13 | |||
14 | /* | ||
15 | * GMII interface | ||
16 | */ | ||
17 | |||
18 | #include <linux/mii.h> | ||
19 | |||
20 | /* GMII registers, excluding registers already defined as MII | ||
21 | * registers in mii.h | ||
22 | */ | ||
23 | #define GMII_IER 0x12 /* Interrupt enable register */ | ||
24 | #define GMII_ISR 0x13 /* Interrupt status register */ | ||
25 | |||
26 | /* Interrupt enable register */ | ||
27 | #define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */ | ||
28 | #define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */ | ||
29 | #define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */ | ||
30 | #define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */ | ||
31 | #define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */ | ||
32 | #define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */ | ||
33 | #define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */ | ||
34 | #define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */ | ||
35 | #define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */ | ||
36 | #define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */ | ||
37 | #define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */ | ||
38 | #define IER_ENERGY 0x0010 /* Bit 4 - energy detect */ | ||
39 | #define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */ | ||
40 | #define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */ | ||
41 | #define IER_JABBER 0x0001 /* Bit 0 - jabber */ | ||
42 | |||
43 | /* Interrupt status register */ | ||
44 | #define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */ | ||
45 | #define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */ | ||
46 | #define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */ | ||
47 | #define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */ | ||
48 | #define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */ | ||
49 | #define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */ | ||
50 | #define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */ | ||
51 | #define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */ | ||
52 | #define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */ | ||
53 | #define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */ | ||
54 | #define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */ | ||
55 | #define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */ | ||
56 | #define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */ | ||
57 | #define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */ | ||
58 | #define ISR_JABBER 0x0001 /* Bit 0 - jabber */ | ||
59 | |||
60 | #endif /* EFX_GMII_H */ | ||
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h new file mode 100644 index 000000000000..b89177c27f4a --- /dev/null +++ b/drivers/net/sfc/io.h | |||
@@ -0,0 +1,256 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_IO_H | ||
12 | #define EFX_IO_H | ||
13 | |||
14 | #include <linux/io.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | |||
17 | /************************************************************************** | ||
18 | * | ||
19 | * NIC register I/O | ||
20 | * | ||
21 | ************************************************************************** | ||
22 | * | ||
23 | * Notes on locking strategy: | ||
24 | * | ||
25 | * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes | ||
26 | * which necessitates locking. | ||
27 | * Under normal operation few writes to NIC registers are made and these | ||
28 | * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special | ||
29 | * cased to allow 4-byte (hence lockless) accesses. | ||
30 | * | ||
31 | * It *is* safe to write to these 4-byte registers in the middle of an | ||
32 | * access to an 8-byte or 16-byte register. We therefore use a | ||
33 | * spinlock to protect accesses to the larger registers, but no locks | ||
34 | * for the 4-byte registers. | ||
35 | * | ||
36 | * A write barrier is needed to ensure that DW3 is written after DW0/1/2 | ||
37 | * due to the way the 16byte registers are "collected" in the BIU. | ||
38 | * | ||
39 | * We also lock when carrying out reads, to ensure consistency of the | ||
40 | * data (made possible since the BIU reads all 128 bits into a cache). | ||
41 | * Reads are very rare, so this isn't a significant performance | ||
42 | * impact. (Most data transferred from NIC to host is DMAed directly | ||
43 | * into host memory). | ||
44 | * | ||
45 | * I/O BAR access uses locks for both reads and writes (but is only provided | ||
46 | * for testing purposes). | ||
47 | */ | ||
48 | |||
49 | #if BITS_PER_LONG == 64 | ||
50 | #define EFX_USE_QWORD_IO 1 | ||
51 | #endif | ||
52 | |||
53 | #ifdef EFX_USE_QWORD_IO | ||
54 | static inline void _efx_writeq(struct efx_nic *efx, __le64 value, | ||
55 | unsigned int reg) | ||
56 | { | ||
57 | __raw_writeq((__force u64)value, efx->membase + reg); | ||
58 | } | ||
59 | static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg) | ||
60 | { | ||
61 | return (__force __le64)__raw_readq(efx->membase + reg); | ||
62 | } | ||
63 | #endif | ||
64 | |||
65 | static inline void _efx_writed(struct efx_nic *efx, __le32 value, | ||
66 | unsigned int reg) | ||
67 | { | ||
68 | __raw_writel((__force u32)value, efx->membase + reg); | ||
69 | } | ||
70 | static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) | ||
71 | { | ||
72 | return (__force __le32)__raw_readl(efx->membase + reg); | ||
73 | } | ||
74 | |||
75 | /* Writes to a normal 16-byte Efx register, locking as appropriate. */ | ||
76 | static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | ||
77 | unsigned int reg) | ||
78 | { | ||
79 | unsigned long flags __attribute__ ((unused)); | ||
80 | |||
81 | EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg, | ||
82 | EFX_OWORD_VAL(*value)); | ||
83 | |||
84 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
85 | #ifdef EFX_USE_QWORD_IO | ||
86 | _efx_writeq(efx, value->u64[0], reg + 0); | ||
87 | wmb(); | ||
88 | _efx_writeq(efx, value->u64[1], reg + 8); | ||
89 | #else | ||
90 | _efx_writed(efx, value->u32[0], reg + 0); | ||
91 | _efx_writed(efx, value->u32[1], reg + 4); | ||
92 | _efx_writed(efx, value->u32[2], reg + 8); | ||
93 | wmb(); | ||
94 | _efx_writed(efx, value->u32[3], reg + 12); | ||
95 | #endif | ||
96 | mmiowb(); | ||
97 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
98 | } | ||
99 | |||
100 | /* Write an 8-byte NIC SRAM entry through the supplied mapping, | ||
101 | * locking as appropriate. */ | ||
102 | static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | ||
103 | efx_qword_t *value, unsigned int index) | ||
104 | { | ||
105 | unsigned int addr = index * sizeof(*value); | ||
106 | unsigned long flags __attribute__ ((unused)); | ||
107 | |||
108 | EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n", | ||
109 | addr, EFX_QWORD_VAL(*value)); | ||
110 | |||
111 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
112 | #ifdef EFX_USE_QWORD_IO | ||
113 | __raw_writeq((__force u64)value->u64[0], membase + addr); | ||
114 | #else | ||
115 | __raw_writel((__force u32)value->u32[0], membase + addr); | ||
116 | wmb(); | ||
117 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); | ||
118 | #endif | ||
119 | mmiowb(); | ||
120 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
121 | } | ||
122 | |||
123 | /* Write dword to NIC register that allows partial writes | ||
124 | * | ||
125 | * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and | ||
126 | * TX_DESC_UPD_REG) can be written to as a single dword. This allows | ||
127 | * for lockless writes. | ||
128 | */ | ||
129 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, | ||
130 | unsigned int reg) | ||
131 | { | ||
132 | EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n", | ||
133 | reg, EFX_DWORD_VAL(*value)); | ||
134 | |||
135 | /* No lock required */ | ||
136 | _efx_writed(efx, value->u32[0], reg); | ||
137 | } | ||
138 | |||
139 | /* Read from a NIC register | ||
140 | * | ||
141 | * This reads an entire 16-byte register in one go, locking as | ||
142 | * appropriate. It is essential to read the first dword first, as this | ||
143 | * prompts the NIC to load the current value into the shadow register. | ||
144 | */ | ||
145 | static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | ||
146 | unsigned int reg) | ||
147 | { | ||
148 | unsigned long flags __attribute__ ((unused)); | ||
149 | |||
150 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
151 | value->u32[0] = _efx_readd(efx, reg + 0); | ||
152 | rmb(); | ||
153 | value->u32[1] = _efx_readd(efx, reg + 4); | ||
154 | value->u32[2] = _efx_readd(efx, reg + 8); | ||
155 | value->u32[3] = _efx_readd(efx, reg + 12); | ||
156 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
157 | |||
158 | EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg, | ||
159 | EFX_OWORD_VAL(*value)); | ||
160 | } | ||
161 | |||
162 | /* Read an 8-byte SRAM entry through supplied mapping, | ||
163 | * locking as appropriate. */ | ||
164 | static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | ||
165 | efx_qword_t *value, unsigned int index) | ||
166 | { | ||
167 | unsigned int addr = index * sizeof(*value); | ||
168 | unsigned long flags __attribute__ ((unused)); | ||
169 | |||
170 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
171 | #ifdef EFX_USE_QWORD_IO | ||
172 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); | ||
173 | #else | ||
174 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); | ||
175 | rmb(); | ||
176 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); | ||
177 | #endif | ||
178 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
179 | |||
180 | EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n", | ||
181 | addr, EFX_QWORD_VAL(*value)); | ||
182 | } | ||
183 | |||
184 | /* Read dword from register that allows partial writes (sic) */ | ||
185 | static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, | ||
186 | unsigned int reg) | ||
187 | { | ||
188 | value->u32[0] = _efx_readd(efx, reg); | ||
189 | EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n", | ||
190 | reg, EFX_DWORD_VAL(*value)); | ||
191 | } | ||
192 | |||
193 | /* Write to a register forming part of a table */ | ||
194 | static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, | ||
195 | unsigned int reg, unsigned int index) | ||
196 | { | ||
197 | efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); | ||
198 | } | ||
199 | |||
200 | /* Read to a register forming part of a table */ | ||
201 | static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, | ||
202 | unsigned int reg, unsigned int index) | ||
203 | { | ||
204 | efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); | ||
205 | } | ||
206 | |||
207 | /* Write to a dword register forming part of a table */ | ||
208 | static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, | ||
209 | unsigned int reg, unsigned int index) | ||
210 | { | ||
211 | efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); | ||
212 | } | ||
213 | |||
214 | /* Page-mapped register block size */ | ||
215 | #define EFX_PAGE_BLOCK_SIZE 0x2000 | ||
216 | |||
217 | /* Calculate offset to page-mapped register block */ | ||
218 | #define EFX_PAGED_REG(page, reg) \ | ||
219 | ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) | ||
220 | |||
221 | /* As for efx_writeo(), but for a page-mapped register. */ | ||
222 | static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, | ||
223 | unsigned int reg, unsigned int page) | ||
224 | { | ||
225 | efx_writeo(efx, value, EFX_PAGED_REG(page, reg)); | ||
226 | } | ||
227 | |||
228 | /* As for efx_writed(), but for a page-mapped register. */ | ||
229 | static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value, | ||
230 | unsigned int reg, unsigned int page) | ||
231 | { | ||
232 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | ||
233 | } | ||
234 | |||
235 | /* Write dword to page-mapped register with an extra lock. | ||
236 | * | ||
237 | * As for efx_writed_page(), but for a register that suffers from | ||
238 | * SFC bug 3181. Take out a lock so the BIU collector cannot be | ||
239 | * confused. */ | ||
240 | static inline void efx_writed_page_locked(struct efx_nic *efx, | ||
241 | efx_dword_t *value, | ||
242 | unsigned int reg, | ||
243 | unsigned int page) | ||
244 | { | ||
245 | unsigned long flags __attribute__ ((unused)); | ||
246 | |||
247 | if (page == 0) { | ||
248 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
249 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | ||
250 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
251 | } else { | ||
252 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | #endif /* EFX_IO_H */ | ||
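A brief usage sketch for the renamed accessors and the locking notes above, borrowing the FR_AB_XM_FC register and its FRF_AB_XM_PAUSE_TIME/FRF_AB_XM_DIS_FCNTL fields from the falcon_xmac.c hunk earlier in this patch; the wrapper function itself is only illustrative:

static void example_write_fc(struct efx_nic *efx, bool rx_fc)
{
        efx_oword_t reg;

        EFX_POPULATE_OWORD_2(reg,
                             FRF_AB_XM_PAUSE_TIME, 0xfffe,
                             FRF_AB_XM_DIS_FCNTL, !rx_fc);
        /* efx_writeo() takes biu_lock and issues the wmb() before DW3 */
        efx_writeo(efx, &reg, FR_AB_XM_FC);
}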
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h index 4e7074278fe1..f1aa5f374890 100644 --- a/drivers/net/sfc/mac.h +++ b/drivers/net/sfc/mac.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -15,5 +15,9 @@ | |||
15 | 15 | ||
16 | extern struct efx_mac_operations falcon_gmac_operations; | 16 | extern struct efx_mac_operations falcon_gmac_operations; |
17 | extern struct efx_mac_operations falcon_xmac_operations; | 17 | extern struct efx_mac_operations falcon_xmac_operations; |
18 | extern struct efx_mac_operations efx_mcdi_mac_operations; | ||
19 | extern void falcon_reconfigure_xmac_core(struct efx_nic *efx); | ||
20 | extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, | ||
21 | u32 dma_len, int enable, int clear); | ||
18 | 22 | ||
19 | #endif | 23 | #endif |
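The new efx_mcdi_mac_stats() prototype lets MAC statistics be DMAed by the management controller rather than read from Falcon registers. A hedged caller sketch, assuming enable=1 starts periodic DMA into the supplied buffer and clear=1 zeroes the counters first (an interpretation of the parameter names, not something stated in this header):

static int example_start_mac_stats(struct efx_nic *efx,
                                   dma_addr_t dma_addr, u32 dma_len)
{
        /* enable periodic stats DMA, clearing counters first (assumed) */
        return efx_mcdi_mac_stats(efx, dma_addr, dma_len, 1, 1);
}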
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c new file mode 100644 index 000000000000..c48669c77414 --- /dev/null +++ b/drivers/net/sfc/mcdi.c | |||
@@ -0,0 +1,1173 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2008-2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include "net_driver.h" | ||
12 | #include "nic.h" | ||
13 | #include "io.h" | ||
14 | #include "regs.h" | ||
15 | #include "mcdi_pcol.h" | ||
16 | #include "phy.h" | ||
17 | |||
18 | /************************************************************************** | ||
19 | * | ||
20 | * Management-Controller-to-Driver Interface | ||
21 | * | ||
22 | ************************************************************************** | ||
23 | */ | ||
24 | |||
25 | /* Software-defined structure to the shared-memory */ | ||
26 | #define CMD_NOTIFY_PORT0 0 | ||
27 | #define CMD_NOTIFY_PORT1 4 | ||
28 | #define CMD_PDU_PORT0 0x008 | ||
29 | #define CMD_PDU_PORT1 0x108 | ||
30 | #define REBOOT_FLAG_PORT0 0x3f8 | ||
31 | #define REBOOT_FLAG_PORT1 0x3fc | ||
32 | |||
33 | #define MCDI_RPC_TIMEOUT 10 /*seconds */ | ||
34 | |||
35 | #define MCDI_PDU(efx) \ | ||
36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) | ||
37 | #define MCDI_DOORBELL(efx) \ | ||
38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) | ||
39 | #define MCDI_REBOOT_FLAG(efx) \ | ||
40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) | ||
41 | |||
42 | #define SEQ_MASK \ | ||
43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) | ||
44 | |||
45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | ||
46 | { | ||
47 | struct siena_nic_data *nic_data; | ||
48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
49 | nic_data = efx->nic_data; | ||
50 | return &nic_data->mcdi; | ||
51 | } | ||
52 | |||
53 | void efx_mcdi_init(struct efx_nic *efx) | ||
54 | { | ||
55 | struct efx_mcdi_iface *mcdi; | ||
56 | |||
57 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
58 | return; | ||
59 | |||
60 | mcdi = efx_mcdi(efx); | ||
61 | init_waitqueue_head(&mcdi->wq); | ||
62 | spin_lock_init(&mcdi->iface_lock); | ||
63 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | ||
64 | mcdi->mode = MCDI_MODE_POLL; | ||
65 | |||
66 | (void) efx_mcdi_poll_reboot(efx); | ||
67 | } | ||
68 | |||
69 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | ||
70 | const u8 *inbuf, size_t inlen) | ||
71 | { | ||
72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); | ||
75 | unsigned int i; | ||
76 | efx_dword_t hdr; | ||
77 | u32 xflags, seqno; | ||
78 | |||
79 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
80 | BUG_ON(inlen & 3 || inlen >= 0x100); | ||
81 | |||
82 | seqno = mcdi->seqno & SEQ_MASK; | ||
83 | xflags = 0; | ||
84 | if (mcdi->mode == MCDI_MODE_EVENTS) | ||
85 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; | ||
86 | |||
87 | EFX_POPULATE_DWORD_6(hdr, | ||
88 | MCDI_HEADER_RESPONSE, 0, | ||
89 | MCDI_HEADER_RESYNC, 1, | ||
90 | MCDI_HEADER_CODE, cmd, | ||
91 | MCDI_HEADER_DATALEN, inlen, | ||
92 | MCDI_HEADER_SEQ, seqno, | ||
93 | MCDI_HEADER_XFLAGS, xflags); | ||
94 | |||
95 | efx_writed(efx, &hdr, pdu); | ||
96 | |||
97 | for (i = 0; i < inlen; i += 4) | ||
98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); | ||
99 | |||
100 | /* Ensure the payload is written out before the header */ | ||
101 | wmb(); | ||
102 | |||
103 | /* ring the doorbell with a distinctive value */ | ||
104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); | ||
105 | } | ||
106 | |||
107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | ||
108 | { | ||
109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
111 | int i; | ||
112 | |||
113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
114 | BUG_ON(outlen & 3 || outlen >= 0x100); | ||
115 | |||
116 | for (i = 0; i < outlen; i += 4) | ||
117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); | ||
118 | } | ||
119 | |||
120 | static int efx_mcdi_poll(struct efx_nic *efx) | ||
121 | { | ||
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
123 | unsigned int time, finish; | ||
124 | unsigned int respseq, respcmd, error; | ||
125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
126 | unsigned int rc, spins; | ||
127 | efx_dword_t reg; | ||
128 | |||
129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | ||
130 | rc = -efx_mcdi_poll_reboot(efx); | ||
131 | if (rc) | ||
132 | goto out; | ||
133 | |||
134 | /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, | ||
135 | * because generally mcdi responses are fast. After that, back off | ||
136 | * and poll once a jiffy (approximately) | ||
137 | */ | ||
138 | spins = TICK_USEC; | ||
139 | finish = get_seconds() + MCDI_RPC_TIMEOUT; | ||
140 | |||
141 | while (1) { | ||
142 | if (spins != 0) { | ||
143 | --spins; | ||
144 | udelay(1); | ||
145 | } else { | ||
146 | schedule_timeout_uninterruptible(1); | ||
147 | } | ||
148 | |||
149 | time = get_seconds(); | ||
150 | |||
151 | rmb(); | ||
152 | efx_readd(efx, ®, pdu); | ||
153 | |||
154 | /* All 1's indicates that shared memory is in reset (and is | ||
155 | * not a valid header). Wait for it to come out reset before | ||
156 | * completing the command */ | ||
157 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && | ||
158 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) | ||
159 | break; | ||
160 | |||
161 | if (time >= finish) | ||
162 | return -ETIMEDOUT; | ||
163 | } | ||
164 | |||
165 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); | ||
166 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); | ||
167 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); | ||
168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); | ||
169 | |||
170 | if (error && mcdi->resplen == 0) { | ||
171 | EFX_ERR(efx, "MC rebooted\n"); | ||
172 | rc = EIO; | ||
173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { | ||
174 | EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", | ||
175 | respseq, mcdi->seqno); | ||
176 | rc = EIO; | ||
177 | } else if (error) { | ||
178 | efx_readd(efx, ®, pdu + 4); | ||
179 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | ||
180 | #define TRANSLATE_ERROR(name) \ | ||
181 | case MC_CMD_ERR_ ## name: \ | ||
182 | rc = name; \ | ||
183 | break | ||
184 | TRANSLATE_ERROR(ENOENT); | ||
185 | TRANSLATE_ERROR(EINTR); | ||
186 | TRANSLATE_ERROR(EACCES); | ||
187 | TRANSLATE_ERROR(EBUSY); | ||
188 | TRANSLATE_ERROR(EINVAL); | ||
189 | TRANSLATE_ERROR(EDEADLK); | ||
190 | TRANSLATE_ERROR(ENOSYS); | ||
191 | TRANSLATE_ERROR(ETIME); | ||
192 | #undef TRANSLATE_ERROR | ||
193 | default: | ||
194 | rc = EIO; | ||
195 | break; | ||
196 | } | ||
197 | } else | ||
198 | rc = 0; | ||
199 | |||
200 | out: | ||
201 | mcdi->resprc = rc; | ||
202 | if (rc) | ||
203 | mcdi->resplen = 0; | ||
204 | |||
205 | /* Return rc=0 like wait_event_timeout() */ | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | /* Test and clear MC-rebooted flag for this port/function */ | ||
210 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | ||
211 | { | ||
212 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); | ||
213 | efx_dword_t reg; | ||
214 | uint32_t value; | ||
215 | |||
216 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
217 | return false; | ||
218 | |||
219 | efx_readd(efx, ®, addr); | ||
220 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | ||
221 | |||
222 | if (value == 0) | ||
223 | return 0; | ||
224 | |||
225 | EFX_ZERO_DWORD(reg); | ||
226 | efx_writed(efx, ®, addr); | ||
227 | |||
228 | if (value == MC_STATUS_DWORD_ASSERT) | ||
229 | return -EINTR; | ||
230 | else | ||
231 | return -EIO; | ||
232 | } | ||
233 | |||
234 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) | ||
235 | { | ||
236 | /* Wait until the interface becomes QUIESCENT and we win the race | ||
237 | * to mark it RUNNING. */ | ||
238 | wait_event(mcdi->wq, | ||
239 | atomic_cmpxchg(&mcdi->state, | ||
240 | MCDI_STATE_QUIESCENT, | ||
241 | MCDI_STATE_RUNNING) | ||
242 | == MCDI_STATE_QUIESCENT); | ||
243 | } | ||
244 | |||
245 | static int efx_mcdi_await_completion(struct efx_nic *efx) | ||
246 | { | ||
247 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
248 | |||
249 | if (wait_event_timeout( | ||
250 | mcdi->wq, | ||
251 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, | ||
252 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) | ||
253 | return -ETIMEDOUT; | ||
254 | |||
255 | /* Check if efx_mcdi_set_mode() switched us back to polled completions. | ||
256 | * In which case, poll for completions directly. If efx_mcdi_ev_cpl() | ||
257 | * completed the request first, then we'll just end up completing the | ||
258 | * request again, which is safe. | ||
259 | * | ||
260 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which | ||
261 | * wait_event_timeout() implicitly provides. | ||
262 | */ | ||
263 | if (mcdi->mode == MCDI_MODE_POLL) | ||
264 | return efx_mcdi_poll(efx); | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) | ||
270 | { | ||
271 | /* If the interface is RUNNING, then move to COMPLETED and wake any | ||
272 | * waiters. If the interface isn't in RUNNING then we've received a | ||
273 | * duplicate completion after we've already transitioned back to | ||
274 | * QUIESCENT. [A subsequent invocation would increment seqno, so would | ||
275 | * have failed the seqno check]. | ||
276 | */ | ||
277 | if (atomic_cmpxchg(&mcdi->state, | ||
278 | MCDI_STATE_RUNNING, | ||
279 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { | ||
280 | wake_up(&mcdi->wq); | ||
281 | return true; | ||
282 | } | ||
283 | |||
284 | return false; | ||
285 | } | ||
286 | |||
287 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) | ||
288 | { | ||
289 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | ||
290 | wake_up(&mcdi->wq); | ||
291 | } | ||
292 | |||
293 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, | ||
294 | unsigned int datalen, unsigned int errno) | ||
295 | { | ||
296 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
297 | bool wake = false; | ||
298 | |||
299 | spin_lock(&mcdi->iface_lock); | ||
300 | |||
301 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { | ||
302 | if (mcdi->credits) | ||
303 | /* The request has been cancelled */ | ||
304 | --mcdi->credits; | ||
305 | else | ||
306 | EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx " | ||
307 | "seq 0x%x\n", seqno, mcdi->seqno); | ||
308 | } else { | ||
309 | mcdi->resprc = errno; | ||
310 | mcdi->resplen = datalen; | ||
311 | |||
312 | wake = true; | ||
313 | } | ||
314 | |||
315 | spin_unlock(&mcdi->iface_lock); | ||
316 | |||
317 | if (wake) | ||
318 | efx_mcdi_complete(mcdi); | ||
319 | } | ||
320 | |||
321 | /* Issue the given command by writing the data into the shared memory PDU, | ||
322 | * ring the doorbell and wait for completion. Copyout the result. */ | ||
323 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | ||
324 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, | ||
325 | size_t *outlen_actual) | ||
326 | { | ||
327 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
328 | int rc; | ||
329 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
330 | |||
331 | efx_mcdi_acquire(mcdi); | ||
332 | |||
333 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ | ||
334 | spin_lock_bh(&mcdi->iface_lock); | ||
335 | ++mcdi->seqno; | ||
336 | spin_unlock_bh(&mcdi->iface_lock); | ||
337 | |||
338 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); | ||
339 | |||
340 | if (mcdi->mode == MCDI_MODE_POLL) | ||
341 | rc = efx_mcdi_poll(efx); | ||
342 | else | ||
343 | rc = efx_mcdi_await_completion(efx); | ||
344 | |||
345 | if (rc != 0) { | ||
346 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | ||
347 | * and completing a request we've just cancelled, by ensuring | ||
348 | * that the seqno check therein fails. | ||
349 | */ | ||
350 | spin_lock_bh(&mcdi->iface_lock); | ||
351 | ++mcdi->seqno; | ||
352 | ++mcdi->credits; | ||
353 | spin_unlock_bh(&mcdi->iface_lock); | ||
354 | |||
355 | EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n", | ||
356 | cmd, (int)inlen, mcdi->mode); | ||
357 | } else { | ||
358 | size_t resplen; | ||
359 | |||
360 | /* At the very least we need a memory barrier here to ensure | ||
361 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against | ||
362 | * a spurious efx_mcdi_ev_cpl() running concurrently by | ||
363 | * acquiring the iface_lock. */ | ||
364 | spin_lock_bh(&mcdi->iface_lock); | ||
365 | rc = -mcdi->resprc; | ||
366 | resplen = mcdi->resplen; | ||
367 | spin_unlock_bh(&mcdi->iface_lock); | ||
368 | |||
369 | if (rc == 0) { | ||
370 | efx_mcdi_copyout(efx, outbuf, | ||
371 | min(outlen, mcdi->resplen + 3) & ~0x3); | ||
372 | if (outlen_actual != NULL) | ||
373 | *outlen_actual = resplen; | ||
374 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | ||
375 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | ||
376 | else if (rc == -EIO || rc == -EINTR) { | ||
377 | EFX_ERR(efx, "MC fatal error %d\n", -rc); | ||
378 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
379 | } else | ||
380 | EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n", | ||
381 | cmd, (int)inlen, -rc); | ||
382 | } | ||
383 | |||
384 | efx_mcdi_release(mcdi); | ||
385 | return rc; | ||
386 | } | ||
387 | |||
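efx_mcdi_rpc() is the single entry point for issuing MC commands: callers supply request/response buffers and get back a translated errno. A hedged caller sketch using MC_CMD_REBOOT, which the error path above treats specially; the zero-length request payload is an assumption about that command's wire format:

static int example_mc_reboot(struct efx_nic *efx)
{
        int rc;

        /* no request payload, and we don't expect a response */
        rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, NULL, 0, NULL, 0, NULL);

        /* efx_mcdi_rpc() deliberately skips the reset path when MC_CMD_REBOOT
         * returns -EIO, since the MC going away is the expected outcome */
        return (rc == -EIO) ? 0 : rc;
}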
388 | void efx_mcdi_mode_poll(struct efx_nic *efx) | ||
389 | { | ||
390 | struct efx_mcdi_iface *mcdi; | ||
391 | |||
392 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
393 | return; | ||
394 | |||
395 | mcdi = efx_mcdi(efx); | ||
396 | if (mcdi->mode == MCDI_MODE_POLL) | ||
397 | return; | ||
398 | |||
399 | /* We can switch from event completion to polled completion, because | ||
400 | * mcdi requests are always completed in shared memory. We do this by | ||
401 | * switching the mode to POLL'd then completing the request. | ||
402 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). | ||
403 | * | ||
404 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), | ||
405 | * which efx_mcdi_complete() provides for us. | ||
406 | */ | ||
407 | mcdi->mode = MCDI_MODE_POLL; | ||
408 | |||
409 | efx_mcdi_complete(mcdi); | ||
410 | } | ||
411 | |||
412 | void efx_mcdi_mode_event(struct efx_nic *efx) | ||
413 | { | ||
414 | struct efx_mcdi_iface *mcdi; | ||
415 | |||
416 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
417 | return; | ||
418 | |||
419 | mcdi = efx_mcdi(efx); | ||
420 | |||
421 | if (mcdi->mode == MCDI_MODE_EVENTS) | ||
422 | return; | ||
423 | |||
424 | /* We can't switch from polled to event completion in the middle of a | ||
425 | * request, because the completion method is specified in the request. | ||
426 | * So acquire the interface to serialise the requestors. We don't need | ||
427 | * to acquire the iface_lock to change the mode here, but we do need a | ||
428 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which | ||
429 | * efx_mcdi_acquire() provides. | ||
430 | */ | ||
431 | efx_mcdi_acquire(mcdi); | ||
432 | mcdi->mode = MCDI_MODE_EVENTS; | ||
433 | efx_mcdi_release(mcdi); | ||
434 | } | ||
435 | |||
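The two mode-switching helpers exist because completions can always be polled in shared memory, whereas event-based completion needs working event queues. A hedged sketch of the intended ordering during bring-up; the surrounding probe function and its other steps are assumptions:

static int example_port_bringup(struct efx_nic *efx)
{
        /* efx_mcdi_init() leaves the interface in MCDI_MODE_POLL, so early
         * commands complete by polling the shared-memory PDU */
        efx_mcdi_init(efx);

        /* ... issue early MCDI commands, set up interrupts/event queues ... */

        /* once CMDDONE events can be delivered, switch to event completion */
        efx_mcdi_mode_event(efx);
        return 0;
}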
436 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | ||
437 | { | ||
438 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
439 | |||
440 | /* If there is an outstanding MCDI request, it has been terminated | ||
441 | * either by a BADASSERT or REBOOT event. If the mcdi interface is | ||
442 | * in polled mode, then do nothing because the MC reboot handler will | ||
443 | * set the header correctly. However, if the mcdi interface is waiting | ||
444 | * for a CMDDONE event it won't receive it [and since all MCDI events | ||
445 | * are sent to the same queue, we can't be racing with | ||
446 | * efx_mcdi_ev_cpl()] | ||
447 | * | ||
448 | * There's a race here with efx_mcdi_rpc(), because we might receive | ||
449 | * a REBOOT event *before* the request has been copied out. In polled | ||
450 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() | ||
451 | * is ignored. In event mode, this condition is just an edge-case of | ||
452 | * receiving a REBOOT event after posting the MCDI request. Did the mc | ||
453 | * reboot before or after the copyout? The best we can do always is | ||
454 | * just return failure. | ||
455 | */ | ||
456 | spin_lock(&mcdi->iface_lock); | ||
457 | if (efx_mcdi_complete(mcdi)) { | ||
458 | if (mcdi->mode == MCDI_MODE_EVENTS) { | ||
459 | mcdi->resprc = rc; | ||
460 | mcdi->resplen = 0; | ||
461 | } | ||
462 | } else | ||
463 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
464 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
465 | |||
466 | spin_unlock(&mcdi->iface_lock); | ||
467 | } | ||
468 | |||
469 | static unsigned int efx_mcdi_event_link_speed[] = { | ||
470 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, | ||
471 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, | ||
472 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, | ||
473 | }; | ||
474 | |||
475 | |||
476 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) | ||
477 | { | ||
478 | u32 flags, fcntl, speed, lpa; | ||
479 | |||
480 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); | ||
481 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); | ||
482 | speed = efx_mcdi_event_link_speed[speed]; | ||
483 | |||
484 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); | ||
485 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); | ||
486 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); | ||
487 | |||
488 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), | ||
489 | * which is only run after flushing the event queues. Therefore, it | ||
490 | * is safe to modify the link state outside of the mac_lock here. | ||
491 | */ | ||
492 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); | ||
493 | |||
494 | efx_mcdi_phy_check_fcntl(efx, lpa); | ||
495 | |||
496 | efx_link_status_changed(efx); | ||
497 | } | ||
498 | |||
499 | static const char *sensor_names[] = { | ||
500 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", | ||
501 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", | ||
502 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", | ||
503 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", | ||
504 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", | ||
505 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", | ||
506 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", | ||
507 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", | ||
508 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", | ||
509 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", | ||
510 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", | ||
511 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", | ||
512 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" | ||
513 | }; | ||
514 | |||
515 | static const char *sensor_status_names[] = { | ||
516 | [MC_CMD_SENSOR_STATE_OK] = "OK", | ||
517 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", | ||
518 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", | ||
519 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", | ||
520 | }; | ||
521 | |||
522 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) | ||
523 | { | ||
524 | unsigned int monitor, state, value; | ||
525 | const char *name, *state_txt; | ||
526 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); | ||
527 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); | ||
528 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); | ||
529 | /* Deal gracefully with the board having more sensors than we	| ||
530 | * know about, but do not expect new sensor states. */ | ||
531 | name = (monitor >= ARRAY_SIZE(sensor_names)) | ||
532 | ? "No sensor name available" : | ||
533 | sensor_names[monitor]; | ||
534 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); | ||
535 | state_txt = sensor_status_names[state]; | ||
536 | |||
537 | EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n", | ||
538 | monitor, name, state_txt, value); | ||
539 | } | ||
540 | |||
541 | /* Called from falcon_process_eventq for MCDI events */ | ||
542 | void efx_mcdi_process_event(struct efx_channel *channel, | ||
543 | efx_qword_t *event) | ||
544 | { | ||
545 | struct efx_nic *efx = channel->efx; | ||
546 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); | ||
547 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); | ||
548 | |||
549 | switch (code) { | ||
550 | case MCDI_EVENT_CODE_BADSSERT: | ||
551 | EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data); | ||
552 | efx_mcdi_ev_death(efx, EINTR); | ||
553 | break; | ||
554 | |||
555 | case MCDI_EVENT_CODE_PMNOTICE: | ||
556 | EFX_INFO(efx, "MCDI PM event.\n"); | ||
557 | break; | ||
558 | |||
559 | case MCDI_EVENT_CODE_CMDDONE: | ||
560 | efx_mcdi_ev_cpl(efx, | ||
561 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), | ||
562 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), | ||
563 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); | ||
564 | break; | ||
565 | |||
566 | case MCDI_EVENT_CODE_LINKCHANGE: | ||
567 | efx_mcdi_process_link_change(efx, event); | ||
568 | break; | ||
569 | case MCDI_EVENT_CODE_SENSOREVT: | ||
570 | efx_mcdi_sensor_event(efx, event); | ||
571 | break; | ||
572 | case MCDI_EVENT_CODE_SCHEDERR: | ||
573 | EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data); | ||
574 | break; | ||
575 | case MCDI_EVENT_CODE_REBOOT: | ||
576 | EFX_INFO(efx, "MC Reboot\n"); | ||
577 | efx_mcdi_ev_death(efx, EIO); | ||
578 | break; | ||
579 | case MCDI_EVENT_CODE_MAC_STATS_DMA: | ||
580 | /* MAC stats are gathered lazily. We can ignore this. */	| ||
581 | break; | ||
582 | |||
583 | default: | ||
584 | EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code); | ||
585 | } | ||
586 | } | ||
587 | |||
588 | /************************************************************************** | ||
589 | * | ||
590 | * Specific request functions | ||
591 | * | ||
592 | ************************************************************************** | ||
593 | */ | ||
594 | |||
595 | int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) | ||
596 | { | ||
597 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; | ||
598 | size_t outlength; | ||
599 | const __le16 *ver_words; | ||
600 | int rc; | ||
601 | |||
602 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); | ||
603 | |||
604 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, | ||
605 | outbuf, sizeof(outbuf), &outlength); | ||
606 | if (rc) | ||
607 | goto fail; | ||
608 | |||
609 | if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) { | ||
610 | *version = 0; | ||
611 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); | ||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { | ||
616 | rc = -EMSGSIZE; | ||
617 | goto fail; | ||
618 | } | ||
619 | |||
620 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); | ||
621 | *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | | ||
622 | ((u64)le16_to_cpu(ver_words[1]) << 32) | | ||
623 | ((u64)le16_to_cpu(ver_words[2]) << 16) | | ||
624 | le16_to_cpu(ver_words[3])); | ||
625 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); | ||
626 | |||
627 | return 0; | ||
628 | |||
629 | fail: | ||
630 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
631 | return rc; | ||
632 | } | ||
633 | |||
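The four 16-bit words packed into *version above are most-significant first, so a caller could format the usual dotted version string as below. This is only a hedged sketch; the function name is hypothetical and the log call is just for illustration.

	static void efx_mcdi_print_fwver_example(struct efx_nic *efx)
	{
		u64 version;
		u32 build;

		if (efx_mcdi_fwver(efx, &version, &build) == 0)
			/* unpack in the same order the words were packed above */
			EFX_INFO(efx, "MC firmware %u.%u.%u.%u build 0x%x\n",
				 (unsigned int)(version >> 48) & 0xffff,
				 (unsigned int)(version >> 32) & 0xffff,
				 (unsigned int)(version >> 16) & 0xffff,
				 (unsigned int)version & 0xffff, build);
	}
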
634 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | ||
635 | bool *was_attached) | ||
636 | { | ||
637 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; | ||
638 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; | ||
639 | size_t outlen; | ||
640 | int rc; | ||
641 | |||
642 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, | ||
643 | driver_operating ? 1 : 0); | ||
644 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); | ||
645 | |||
646 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), | ||
647 | outbuf, sizeof(outbuf), &outlen); | ||
648 | if (rc) | ||
649 | goto fail; | ||
650 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) | ||
651 | goto fail; | ||
652 | |||
653 | if (was_attached != NULL) | ||
654 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); | ||
655 | return 0; | ||
656 | |||
657 | fail: | ||
658 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
659 | return rc; | ||
660 | } | ||
661 | |||
662 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | ||
663 | u16 *fw_subtype_list) | ||
664 | { | ||
665 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; | ||
666 | size_t outlen; | ||
667 | int port_num = efx_port_num(efx); | ||
668 | int offset; | ||
669 | int rc; | ||
670 | |||
671 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); | ||
672 | |||
673 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, | ||
674 | outbuf, sizeof(outbuf), &outlen); | ||
675 | if (rc) | ||
676 | goto fail; | ||
677 | |||
678 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { | ||
679 | rc = -EMSGSIZE; | ||
680 | goto fail; | ||
681 | } | ||
682 | |||
683 | offset = (port_num) | ||
684 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST | ||
685 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; | ||
686 | if (mac_address) | ||
687 | memcpy(mac_address, outbuf + offset, ETH_ALEN); | ||
688 | if (fw_subtype_list) | ||
689 | memcpy(fw_subtype_list, | ||
690 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, | ||
691 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); | ||
692 | |||
693 | return 0; | ||
694 | |||
695 | fail: | ||
696 | EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen); | ||
697 | |||
698 | return rc; | ||
699 | } | ||
700 | |||
701 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | ||
702 | { | ||
703 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; | ||
704 | u32 dest = 0; | ||
705 | int rc; | ||
706 | |||
707 | if (uart) | ||
708 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; | ||
709 | if (evq) | ||
710 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; | ||
711 | |||
712 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); | ||
713 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); | ||
714 | |||
715 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); | ||
716 | |||
717 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), | ||
718 | NULL, 0, NULL); | ||
719 | if (rc) | ||
720 | goto fail; | ||
721 | |||
722 | return 0; | ||
723 | |||
724 | fail: | ||
725 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
726 | return rc; | ||
727 | } | ||
728 | |||
729 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | ||
730 | { | ||
731 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; | ||
732 | size_t outlen; | ||
733 | int rc; | ||
734 | |||
735 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); | ||
736 | |||
737 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, | ||
738 | outbuf, sizeof(outbuf), &outlen); | ||
739 | if (rc) | ||
740 | goto fail; | ||
741 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) | ||
742 | goto fail; | ||
743 | |||
744 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); | ||
745 | return 0; | ||
746 | |||
747 | fail: | ||
748 | EFX_ERR(efx, "%s: failed rc=%d\n", | ||
749 | __func__, rc); | ||
750 | return rc; | ||
751 | } | ||
752 | |||
753 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | ||
754 | size_t *size_out, size_t *erase_size_out, | ||
755 | bool *protected_out) | ||
756 | { | ||
757 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; | ||
758 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; | ||
759 | size_t outlen; | ||
760 | int rc; | ||
761 | |||
762 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); | ||
763 | |||
764 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), | ||
765 | outbuf, sizeof(outbuf), &outlen); | ||
766 | if (rc) | ||
767 | goto fail; | ||
768 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) | ||
769 | goto fail; | ||
770 | |||
771 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); | ||
772 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); | ||
773 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & | ||
774 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); | ||
775 | return 0; | ||
776 | |||
777 | fail: | ||
778 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
779 | return rc; | ||
780 | } | ||
781 | |||
782 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | ||
783 | { | ||
784 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; | ||
785 | int rc; | ||
786 | |||
787 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | ||
788 | |||
789 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | ||
790 | |||
791 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | ||
792 | NULL, 0, NULL); | ||
793 | if (rc) | ||
794 | goto fail; | ||
795 | |||
796 | return 0; | ||
797 | |||
798 | fail: | ||
799 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
800 | return rc; | ||
801 | } | ||
802 | |||
803 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | ||
804 | loff_t offset, u8 *buffer, size_t length) | ||
805 | { | ||
806 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | ||
807 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
808 | size_t outlen; | ||
809 | int rc; | ||
810 | |||
811 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); | ||
812 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); | ||
813 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); | ||
814 | |||
815 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), | ||
816 | outbuf, sizeof(outbuf), &outlen); | ||
817 | if (rc) | ||
818 | goto fail; | ||
819 | |||
820 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); | ||
821 | return 0; | ||
822 | |||
823 | fail: | ||
824 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
825 | return rc; | ||
826 | } | ||
827 | |||
828 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | ||
829 | loff_t offset, const u8 *buffer, size_t length) | ||
830 | { | ||
831 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
832 | int rc; | ||
833 | |||
834 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | ||
835 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); | ||
836 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); | ||
837 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); | ||
838 | |||
839 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | ||
840 | |||
841 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, | ||
842 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | ||
843 | NULL, 0, NULL); | ||
844 | if (rc) | ||
845 | goto fail; | ||
846 | |||
847 | return 0; | ||
848 | |||
849 | fail: | ||
850 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
851 | return rc; | ||
852 | } | ||
853 | |||
854 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | ||
855 | loff_t offset, size_t length) | ||
856 | { | ||
857 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; | ||
858 | int rc; | ||
859 | |||
860 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | ||
861 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | ||
862 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | ||
863 | |||
864 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | ||
865 | |||
866 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | ||
867 | NULL, 0, NULL); | ||
868 | if (rc) | ||
869 | goto fail; | ||
870 | |||
871 | return 0; | ||
872 | |||
873 | fail: | ||
874 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
875 | return rc; | ||
876 | } | ||
877 | |||
878 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | ||
879 | { | ||
880 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; | ||
881 | int rc; | ||
882 | |||
883 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | ||
884 | |||
885 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | ||
886 | |||
887 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | ||
888 | NULL, 0, NULL); | ||
889 | if (rc) | ||
890 | goto fail; | ||
891 | |||
892 | return 0; | ||
893 | |||
894 | fail: | ||
895 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
896 | return rc; | ||
897 | } | ||
898 | |||
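Taken together, the NVRAM helpers above are meant to be used as a bracketed sequence: UPDATE_START, erase, writes of at most EFX_MCDI_NVRAM_LEN_MAX bytes each, then UPDATE_FINISH. A hedged sketch of that sequence (hypothetical function name; it assumes offset and length already sit on erase-block boundaries, which a real flash updater such as the MTD glue has to guarantee):

	static int efx_mcdi_nvram_update_example(struct efx_nic *efx,
						 unsigned int type, loff_t offset,
						 const u8 *data, size_t length)
	{
		size_t chunk;
		int rc;

		rc = efx_mcdi_nvram_update_start(efx, type);
		if (rc)
			return rc;

		rc = efx_mcdi_nvram_erase(efx, type, offset, length);
		if (rc)
			goto out;

		while (length) {
			chunk = min_t(size_t, length, EFX_MCDI_NVRAM_LEN_MAX);
			rc = efx_mcdi_nvram_write(efx, type, offset, data, chunk);
			if (rc)
				break;
			offset += chunk;
			data += chunk;
			length -= chunk;
		}

	out:
		/* Close the update bracket even if a step above failed */
		efx_mcdi_nvram_update_finish(efx, type);
		return rc;
	}
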
899 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) | ||
900 | { | ||
901 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; | ||
902 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; | ||
903 | int rc; | ||
904 | |||
905 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); | ||
906 | |||
907 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), | ||
908 | outbuf, sizeof(outbuf), NULL); | ||
909 | if (rc) | ||
910 | return rc; | ||
911 | |||
912 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { | ||
913 | case MC_CMD_NVRAM_TEST_PASS: | ||
914 | case MC_CMD_NVRAM_TEST_NOTSUPP: | ||
915 | return 0; | ||
916 | default: | ||
917 | return -EIO; | ||
918 | } | ||
919 | } | ||
920 | |||
921 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) | ||
922 | { | ||
923 | u32 nvram_types; | ||
924 | unsigned int type; | ||
925 | int rc; | ||
926 | |||
927 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | ||
928 | if (rc) | ||
929 | return rc; | ||
930 | |||
931 | type = 0; | ||
932 | while (nvram_types != 0) { | ||
933 | if (nvram_types & 1) { | ||
934 | rc = efx_mcdi_nvram_test(efx, type); | ||
935 | if (rc) | ||
936 | return rc; | ||
937 | } | ||
938 | type++; | ||
939 | nvram_types >>= 1; | ||
940 | } | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | static int efx_mcdi_read_assertion(struct efx_nic *efx) | ||
946 | { | ||
947 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; | ||
948 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; | ||
949 | unsigned int flags, index, ofst; | ||
950 | const char *reason; | ||
951 | size_t outlen; | ||
952 | int retry; | ||
953 | int rc; | ||
954 | |||
955 | /* Attempt to read any stored assertion state before we reboot | ||
956 | * the mcfw out of the assertion handler. Retry twice, once | ||
957 | * because a boot-time assertion might cause this command to fail | ||
958 | * with EINTR, and once more because GET_ASSERTS can race with	| ||
959 | * MC_CMD_REBOOT running on the other port. */ | ||
960 | retry = 2; | ||
961 | do { | ||
962 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); | ||
963 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, | ||
964 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, | ||
965 | outbuf, sizeof(outbuf), &outlen); | ||
966 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); | ||
967 | |||
968 | if (rc) | ||
969 | return rc; | ||
970 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) | ||
971 | return -EINVAL; | ||
972 | |||
973 | /* Print out any recorded assertion state */ | ||
974 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); | ||
975 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) | ||
976 | return 0; | ||
977 | |||
978 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) | ||
979 | ? "system-level assertion" | ||
980 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) | ||
981 | ? "thread-level assertion" | ||
982 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) | ||
983 | ? "watchdog reset" | ||
984 | : "unknown assertion"; | ||
985 | EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, | ||
986 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), | ||
987 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | ||
988 | |||
989 | /* Print out the registers */ | ||
990 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; | ||
991 | for (index = 1; index < 32; index++) { | ||
992 | EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, | ||
993 | MCDI_DWORD2(outbuf, ofst)); | ||
994 | ofst += sizeof(efx_dword_t); | ||
995 | } | ||
996 | |||
997 | return 0; | ||
998 | } | ||
999 | |||
1000 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) | ||
1001 | { | ||
1002 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1003 | |||
1004 | /* Atomically reboot the mcfw out of the assertion handler */ | ||
1005 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1006 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, | ||
1007 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); | ||
1008 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, | ||
1009 | NULL, 0, NULL); | ||
1010 | } | ||
1011 | |||
1012 | int efx_mcdi_handle_assertion(struct efx_nic *efx) | ||
1013 | { | ||
1014 | int rc; | ||
1015 | |||
1016 | rc = efx_mcdi_read_assertion(efx); | ||
1017 | if (rc) | ||
1018 | return rc; | ||
1019 | |||
1020 | efx_mcdi_exit_assertion(efx); | ||
1021 | |||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
1025 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
1026 | { | ||
1027 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; | ||
1028 | int rc; | ||
1029 | |||
1030 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); | ||
1031 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); | ||
1032 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); | ||
1033 | |||
1034 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); | ||
1035 | |||
1036 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); | ||
1037 | |||
1038 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | ||
1039 | NULL, 0, NULL); | ||
1040 | if (rc) | ||
1041 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1042 | } | ||
1043 | |||
1044 | int efx_mcdi_reset_port(struct efx_nic *efx) | ||
1045 | { | ||
1046 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | ||
1047 | if (rc) | ||
1048 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1049 | return rc; | ||
1050 | } | ||
1051 | |||
1052 | int efx_mcdi_reset_mc(struct efx_nic *efx) | ||
1053 | { | ||
1054 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1055 | int rc; | ||
1056 | |||
1057 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1058 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); | ||
1059 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), | ||
1060 | NULL, 0, NULL); | ||
1061 | 	/* White is black, and up is down: -EIO here means the MC really rebooted */	| ||
1062 | if (rc == -EIO) | ||
1063 | return 0; | ||
1064 | if (rc == 0) | ||
1065 | rc = -EIO; | ||
1066 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1067 | return rc; | ||
1068 | } | ||
1069 | |||
1070 | int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | ||
1071 | const u8 *mac, int *id_out) | ||
1072 | { | ||
1073 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; | ||
1074 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; | ||
1075 | size_t outlen; | ||
1076 | int rc; | ||
1077 | |||
1078 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); | ||
1079 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, | ||
1080 | MC_CMD_FILTER_MODE_SIMPLE); | ||
1081 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); | ||
1082 | |||
1083 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), | ||
1084 | outbuf, sizeof(outbuf), &outlen); | ||
1085 | if (rc) | ||
1086 | goto fail; | ||
1087 | |||
1088 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { | ||
1089 | rc = -EMSGSIZE; | ||
1090 | goto fail; | ||
1091 | } | ||
1092 | |||
1093 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); | ||
1094 | |||
1095 | return 0; | ||
1096 | |||
1097 | fail: | ||
1098 | *id_out = -1; | ||
1099 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1100 | return rc; | ||
1101 | |||
1102 | } | ||
1103 | |||
1104 | |||
1105 | int | ||
1106 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) | ||
1107 | { | ||
1108 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); | ||
1109 | } | ||
1110 | |||
1111 | |||
1112 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | ||
1113 | { | ||
1114 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; | ||
1115 | size_t outlen; | ||
1116 | int rc; | ||
1117 | |||
1118 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, | ||
1119 | outbuf, sizeof(outbuf), &outlen); | ||
1120 | if (rc) | ||
1121 | goto fail; | ||
1122 | |||
1123 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { | ||
1124 | rc = -EMSGSIZE; | ||
1125 | goto fail; | ||
1126 | } | ||
1127 | |||
1128 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); | ||
1129 | |||
1130 | return 0; | ||
1131 | |||
1132 | fail: | ||
1133 | *id_out = -1; | ||
1134 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1135 | return rc; | ||
1136 | } | ||
1137 | |||
1138 | |||
1139 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | ||
1140 | { | ||
1141 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; | ||
1142 | int rc; | ||
1143 | |||
1144 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | ||
1145 | |||
1146 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | ||
1147 | NULL, 0, NULL); | ||
1148 | if (rc) | ||
1149 | goto fail; | ||
1150 | |||
1151 | return 0; | ||
1152 | |||
1153 | fail: | ||
1154 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1155 | return rc; | ||
1156 | } | ||
1157 | |||
1158 | |||
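A hedged sketch of how the wake-on-LAN helpers above fit together: arm a magic-packet filter for the device's own MAC address when entering a low-power state, and drop it again afterwards (the wrapper name and where it would be called from are illustrative only):

	static int efx_mcdi_wol_example(struct efx_nic *efx, bool arm, int *filter_id)
	{
		if (arm)
			/* match magic packets addressed to our own station address */
			return efx_mcdi_wol_filter_set_magic(efx,
							     efx->net_dev->dev_addr,
							     filter_id);

		if (*filter_id >= 0) {
			int rc = efx_mcdi_wol_filter_remove(efx, *filter_id);
			*filter_id = -1;
			return rc;
		}
		return 0;
	}
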
1159 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | ||
1160 | { | ||
1161 | int rc; | ||
1162 | |||
1163 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | ||
1164 | if (rc) | ||
1165 | goto fail; | ||
1166 | |||
1167 | return 0; | ||
1168 | |||
1169 | fail: | ||
1170 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1171 | return rc; | ||
1172 | } | ||
1173 | |||
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h new file mode 100644 index 000000000000..f1f89ad4075a --- /dev/null +++ b/drivers/net/sfc/mcdi.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2008-2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_MCDI_H | ||
11 | #define EFX_MCDI_H | ||
12 | |||
13 | /** | ||
14 | * enum efx_mcdi_state | ||
15 | * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the | ||
16 | * mcdi_lock then they are able to move to MCDI_STATE_RUNNING | ||
17 | * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that | ||
18 | * moved into this state is allowed to move out of it. | ||
19 | * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread | ||
20 | * has not yet consumed the result. For all other threads, equivalent to | ||
21 | * MCDI_STATE_RUNNING. | ||
22 | */ | ||
23 | enum efx_mcdi_state { | ||
24 | MCDI_STATE_QUIESCENT, | ||
25 | MCDI_STATE_RUNNING, | ||
26 | MCDI_STATE_COMPLETED, | ||
27 | }; | ||
28 | |||
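Reading the comments above as a state machine, the transitions appear to be: QUIESCENT to RUNNING when a requestor acquires the interface, RUNNING to COMPLETED when a completion arrives before the owner has consumed it, and back to QUIESCENT once the owner consumes the result. A standalone, purely illustrative checker of those transitions follows (nothing in the driver needs this; it only restates the comments, and the RUNNING-to-QUIESCENT arc for the polled case is an inference):

	#include <stdbool.h>

	enum mcdi_state_sketch { SKETCH_QUIESCENT, SKETCH_RUNNING, SKETCH_COMPLETED };

	static bool mcdi_transition_valid(enum mcdi_state_sketch from,
					  enum mcdi_state_sketch to)
	{
		switch (from) {
		case SKETCH_QUIESCENT:
			return to == SKETCH_RUNNING;	/* a requestor acquires the iface */
		case SKETCH_RUNNING:
			return to == SKETCH_COMPLETED	/* completion seen elsewhere */
			    || to == SKETCH_QUIESCENT;	/* owner consumed the result */
		case SKETCH_COMPLETED:
			return to == SKETCH_QUIESCENT;	/* owner consumed the result */
		}
		return false;
	}
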
29 | enum efx_mcdi_mode { | ||
30 | MCDI_MODE_POLL, | ||
31 | MCDI_MODE_EVENTS, | ||
32 | }; | ||
33 | |||
34 | /** | ||
35 | * struct efx_mcdi_iface | ||
36 | * @state: Interface state. Waited for by @wq.	| ||
37 | * @wq: Wait queue for threads waiting for state != STATE_RUNNING | ||
38 | * @iface_lock: Protects @credits, @seqno, @resprc, @resplen | ||
39 | * @mode: Poll for mcdi completion, or wait for an mcdi_event. | ||
40 | * Serialised by @lock | ||
41 | * @seqno: The next sequence number to use for mcdi requests. | ||
42 | * Serialised by @iface_lock	| ||
43 | * @credits: Number of spurious MCDI completion events allowed before we | ||
44 | * trigger a fatal error. Protected by @iface_lock	| ||
45 | * @resprc: Returned MCDI completion | ||
46 | * @resplen: Returned payload length | ||
47 | */ | ||
48 | struct efx_mcdi_iface { | ||
49 | atomic_t state; | ||
50 | wait_queue_head_t wq; | ||
51 | spinlock_t iface_lock; | ||
52 | enum efx_mcdi_mode mode; | ||
53 | unsigned int credits; | ||
54 | unsigned int seqno; | ||
55 | unsigned int resprc; | ||
56 | size_t resplen; | ||
57 | }; | ||
58 | |||
59 | extern void efx_mcdi_init(struct efx_nic *efx); | ||
60 | |||
61 | extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, | ||
62 | size_t inlen, u8 *outbuf, size_t outlen, | ||
63 | size_t *outlen_actual); | ||
64 | |||
65 | extern int efx_mcdi_poll_reboot(struct efx_nic *efx); | ||
66 | extern void efx_mcdi_mode_poll(struct efx_nic *efx); | ||
67 | extern void efx_mcdi_mode_event(struct efx_nic *efx); | ||
68 | |||
69 | extern void efx_mcdi_process_event(struct efx_channel *channel, | ||
70 | efx_qword_t *event); | ||
71 | |||
72 | #define MCDI_PTR2(_buf, _ofst) \ | ||
73 | (((u8 *)_buf) + _ofst) | ||
74 | #define MCDI_SET_DWORD2(_buf, _ofst, _value) \ | ||
75 | EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ | ||
76 | EFX_DWORD_0, _value) | ||
77 | #define MCDI_DWORD2(_buf, _ofst) \ | ||
78 | EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ | ||
79 | EFX_DWORD_0) | ||
80 | #define MCDI_QWORD2(_buf, _ofst) \ | ||
81 | EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \ | ||
82 | EFX_QWORD_0) | ||
83 | |||
84 | #define MCDI_PTR(_buf, _ofst) \ | ||
85 | MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) | ||
86 | #define MCDI_SET_DWORD(_buf, _ofst, _value) \ | ||
87 | MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) | ||
88 | #define MCDI_DWORD(_buf, _ofst) \ | ||
89 | MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) | ||
90 | #define MCDI_QWORD(_buf, _ofst) \ | ||
91 | MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) | ||
92 | |||
93 | #define MCDI_EVENT_FIELD(_ev, _field) \ | ||
94 | EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) | ||
95 | |||
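The two macro layers above reduce to reading or writing a 32-bit little-endian dword at the byte offset named by the corresponding MC_CMD_*_OFST definition. A standalone plain-C mimic of what MCDI_SET_DWORD()/MCDI_DWORD() end up doing (illustrative only; the driver's versions go through efx_dword_t and the EFX_* bitfield helpers, which also handle endianness properly):

	#include <stdint.h>
	#include <string.h>

	/* Write a dword at a byte offset, as MCDI_SET_DWORD2() does */
	static void mcdi_set_dword_sketch(uint8_t *buf, unsigned int ofst,
					  uint32_t value)
	{
		memcpy(buf + ofst, &value, sizeof(value));	/* little-endian host assumed */
	}

	/* Read a dword back, as MCDI_DWORD2() does */
	static uint32_t mcdi_dword_sketch(const uint8_t *buf, unsigned int ofst)
	{
		uint32_t value;

		memcpy(&value, buf + ofst, sizeof(value));	/* little-endian host assumed */
		return value;
	}
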
96 | extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); | ||
97 | extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | ||
98 | bool *was_attached_out); | ||
99 | extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | ||
100 | u16 *fw_subtype_list); | ||
101 | extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, | ||
102 | u32 dest_evq); | ||
103 | extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); | ||
104 | extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | ||
105 | size_t *size_out, size_t *erase_size_out, | ||
106 | bool *protected_out); | ||
107 | extern int efx_mcdi_nvram_update_start(struct efx_nic *efx, | ||
108 | unsigned int type); | ||
109 | extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | ||
110 | loff_t offset, u8 *buffer, size_t length); | ||
111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | ||
112 | loff_t offset, const u8 *buffer, | ||
113 | size_t length); | ||
114 | #define EFX_MCDI_NVRAM_LEN_MAX 128 | ||
115 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | ||
116 | loff_t offset, size_t length); | ||
117 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, | ||
118 | unsigned int type); | ||
119 | extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); | ||
120 | extern int efx_mcdi_handle_assertion(struct efx_nic *efx); | ||
121 | extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); | ||
122 | extern int efx_mcdi_reset_port(struct efx_nic *efx); | ||
123 | extern int efx_mcdi_reset_mc(struct efx_nic *efx); | ||
124 | extern int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | ||
125 | const u8 *mac, int *id_out); | ||
126 | extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, | ||
127 | const u8 *mac, int *id_out); | ||
128 | extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); | ||
129 | extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); | ||
130 | extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); | ||
131 | |||
132 | #endif /* EFX_MCDI_H */ | ||
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c new file mode 100644 index 000000000000..06d24a1e412a --- /dev/null +++ b/drivers/net/sfc/mcdi_mac.c | |||
@@ -0,0 +1,152 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include "net_driver.h" | ||
11 | #include "efx.h" | ||
12 | #include "mac.h" | ||
13 | #include "mcdi.h" | ||
14 | #include "mcdi_pcol.h" | ||
15 | |||
16 | static int efx_mcdi_set_mac(struct efx_nic *efx) | ||
17 | { | ||
18 | u32 reject, fcntl; | ||
19 | u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN]; | ||
20 | |||
21 | memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST, | ||
22 | efx->net_dev->dev_addr, ETH_ALEN); | ||
23 | |||
24 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, | ||
25 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); | ||
26 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); | ||
27 | |||
28 | /* The MCDI command provides for controlling accept/reject | ||
29 | * of broadcast packets too, but the driver doesn't currently | ||
30 | * expose this. */ | ||
31 | reject = (efx->promiscuous) ? 0 : | ||
32 | (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN); | ||
33 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject); | ||
34 | |||
35 | switch (efx->wanted_fc) { | ||
36 | case EFX_FC_RX | EFX_FC_TX: | ||
37 | fcntl = MC_CMD_FCNTL_BIDIR; | ||
38 | break; | ||
39 | case EFX_FC_RX: | ||
40 | fcntl = MC_CMD_FCNTL_RESPOND; | ||
41 | break; | ||
42 | default: | ||
43 | fcntl = MC_CMD_FCNTL_OFF; | ||
44 | break; | ||
45 | } | ||
46 | if (efx->wanted_fc & EFX_FC_AUTO) | ||
47 | fcntl = MC_CMD_FCNTL_AUTO; | ||
48 | |||
49 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); | ||
50 | |||
51 | return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), | ||
52 | NULL, 0, NULL); | ||
53 | } | ||
54 | |||
55 | static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults) | ||
56 | { | ||
57 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
58 | size_t outlength; | ||
59 | int rc; | ||
60 | |||
61 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
62 | |||
63 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
64 | outbuf, sizeof(outbuf), &outlength); | ||
65 | if (rc) | ||
66 | goto fail; | ||
67 | |||
68 | *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT); | ||
69 | return 0; | ||
70 | |||
71 | fail: | ||
72 | EFX_ERR(efx, "%s: failed rc=%d\n", | ||
73 | __func__, rc); | ||
74 | return rc; | ||
75 | } | ||
76 | |||
77 | int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, | ||
78 | u32 dma_len, int enable, int clear) | ||
79 | { | ||
80 | u8 inbuf[MC_CMD_MAC_STATS_IN_LEN]; | ||
81 | int rc; | ||
82 | efx_dword_t *cmd_ptr; | ||
83 | int period = 1000; | ||
84 | u32 addr_hi; | ||
85 | u32 addr_lo; | ||
86 | |||
87 | BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0); | ||
88 | |||
89 | addr_lo = ((u64)dma_addr) >> 0; | ||
90 | addr_hi = ((u64)dma_addr) >> 32; | ||
91 | |||
92 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo); | ||
93 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi); | ||
94 | cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); | ||
95 | if (enable) | ||
96 | EFX_POPULATE_DWORD_6(*cmd_ptr, | ||
97 | MC_CMD_MAC_STATS_CMD_DMA, 1, | ||
98 | MC_CMD_MAC_STATS_CMD_CLEAR, clear, | ||
99 | MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1, | ||
100 | MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1, | ||
101 | MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0, | ||
102 | MC_CMD_MAC_STATS_CMD_PERIOD_MS, period); | ||
103 | else | ||
104 | EFX_POPULATE_DWORD_5(*cmd_ptr, | ||
105 | MC_CMD_MAC_STATS_CMD_DMA, 0, | ||
106 | MC_CMD_MAC_STATS_CMD_CLEAR, clear, | ||
107 | MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1, | ||
108 | MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0, | ||
109 | MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0); | ||
110 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); | ||
111 | |||
112 | rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), | ||
113 | NULL, 0, NULL); | ||
114 | if (rc) | ||
115 | goto fail; | ||
116 | |||
117 | return 0; | ||
118 | |||
119 | fail: | ||
120 | EFX_ERR(efx, "%s: %s failed rc=%d\n", | ||
121 | __func__, enable ? "enable" : "disable", rc); | ||
122 | return rc; | ||
123 | } | ||
124 | |||
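A hedged usage sketch for efx_mcdi_mac_stats(): turn the periodic stats DMA on against a DMA-coherent buffer the caller already owns, and off again later. struct efx_buffer and its dma_addr/len members are assumed from the driver's net_driver.h, and the wrapper name is illustrative.

	static int efx_mcdi_mac_stats_example(struct efx_nic *efx,
					      struct efx_buffer *stats_buf, bool on)
	{
		if (on)
			/* start periodic DMA, clearing the counters first */
			return efx_mcdi_mac_stats(efx, stats_buf->dma_addr,
						  stats_buf->len, 1, 1);

		/* stop periodic DMA, leaving the last counters in place */
		return efx_mcdi_mac_stats(efx, stats_buf->dma_addr,
					  stats_buf->len, 0, 0);
	}
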
125 | static int efx_mcdi_mac_reconfigure(struct efx_nic *efx) | ||
126 | { | ||
127 | int rc; | ||
128 | |||
129 | rc = efx_mcdi_set_mac(efx); | ||
130 | if (rc != 0) | ||
131 | return rc; | ||
132 | |||
133 | /* Restore the multicast hash registers. */ | ||
134 | efx->type->push_multicast_hash(efx); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | |||
140 | static bool efx_mcdi_mac_check_fault(struct efx_nic *efx) | ||
141 | { | ||
142 | u32 faults; | ||
143 | int rc = efx_mcdi_get_mac_faults(efx, &faults); | ||
144 | return (rc != 0) || (faults != 0); | ||
145 | } | ||
146 | |||
147 | |||
148 | struct efx_mac_operations efx_mcdi_mac_operations = { | ||
149 | .reconfigure = efx_mcdi_mac_reconfigure, | ||
150 | .update_stats = efx_port_dummy_op_void, | ||
151 | .check_fault = efx_mcdi_mac_check_fault, | ||
152 | }; | ||
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h new file mode 100644 index 000000000000..bd59302695b3 --- /dev/null +++ b/drivers/net/sfc/mcdi_pcol.h | |||
@@ -0,0 +1,1736 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | |||
11 | #ifndef MCDI_PCOL_H | ||
12 | #define MCDI_PCOL_H | ||
13 | |||
14 | /* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */ | ||
15 | /* Power-on reset state */ | ||
16 | #define MC_FW_STATE_POR (1) | ||
17 | /* If this is set in MC_RESET_STATE_REG then it should be | ||
18 | * possible to jump into IMEM without loading code from flash. */ | ||
19 | #define MC_FW_WARM_BOOT_OK (2) | ||
20 | /* The MC main image has started to boot. */ | ||
21 | #define MC_FW_STATE_BOOTING (4) | ||
22 | /* The Scheduler has started. */ | ||
23 | #define MC_FW_STATE_SCHED (8) | ||
24 | |||
25 | /* Values to be written to the per-port status dword in shared | ||
26 | * memory on reboot and assert */ | ||
27 | #define MC_STATUS_DWORD_REBOOT (0xb007b007) | ||
28 | #define MC_STATUS_DWORD_ASSERT (0xdeaddead) | ||
29 | |||
30 | /* The current version of the MCDI protocol. | ||
31 | * | ||
32 | * Note that the ROM burnt into the card only talks V0, so at the very | ||
33 | * least every driver must support version 0 and MCDI_PCOL_VERSION | ||
34 | */ | ||
35 | #define MCDI_PCOL_VERSION 1 | ||
36 | |||
37 | /** | ||
38 | * MCDI version 1 | ||
39 | * | ||
40 | * Each MCDI request starts with an MCDI_HEADER, which is a 32-bit	| ||
41 | * structure, filled in by the client. | ||
42 | * | ||
43 | * 0 7 8 16 20 22 23 24 31 | ||
44 | * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | | ||
45 | * | | | | ||
46 | * | | \--- Response | ||
47 | * | \------- Error | ||
48 | * \------------------------------ Resync (always set) | ||
49 | * | ||
50 | * The client writes its request into MC shared memory, and rings the	| ||
51 | * doorbell. Each request is completed either by the MC writing	| ||
52 | * back into shared memory, or by writing out an event.	| ||
53 | * | ||
54 | * All MCDI commands support completion by shared memory response. Each | ||
55 | * request may also contain additional data (accounted for by HEADER.LEN), | ||
56 | * and some responses may also contain additional data (again, accounted	| ||
57 | * for by HEADER.LEN). | ||
58 | * | ||
59 | * Some MCDI commands support completion by event, in which any associated | ||
60 | * response data is included in the event. | ||
61 | * | ||
62 | * The protocol requires one response to be delivered for every request; a	| ||
63 | * request should not be sent until the response to the previous request	| ||
64 | * has been received (either by polling shared memory, or by receiving | ||
65 | * an event). | ||
66 | */ | ||
67 | |||
68 | /** Request/Response structure */ | ||
69 | #define MCDI_HEADER_OFST 0 | ||
70 | #define MCDI_HEADER_CODE_LBN 0 | ||
71 | #define MCDI_HEADER_CODE_WIDTH 7 | ||
72 | #define MCDI_HEADER_RESYNC_LBN 7 | ||
73 | #define MCDI_HEADER_RESYNC_WIDTH 1 | ||
74 | #define MCDI_HEADER_DATALEN_LBN 8 | ||
75 | #define MCDI_HEADER_DATALEN_WIDTH 8 | ||
76 | #define MCDI_HEADER_SEQ_LBN 16 | ||
77 | #define MCDI_HEADER_RSVD_LBN 20 | ||
78 | #define MCDI_HEADER_RSVD_WIDTH 2 | ||
79 | #define MCDI_HEADER_SEQ_WIDTH 4 | ||
80 | #define MCDI_HEADER_ERROR_LBN 22 | ||
81 | #define MCDI_HEADER_ERROR_WIDTH 1 | ||
82 | #define MCDI_HEADER_RESPONSE_LBN 23 | ||
83 | #define MCDI_HEADER_RESPONSE_WIDTH 1 | ||
84 | #define MCDI_HEADER_XFLAGS_LBN 24 | ||
85 | #define MCDI_HEADER_XFLAGS_WIDTH 8 | ||
86 | /* Request response using event */ | ||
87 | #define MCDI_HEADER_XFLAGS_EVREQ 0x01 | ||
88 | |||
89 | /* Maximum number of payload bytes */ | ||
90 | #define MCDI_CTL_SDU_LEN_MAX 0xfc | ||
91 | |||
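Given the LBN/WIDTH definitions above, the request header is just a 32-bit word assembled with shifts. A standalone sketch (illustrative only; the driver itself presumably builds the header through its efx_dword_t helpers):

	#include <stdint.h>

	static uint32_t mcdi_pack_header_sketch(unsigned int cmd, unsigned int datalen,
						unsigned int seq, int want_event)
	{
		uint32_t hdr = 0;

		hdr |= (cmd & 0x7f) << 0;	/* MCDI_HEADER_CODE, 7 bits */
		hdr |= 1u << 7;			/* MCDI_HEADER_RESYNC, always set */
		hdr |= (datalen & 0xff) << 8;	/* MCDI_HEADER_DATALEN */
		hdr |= (seq & 0xf) << 16;	/* MCDI_HEADER_SEQ */
		if (want_event)
			hdr |= 0x01u << 24;	/* MCDI_HEADER_XFLAGS_EVREQ */
		return hdr;
	}
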
92 | /* The MC can generate events for two reasons: | ||
93 | * - To complete a shared memory request if XFLAGS_EVREQ was set | ||
94 | * - As a notification (link state, i2c event), controlled | ||
95 | * via MC_CMD_LOG_CTRL | ||
96 | * | ||
97 | * Both events share a common structure: | ||
98 | * | ||
99 | * 0 32 33 36 44 52 60 | ||
100 | * | Data | Cont | Level | Src | Code | Rsvd | | ||
101 | * | | ||
102 | * \ There is another event pending in this notification | ||
103 | * | ||
104 | * If Code==CMDDONE, then the fields are further interpreted as: | ||
105 | * | ||
106 | * - LEVEL==INFO Command succeeded	| ||
107 | * - LEVEL==ERR Command failed | ||
108 | * | ||
109 | * 0 8 16 24 32 | ||
110 | * | Seq | Datalen | Errno | Rsvd | | ||
111 | * | ||
112 | * These fields are taken directly out of the standard MCDI header, i.e., | ||
113 | * LEVEL==ERR, Datalen == 0 => Reboot | ||
114 | * | ||
115 | * Events can be squirted out of the UART (using LOG_CTRL) without a | ||
116 | * MCDI header. An event can be distinguished from an MCDI response by	| ||
117 | * examining the first byte which is 0xc0. This corresponds to the | ||
118 | * non-existent MCDI command MC_CMD_DEBUG_LOG. | ||
119 | * | ||
120 | * 0 7 8 | ||
121 | * | command | Resync | = 0xc0 | ||
122 | * | ||
123 | * Since the event is written in big-endian byte order, this works | ||
124 | * providing bits 56-63 of the event are 0xc0. | ||
125 | * | ||
126 | * 56 60 63 | ||
127 | * | Rsvd | Code | = 0xc0 | ||
128 | * | ||
129 | * Which means for convenience the event code is 0xc for all MC | ||
130 | * generated events. | ||
131 | */ | ||
132 | #define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc | ||
133 | |||
134 | #define MCDI_EVENT_DATA_LBN 0 | ||
135 | #define MCDI_EVENT_DATA_WIDTH 32 | ||
136 | #define MCDI_EVENT_CONT_LBN 32 | ||
137 | #define MCDI_EVENT_CONT_WIDTH 1 | ||
138 | #define MCDI_EVENT_LEVEL_LBN 33 | ||
139 | #define MCDI_EVENT_LEVEL_WIDTH 3 | ||
140 | #define MCDI_EVENT_LEVEL_INFO (0) | ||
141 | #define MCDI_EVENT_LEVEL_WARN (1) | ||
142 | #define MCDI_EVENT_LEVEL_ERR (2) | ||
143 | #define MCDI_EVENT_LEVEL_FATAL (3) | ||
144 | #define MCDI_EVENT_SRC_LBN 36 | ||
145 | #define MCDI_EVENT_SRC_WIDTH 8 | ||
146 | #define MCDI_EVENT_CODE_LBN 44 | ||
147 | #define MCDI_EVENT_CODE_WIDTH 8 | ||
148 | #define MCDI_EVENT_CODE_BADSSERT (1) | ||
149 | #define MCDI_EVENT_CODE_PMNOTICE (2) | ||
150 | #define MCDI_EVENT_CODE_CMDDONE (3) | ||
151 | #define MCDI_EVENT_CMDDONE_SEQ_LBN 0 | ||
152 | #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 | ||
153 | #define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 | ||
154 | #define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 | ||
155 | #define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 | ||
156 | #define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 | ||
157 | #define MCDI_EVENT_CODE_LINKCHANGE (4) | ||
158 | #define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 | ||
159 | #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 | ||
160 | #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 | ||
161 | #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 | ||
162 | #define MCDI_EVENT_LINKCHANGE_SPEED_100M 1 | ||
163 | #define MCDI_EVENT_LINKCHANGE_SPEED_1G 2 | ||
164 | #define MCDI_EVENT_LINKCHANGE_SPEED_10G 3 | ||
165 | #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 | ||
166 | #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 | ||
167 | #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 | ||
168 | #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 | ||
169 | #define MCDI_EVENT_CODE_SENSOREVT (5) | ||
170 | #define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 | ||
171 | #define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 | ||
172 | #define MCDI_EVENT_SENSOREVT_STATE_LBN 8 | ||
173 | #define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 | ||
174 | #define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 | ||
175 | #define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 | ||
176 | #define MCDI_EVENT_CODE_SCHEDERR (6) | ||
177 | #define MCDI_EVENT_CODE_REBOOT (7) | ||
178 | #define MCDI_EVENT_CODE_MAC_STATS_DMA (8) | ||
179 | #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 | ||
180 | #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 | ||
181 | |||
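As a worked example of the field definitions above, the snippet below pulls the CMDDONE sub-fields out of a raw 64-bit event word using plain shifts (illustrative only; the driver does this through EFX_QWORD_FIELD()/MCDI_EVENT_FIELD(), as in efx_mcdi_process_event()):

	#include <stdint.h>
	#include <stdio.h>

	static void mcdi_decode_cmddone_sketch(uint64_t ev)
	{
		unsigned int code = (unsigned int)(ev >> 44) & 0xff;	/* MCDI_EVENT_CODE */
		uint32_t data = (uint32_t)ev;				/* MCDI_EVENT_DATA */

		if (code != 3)	/* MCDI_EVENT_CODE_CMDDONE */
			return;

		printf("seq=%u datalen=%u errno=%u\n",
		       (unsigned int)(data & 0xff),		/* CMDDONE_SEQ */
		       (unsigned int)((data >> 8) & 0xff),	/* CMDDONE_DATALEN */
		       (unsigned int)((data >> 16) & 0xff));	/* CMDDONE_ERRNO */
	}
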
182 | /* Non-existent command target */ | ||
183 | #define MC_CMD_ERR_ENOENT 2 | ||
184 | /* assert() has killed the MC */ | ||
185 | #define MC_CMD_ERR_EINTR 4 | ||
186 | /* Caller does not hold required locks */ | ||
187 | #define MC_CMD_ERR_EACCES 13 | ||
188 | /* Resource is currently unavailable (e.g. lock contention) */ | ||
189 | #define MC_CMD_ERR_EBUSY 16 | ||
190 | /* Invalid argument to target */ | ||
191 | #define MC_CMD_ERR_EINVAL 22 | ||
192 | /* Non-recursive resource is already acquired */ | ||
193 | #define MC_CMD_ERR_EDEADLK 35 | ||
194 | /* Operation not implemented */ | ||
195 | #define MC_CMD_ERR_ENOSYS 38 | ||
196 | /* Operation timed out */ | ||
197 | #define MC_CMD_ERR_ETIME 62 | ||
198 | |||
199 | #define MC_CMD_ERR_CODE_OFST 0 | ||
200 | |||
201 | |||
202 | /* MC_CMD_READ32: (debug, variadic out) | ||
203 | * Read multiple 32-bit words from MC memory	| ||
204 | */ | ||
205 | #define MC_CMD_READ32 0x01 | ||
206 | #define MC_CMD_READ32_IN_LEN 8 | ||
207 | #define MC_CMD_READ32_IN_ADDR_OFST 0 | ||
208 | #define MC_CMD_READ32_IN_NUMWORDS_OFST 4 | ||
209 | #define MC_CMD_READ32_OUT_LEN(_numwords) \ | ||
210 | (4 * (_numwords)) | ||
211 | #define MC_CMD_READ32_OUT_BUFFER_OFST 0 | ||
212 | |||
213 | /* MC_CMD_WRITE32: (debug, variadic in) | ||
214 | * Write multiple 32-bit words to MC memory	| ||
215 | */ | ||
216 | #define MC_CMD_WRITE32 0x02 | ||
217 | #define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4) | ||
218 | #define MC_CMD_WRITE32_IN_ADDR_OFST 0 | ||
219 | #define MC_CMD_WRITE32_IN_BUFFER_OFST 4 | ||
220 | #define MC_CMD_WRITE32_OUT_LEN 0 | ||
221 | |||
222 | /* MC_CMD_COPYCODE: (debug) | ||
223 | * Copy MC code between two locations and jump | ||
224 | */ | ||
225 | #define MC_CMD_COPYCODE 0x03 | ||
226 | #define MC_CMD_COPYCODE_IN_LEN 16 | ||
227 | #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 | ||
228 | #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 | ||
229 | #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 | ||
230 | #define MC_CMD_COPYCODE_IN_JUMP_OFST 12 | ||
231 | /* Control should return to the caller rather than jumping */ | ||
232 | #define MC_CMD_COPYCODE_JUMP_NONE 1 | ||
233 | #define MC_CMD_COPYCODE_OUT_LEN 0 | ||
234 | |||
235 | /* MC_CMD_SET_FUNC: (debug) | ||
236 | * Select function for function-specific commands. | ||
237 | */ | ||
238 | #define MC_CMD_SET_FUNC 0x04 | ||
239 | #define MC_CMD_SET_FUNC_IN_LEN 4 | ||
240 | #define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 | ||
241 | #define MC_CMD_SET_FUNC_OUT_LEN 0 | ||
242 | |||
243 | /* MC_CMD_GET_BOOT_STATUS: | ||
244 | * Get the instruction address from which the MC booted. | ||
245 | */ | ||
246 | #define MC_CMD_GET_BOOT_STATUS 0x05 | ||
247 | #define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 | ||
248 | #define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 | ||
249 | #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 | ||
250 | #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 | ||
251 | /* Reboot caused by watchdog */ | ||
252 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0) | ||
253 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1) | ||
254 | /* MC booted from primary flash partition */ | ||
255 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1) | ||
256 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1) | ||
257 | /* MC booted from backup flash partition */ | ||
258 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2) | ||
259 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1) | ||
260 | |||
261 | /* MC_CMD_GET_ASSERTS: (debug, variadic out) | ||
262 | * Get (and optionally clear) the current assertion status. | ||
263 | * | ||
264 | * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion | ||
265 | * payload. The other fields will only be present if | ||
266 | * OUT.GLOBAL_FLAGS != NO_FAILS | ||
267 | */ | ||
268 | #define MC_CMD_GET_ASSERTS 0x06 | ||
269 | #define MC_CMD_GET_ASSERTS_IN_LEN 4 | ||
270 | #define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 | ||
271 | #define MC_CMD_GET_ASSERTS_OUT_LEN 140 | ||
272 | /* Assertion status flag */ | ||
273 | #define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 | ||
274 | /*! No assertions have failed. */ | ||
275 | #define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1 | ||
276 | /*! A system-level assertion has failed. */ | ||
277 | #define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2 | ||
278 | /*! A thread-level assertion has failed. */ | ||
279 | #define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3 | ||
280 | /*! The system was reset by the watchdog. */ | ||
281 | #define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4 | ||
282 | /* Failing PC value */ | ||
283 | #define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 | ||
284 | /* Saved GP regs */ | ||
285 | #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 | ||
286 | #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124 | ||
287 | /* Failing thread address */ | ||
288 | #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 | ||
289 | |||
290 | /* MC_CMD_LOG_CTRL: | ||
291 | * Determine the output stream for various events and messages | ||
292 | */ | ||
293 | #define MC_CMD_LOG_CTRL 0x07 | ||
294 | #define MC_CMD_LOG_CTRL_IN_LEN 8 | ||
295 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 | ||
296 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1) | ||
297 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2) | ||
298 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 | ||
299 | #define MC_CMD_LOG_CTRL_OUT_LEN 0 | ||
300 | |||
301 | /* MC_CMD_GET_VERSION: | ||
302 | * Get version information about the MC firmware | ||
303 | */ | ||
304 | #define MC_CMD_GET_VERSION 0x08 | ||
305 | #define MC_CMD_GET_VERSION_IN_LEN 0 | ||
306 | #define MC_CMD_GET_VERSION_V0_OUT_LEN 4 | ||
307 | #define MC_CMD_GET_VERSION_V1_OUT_LEN 32 | ||
308 | #define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 | ||
309 | /* Reserved version number to indicate "any" version. */ | ||
310 | #define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff | ||
311 | /* The version response of a boot ROM awaiting rescue */ | ||
312 | #define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 | ||
313 | #define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4 | ||
314 | /* 128bit mask of functions supported by the current firmware */ | ||
315 | #define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8 | ||
316 | /* The command set exported by the boot ROM (MCDI v0) */ | ||
317 | #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ | ||
318 | (1 << MC_CMD_READ32) | \ | ||
319 | (1 << MC_CMD_WRITE32) | \ | ||
320 | (1 << MC_CMD_COPYCODE) | \ | ||
321 | (1 << MC_CMD_GET_VERSION), \ | ||
322 | 0, 0, 0 } | ||
323 | #define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 | ||
324 | |||
325 | /* Vectors in the boot ROM */ | ||
326 | /* Point to the copycode entry point. */ | ||
327 | #define MC_BOOTROM_COPYCODE_VEC (0x7f4) | ||
328 | /* Points to the recovery mode entry point. */ | ||
329 | #define MC_BOOTROM_NOFLASH_VEC (0x7f8) | ||
330 | |||
331 | /* Test execution limits */ | ||
332 | #define MC_TESTEXEC_VARIANT_COUNT 16 | ||
333 | #define MC_TESTEXEC_RESULT_COUNT 7 | ||
334 | |||
335 | /* MC_CMD_SET_TESTVARS: (debug, variadic in) | ||
336 | * Write variant words for test. | ||
337 | * | ||
338 | * The user supplies a bitmap of the variants they wish to set. | ||
339 | * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP) | ||
340 | */ | ||
341 | #define MC_CMD_SET_TESTVARS 0x09 | ||
342 | #define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \ | ||
343 | (4 + 4*(_numwords)) | ||
344 | #define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0 | ||
345 | /* Up to MC_TESTEXEC_VARIANT_COUNT of 32-bit words start here */	| ||
346 | #define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4 | ||
347 | #define MC_CMD_SET_TESTVARS_OUT_LEN 0 | ||
348 | |||
349 | /* MC_CMD_GET_TESTRCS: (debug, variadic out) | ||
350 | * Return result words from test. | ||
351 | */ | ||
352 | #define MC_CMD_GET_TESTRCS 0x0a | ||
353 | #define MC_CMD_GET_TESTRCS_IN_LEN 4 | ||
354 | #define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0 | ||
355 | #define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \ | ||
356 | (4 * (_numwords)) | ||
357 | #define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0 | ||
358 | |||
359 | /* MC_CMD_RUN_TEST: (debug) | ||
360 | * Run the test exported by this firmware image | ||
361 | */ | ||
362 | #define MC_CMD_RUN_TEST 0x0b | ||
363 | #define MC_CMD_RUN_TEST_IN_LEN 0 | ||
364 | #define MC_CMD_RUN_TEST_OUT_LEN 0 | ||
365 | |||
366 | /* MC_CMD_CSR_READ32: (debug, variadic out) | ||
367 | * Read 32bit words from the indirect memory map | ||
368 | */ | ||
369 | #define MC_CMD_CSR_READ32 0x0c | ||
370 | #define MC_CMD_CSR_READ32_IN_LEN 12 | ||
371 | #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 | ||
372 | #define MC_CMD_CSR_READ32_IN_STEP_OFST 4 | ||
373 | #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 | ||
374 | #define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \ | ||
375 | (((_numwords) * 4) + 4) | ||
376 | /* IN.NUMWORDS of 32bit words start here */ | ||
377 | #define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 | ||
378 | #define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \ | ||
379 | ((_numwords) * 4) | ||
380 | |||
381 | /* MC_CMD_CSR_WRITE32: (debug, variadic in) | ||
382 | * Write 32bit dwords to the indirect memory map | ||
383 | */ | ||
384 | #define MC_CMD_CSR_WRITE32 0x0d | ||
385 | #define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \ | ||
386 | (((_numwords) * 4) + 8) | ||
387 | #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 | ||
388 | #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 | ||
389 | /* Multiple 32bit words of data to write start here */ | ||
390 | #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 | ||
391 | #define MC_CMD_CSR_WRITE32_OUT_LEN 4 | ||
392 | #define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 | ||
393 | |||
394 | /* MC_CMD_JTAG_WORK: (debug, fpga only) | ||
395 | * Process JTAG work buffer for RBF acceleration. | ||
396 | * | ||
397 | * Host: bit count, (up to) 32 words of data to clock out to JTAG | ||
398 | * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.) | ||
399 | * MC: bit count, (up to) 32 words of data clocked in from JTAG | ||
400 | * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused) | ||
401 | */ | ||
402 | #define MC_CMD_JTAG_WORK 0x0e | ||
403 | |||
404 | /* MC_CMD_STACKINFO: (debug, variadic out) | ||
405 | * Get stack information | ||
406 | * | ||
407 | * Host: nothing | ||
408 | * MC: (thread ptr, stack size, free space) for each thread in system | ||
409 | */ | ||
410 | #define MC_CMD_STACKINFO 0x0f | ||
411 | |||
412 | /* MC_CMD_MDIO_READ: | ||
413 | * MDIO register read | ||
414 | */ | ||
415 | #define MC_CMD_MDIO_READ 0x10 | ||
416 | #define MC_CMD_MDIO_READ_IN_LEN 16 | ||
417 | #define MC_CMD_MDIO_READ_IN_BUS_OFST 0 | ||
418 | #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 | ||
419 | #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 | ||
420 | #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 | ||
421 | #define MC_CMD_MDIO_READ_OUT_LEN 8 | ||
422 | #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 | ||
423 | #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 | ||
424 | |||
425 | /* MC_CMD_MDIO_WRITE: | ||
426 | * MDIO register write | ||
427 | */ | ||
428 | #define MC_CMD_MDIO_WRITE 0x11 | ||
429 | #define MC_CMD_MDIO_WRITE_IN_LEN 20 | ||
430 | #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 | ||
431 | #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 | ||
432 | #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 | ||
433 | #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 | ||
434 | #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 | ||
435 | #define MC_CMD_MDIO_WRITE_OUT_LEN 4 | ||
436 | #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 | ||
437 | |||
438 | /* By default all the MCDI MDIO operations use clause 45 addressing.	| ||
439 | * To use clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.	| ||
440 | */ | ||
441 | #define MC_CMD_MDIO_CLAUSE22 32 | ||
442 | |||
443 | /* There are two MDIO buses: one for the internal PHY, and one for external | ||
444 | * devices. | ||
445 | */ | ||
446 | #define MC_CMD_MDIO_BUS_INTERNAL 0 | ||
447 | #define MC_CMD_MDIO_BUS_EXTERNAL 1 | ||
448 | |||
449 | /* The MDIO commands return the raw status bits from the MDIO block. A "good" | ||
450 | * transaction should have the DONE bit set and all other bits clear. | ||
451 | */ | ||
452 | #define MC_CMD_MDIO_STATUS_GOOD 0x08 | ||
453 | |||
454 | |||
455 | /* MC_CMD_DBI_WRITE: (debug) | ||
456 | * Write DBI register(s) | ||
457 | * | ||
458 | * Host: address, byte-enables (and VF selection, and cs2 flag), | ||
459 | * value [,address ...] | ||
460 | * MC: nothing | ||
461 | */ | ||
462 | #define MC_CMD_DBI_WRITE 0x12 | ||
463 | #define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \ | ||
464 | (12 * (_numwords)) | ||
465 | #define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \ | ||
466 | (((_word) * 12) + 0) | ||
467 | #define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \ | ||
468 | (((_word) * 12) + 4) | ||
469 | #define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \ | ||
470 | (((_word) * 12) + 8) | ||
471 | #define MC_CMD_DBI_WRITE_OUT_LEN 0 | ||
472 | |||
473 | /* MC_CMD_DBI_READ: (debug) | ||
474 | * Read DBI register(s) | ||
475 | * | ||
476 | * Host: address, [,address ...] | ||
477 | * MC: value [,value ...] | ||
478 | * (note: this does not support reading from VFs, but is retained for backwards | ||
479 | * compatibility; see MC_CMD_DBI_READX below) | ||
480 | */ | ||
481 | #define MC_CMD_DBI_READ 0x13 | ||
482 | #define MC_CMD_DBI_READ_IN_LEN(_numwords) \ | ||
483 | (4 * (_numwords)) | ||
484 | #define MC_CMD_DBI_READ_OUT_LEN(_numwords) \ | ||
485 | (4 * (_numwords)) | ||
486 | |||
487 | /* MC_CMD_PORT_READ32: (debug) | ||
488 | * Read a 32-bit register from the indirect port register map. | ||
489 | * | ||
490 | * The port to access is implied by the Shared memory channel used. | ||
491 | */ | ||
492 | #define MC_CMD_PORT_READ32 0x14 | ||
493 | #define MC_CMD_PORT_READ32_IN_LEN 4 | ||
494 | #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 | ||
495 | #define MC_CMD_PORT_READ32_OUT_LEN 8 | ||
496 | #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 | ||
497 | #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 | ||
498 | |||
499 | /* MC_CMD_PORT_WRITE32: (debug) | ||
500 | * Write a 32-bit register to the indirect port register map. | ||
501 | * | ||
502 | * The port to access is implied by the Shared memory channel used. | ||
503 | */ | ||
504 | #define MC_CMD_PORT_WRITE32 0x15 | ||
505 | #define MC_CMD_PORT_WRITE32_IN_LEN 8 | ||
506 | #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 | ||
507 | #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 | ||
508 | #define MC_CMD_PORT_WRITE32_OUT_LEN 4 | ||
509 | #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 | ||
510 | |||
511 | /* MC_CMD_PORT_READ128: (debug) | ||
512 | * Read a 128-bit register from indirect port register map | ||
513 | * | ||
514 | * The port to access is implied by the Shared memory channel used. | ||
515 | */ | ||
516 | #define MC_CMD_PORT_READ128 0x16 | ||
517 | #define MC_CMD_PORT_READ128_IN_LEN 4 | ||
518 | #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 | ||
519 | #define MC_CMD_PORT_READ128_OUT_LEN 20 | ||
520 | #define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 | ||
521 | #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 | ||
522 | |||
523 | /* MC_CMD_PORT_WRITE128: (debug) | ||
524 | * Write a 128-bit register to indirect port register map. | ||
525 | * | ||
526 | * The port to access is implied by the Shared memory channel used. | ||
527 | */ | ||
528 | #define MC_CMD_PORT_WRITE128 0x17 | ||
529 | #define MC_CMD_PORT_WRITE128_IN_LEN 20 | ||
530 | #define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 | ||
531 | #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 | ||
532 | #define MC_CMD_PORT_WRITE128_OUT_LEN 4 | ||
533 | #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 | ||
534 | |||
535 | /* MC_CMD_GET_BOARD_CFG: | ||
536 | * Returns the MC firmware configuration structure | ||
537 | * | ||
538 | * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of | ||
539 | * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file | ||
540 | * for a specific board type, but otherwise have no meaning to the MC; they | ||
541 | * are used by the driver to manage selection of appropriate firmware updates. | ||
542 | */ | ||
543 | #define MC_CMD_GET_BOARD_CFG 0x18 | ||
544 | #define MC_CMD_GET_BOARD_CFG_IN_LEN 0 | ||
545 | #define MC_CMD_GET_BOARD_CFG_OUT_LEN 96 | ||
546 | #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 | ||
547 | #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 | ||
548 | #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 | ||
549 | #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 | ||
550 | #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 | ||
551 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 | ||
552 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 | ||
553 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 | ||
554 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 | ||
555 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 | ||
556 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 | ||
557 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 | ||
558 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 | ||
559 | #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 | ||
560 | #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24 | ||
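The GET_BOARD_CFG layout mixes dwords with raw byte fields (the 6-byte MAC address bases and the 24-byte FW_SUBTYPE_LIST). A hypothetical sketch of pulling port 0's MAC address base and count out of a successful response buffer, copying at the byte offsets defined above:

    /* Hypothetical sketch: extract port 0's base MAC address and count from
     * a MC_CMD_GET_BOARD_CFG response (outbuf is at least OUT_LEN bytes). */
    static void example_parse_board_cfg(const u8 *outbuf, u8 mac_addr[6],
    				    u32 *mac_count)
    {
    	memcpy(mac_addr,
    	       outbuf + MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST,
    	       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN);
    	*mac_count = get_unaligned_le32(
    		outbuf + MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST);
    }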
561 | |||
562 | /* MC_CMD_DBI_READX: (debug) | ||
563 | * Read DBI register(s) -- extended functionality | ||
564 | * | ||
565 | * Host: vf selection, address, [,vf selection ...] | ||
566 | * MC: value [,value ...] | ||
567 | */ | ||
568 | #define MC_CMD_DBI_READX 0x19 | ||
569 | #define MC_CMD_DBI_READX_IN_LEN(_numwords) \ | ||
570 | (8*(_numwords)) | ||
571 | #define MC_CMD_DBI_READX_OUT_LEN(_numwords) \ | ||
572 | (4*(_numwords)) | ||
573 | |||
574 | /* MC_CMD_SET_RAND_SEED: | ||
575 | * Set the 16-byte seed for the MC pseudo-random number generator | ||
576 | */ | ||
577 | #define MC_CMD_SET_RAND_SEED 0x1a | ||
578 | #define MC_CMD_SET_RAND_SEED_IN_LEN 16 | ||
579 | #define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 | ||
580 | #define MC_CMD_SET_RAND_SEED_OUT_LEN 0 | ||
581 | |||
582 | /* MC_CMD_LTSSM_HIST: (debug) | ||
583 | * Retrieve the history of the LTSSM, if the build supports it. | ||
584 | * | ||
585 | * Host: nothing | ||
586 | * MC: variable number of LTSSM values, as bytes | ||
587 | * The history is read-to-clear. | ||
588 | */ | ||
589 | #define MC_CMD_LTSSM_HIST 0x1b | ||
590 | |||
591 | /* MC_CMD_DRV_ATTACH: | ||
592 | * Inform MCPU that this port is managed on the host (i.e. driver active) | ||
593 | */ | ||
594 | #define MC_CMD_DRV_ATTACH 0x1c | ||
595 | #define MC_CMD_DRV_ATTACH_IN_LEN 8 | ||
596 | #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 | ||
597 | #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 | ||
598 | #define MC_CMD_DRV_ATTACH_OUT_LEN 4 | ||
599 | #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 | ||
600 | |||
601 | /* MC_CMD_NCSI_PROD: (debug) | ||
602 | * Trigger an NC-SI event (and possibly an AEN in response) | ||
603 | */ | ||
604 | #define MC_CMD_NCSI_PROD 0x1d | ||
605 | #define MC_CMD_NCSI_PROD_IN_LEN 4 | ||
606 | #define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0 | ||
607 | #define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0 | ||
608 | #define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1 | ||
609 | #define MC_CMD_NCSI_PROD_RESET_LBN 1 | ||
610 | #define MC_CMD_NCSI_PROD_RESET_WIDTH 1 | ||
611 | #define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2 | ||
612 | #define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1 | ||
613 | #define MC_CMD_NCSI_PROD_OUT_LEN 0 | ||
614 | |||
615 | /* Enumeration */ | ||
616 | #define MC_CMD_NCSI_PROD_LINKCHANGE 0 | ||
617 | #define MC_CMD_NCSI_PROD_RESET 1 | ||
618 | #define MC_CMD_NCSI_PROD_DRVATTACH 2 | ||
619 | |||
620 | /* MC_CMD_DEVEL: (debug) | ||
621 | * Reserved for development | ||
622 | */ | ||
623 | #define MC_CMD_DEVEL 0x1e | ||
624 | |||
625 | /* MC_CMD_SHMUART: (debug) | ||
626 | * Route UART output to circular buffer in shared memory instead. | ||
627 | */ | ||
628 | #define MC_CMD_SHMUART 0x1f | ||
629 | #define MC_CMD_SHMUART_IN_FLAG_OFST 0 | ||
630 | #define MC_CMD_SHMUART_IN_LEN 4 | ||
631 | #define MC_CMD_SHMUART_OUT_LEN 0 | ||
632 | |||
633 | /* MC_CMD_PORT_RESET: | ||
634 | * Generic per-port reset. There is no equivalent for per-board reset. | ||
635 | * | ||
636 | * Locks required: None | ||
637 | * Return code: 0, ETIME | ||
638 | */ | ||
639 | #define MC_CMD_PORT_RESET 0x20 | ||
640 | #define MC_CMD_PORT_RESET_IN_LEN 0 | ||
641 | #define MC_CMD_PORT_RESET_OUT_LEN 0 | ||
642 | |||
643 | /* MC_CMD_RESOURCE_LOCK: | ||
644 | * Generic resource lock/unlock interface. | ||
645 | * | ||
646 | * Locks required: None | ||
647 | * Return code: 0, | ||
648 | * EBUSY (if trylock is contended by the other port), | ||
649 | * EDEADLK (if trylock is already acquired by this port), | ||
650 | * EINVAL (if unlock doesn't own the lock) | ||
651 | */ | ||
652 | #define MC_CMD_RESOURCE_LOCK 0x21 | ||
653 | #define MC_CMD_RESOURCE_LOCK_IN_LEN 8 | ||
654 | #define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0 | ||
655 | #define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1 | ||
656 | #define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0 | ||
657 | #define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4 | ||
658 | #define MC_CMD_RESOURCE_LOCK_I2C 2 | ||
659 | #define MC_CMD_RESOURCE_LOCK_PHY 3 | ||
660 | #define MC_CMD_RESOURCE_LOCK_OUT_LEN 0 | ||
661 | |||
662 | /* MC_CMD_SPI_COMMAND: (variadic in, variadic out) | ||
663 | * Read/Write to/from the SPI device. | ||
664 | * | ||
665 | * Locks required: SPI_LOCK | ||
666 | * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held) | ||
667 | */ | ||
668 | #define MC_CMD_SPI_COMMAND 0x22 | ||
669 | #define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes)) | ||
670 | #define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0 | ||
671 | #define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0 | ||
672 | #define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4 | ||
673 | #define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8 | ||
674 | /* Data to write here */ | ||
675 | #define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12 | ||
676 | #define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes) | ||
677 | /* Data read here */ | ||
678 | #define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0 | ||
679 | |||
680 | /* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out) | ||
681 | * Read/Write to/from the I2C bus. | ||
682 | * | ||
683 | * Locks required: I2C_LOCK | ||
684 | * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held) | ||
685 | */ | ||
686 | #define MC_CMD_I2C_RW 0x23 | ||
687 | #define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes)) | ||
688 | #define MC_CMD_I2C_RW_IN_ARGS_OFST 0 | ||
689 | #define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0 | ||
690 | #define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4 | ||
691 | /* Data to write here */ | ||
692 | #define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8 | ||
693 | #define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes) | ||
694 | /* Data read here */ | ||
695 | #define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0 | ||
696 | |||
697 | /* Generic phy capability bitmask */ | ||
698 | #define MC_CMD_PHY_CAP_10HDX_LBN 1 | ||
699 | #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 | ||
700 | #define MC_CMD_PHY_CAP_10FDX_LBN 2 | ||
701 | #define MC_CMD_PHY_CAP_10FDX_WIDTH 1 | ||
702 | #define MC_CMD_PHY_CAP_100HDX_LBN 3 | ||
703 | #define MC_CMD_PHY_CAP_100HDX_WIDTH 1 | ||
704 | #define MC_CMD_PHY_CAP_100FDX_LBN 4 | ||
705 | #define MC_CMD_PHY_CAP_100FDX_WIDTH 1 | ||
706 | #define MC_CMD_PHY_CAP_1000HDX_LBN 5 | ||
707 | #define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 | ||
708 | #define MC_CMD_PHY_CAP_1000FDX_LBN 6 | ||
709 | #define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 | ||
710 | #define MC_CMD_PHY_CAP_10000FDX_LBN 7 | ||
711 | #define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 | ||
712 | #define MC_CMD_PHY_CAP_PAUSE_LBN 8 | ||
713 | #define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 | ||
714 | #define MC_CMD_PHY_CAP_ASYM_LBN 9 | ||
715 | #define MC_CMD_PHY_CAP_ASYM_WIDTH 1 | ||
716 | #define MC_CMD_PHY_CAP_AN_LBN 10 | ||
717 | #define MC_CMD_PHY_CAP_AN_WIDTH 1 | ||
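These _LBN/_WIDTH pairs describe single-bit flags within the 32-bit capability word. A hypothetical check using plain shifts (the driver's own bitfield macros would normally be used instead):

    /* Hypothetical sketch: test whether a capability word advertises
     * 10G full duplex and autonegotiation. */
    static bool example_phy_cap_10g_an(u32 cap)
    {
    	bool has_10g = (cap >> MC_CMD_PHY_CAP_10000FDX_LBN) & 1;
    	bool has_an  = (cap >> MC_CMD_PHY_CAP_AN_LBN) & 1;

    	return has_10g && has_an;
    }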
718 | |||
719 | /* Generic loopback enumeration */ | ||
720 | #define MC_CMD_LOOPBACK_NONE 0 | ||
721 | #define MC_CMD_LOOPBACK_DATA 1 | ||
722 | #define MC_CMD_LOOPBACK_GMAC 2 | ||
723 | #define MC_CMD_LOOPBACK_XGMII 3 | ||
724 | #define MC_CMD_LOOPBACK_XGXS 4 | ||
725 | #define MC_CMD_LOOPBACK_XAUI 5 | ||
726 | #define MC_CMD_LOOPBACK_GMII 6 | ||
727 | #define MC_CMD_LOOPBACK_SGMII 7 | ||
728 | #define MC_CMD_LOOPBACK_XGBR 8 | ||
729 | #define MC_CMD_LOOPBACK_XFI 9 | ||
730 | #define MC_CMD_LOOPBACK_XAUI_FAR 10 | ||
731 | #define MC_CMD_LOOPBACK_GMII_FAR 11 | ||
732 | #define MC_CMD_LOOPBACK_SGMII_FAR 12 | ||
733 | #define MC_CMD_LOOPBACK_XFI_FAR 13 | ||
734 | #define MC_CMD_LOOPBACK_GPHY 14 | ||
735 | #define MC_CMD_LOOPBACK_PHYXS 15 | ||
736 | #define MC_CMD_LOOPBACK_PCS 16 | ||
737 | #define MC_CMD_LOOPBACK_PMAPMD 17 | ||
738 | #define MC_CMD_LOOPBACK_XPORT 18 | ||
739 | #define MC_CMD_LOOPBACK_XGMII_WS 19 | ||
740 | #define MC_CMD_LOOPBACK_XAUI_WS 20 | ||
741 | #define MC_CMD_LOOPBACK_XAUI_WS_FAR 21 | ||
742 | #define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22 | ||
743 | #define MC_CMD_LOOPBACK_GMII_WS 23 | ||
744 | #define MC_CMD_LOOPBACK_XFI_WS 24 | ||
745 | #define MC_CMD_LOOPBACK_XFI_WS_FAR 25 | ||
746 | #define MC_CMD_LOOPBACK_PHYXS_WS 26 | ||
747 | |||
748 | /* Generic PHY statistics enumeration */ | ||
749 | #define MC_CMD_OUI 0 | ||
750 | #define MC_CMD_PMA_PMD_LINK_UP 1 | ||
751 | #define MC_CMD_PMA_PMD_RX_FAULT 2 | ||
752 | #define MC_CMD_PMA_PMD_TX_FAULT 3 | ||
753 | #define MC_CMD_PMA_PMD_SIGNAL 4 | ||
754 | #define MC_CMD_PMA_PMD_SNR_A 5 | ||
755 | #define MC_CMD_PMA_PMD_SNR_B 6 | ||
756 | #define MC_CMD_PMA_PMD_SNR_C 7 | ||
757 | #define MC_CMD_PMA_PMD_SNR_D 8 | ||
758 | #define MC_CMD_PCS_LINK_UP 9 | ||
759 | #define MC_CMD_PCS_RX_FAULT 10 | ||
760 | #define MC_CMD_PCS_TX_FAULT 11 | ||
761 | #define MC_CMD_PCS_BER 12 | ||
762 | #define MC_CMD_PCS_BLOCK_ERRORS 13 | ||
763 | #define MC_CMD_PHYXS_LINK_UP 14 | ||
764 | #define MC_CMD_PHYXS_RX_FAULT 15 | ||
765 | #define MC_CMD_PHYXS_TX_FAULT 16 | ||
766 | #define MC_CMD_PHYXS_ALIGN 17 | ||
767 | #define MC_CMD_PHYXS_SYNC 18 | ||
768 | #define MC_CMD_AN_LINK_UP 19 | ||
769 | #define MC_CMD_AN_COMPLETE 20 | ||
770 | #define MC_CMD_AN_10GBT_STATUS 21 | ||
771 | #define MC_CMD_CL22_LINK_UP 22 | ||
772 | #define MC_CMD_PHY_NSTATS 23 | ||
773 | |||
774 | /* MC_CMD_GET_PHY_CFG: | ||
775 | * Report PHY configuration. This is guaranteed to succeed even if the PHY | ||
776 | * is in a "zombie" state. | ||
777 | * | ||
778 | * Locks required: None | ||
779 | * Return code: 0 | ||
780 | */ | ||
781 | #define MC_CMD_GET_PHY_CFG 0x24 | ||
782 | |||
783 | #define MC_CMD_GET_PHY_CFG_IN_LEN 0 | ||
784 | #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 | ||
785 | |||
786 | #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 | ||
787 | #define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0 | ||
788 | #define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1 | ||
789 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1 | ||
790 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1 | ||
791 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2 | ||
792 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1 | ||
793 | #define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3 | ||
794 | #define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1 | ||
795 | #define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4 | ||
796 | #define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1 | ||
797 | #define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5 | ||
798 | #define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1 | ||
799 | #define MC_CMD_GET_PHY_CFG_BIST_LBN 6 | ||
800 | #define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1 | ||
801 | #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 | ||
802 | /* Bitmask of supported capabilities */ | ||
803 | #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 | ||
804 | #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 | ||
805 | #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 | ||
806 | /* PHY statistics bitmap */ | ||
807 | #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 | ||
808 | /* PHY type/name string */ | ||
809 | #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 | ||
810 | #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 | ||
811 | #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 | ||
812 | #define MC_CMD_MEDIA_XAUI 1 | ||
813 | #define MC_CMD_MEDIA_CX4 2 | ||
814 | #define MC_CMD_MEDIA_KX4 3 | ||
815 | #define MC_CMD_MEDIA_XFP 4 | ||
816 | #define MC_CMD_MEDIA_SFP_PLUS 5 | ||
817 | #define MC_CMD_MEDIA_BASE_T 6 | ||
818 | /* MDIO "MMDS" supported */ | ||
819 | #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 | ||
820 | /* Native clause 22 */ | ||
821 | #define MC_CMD_MMD_CLAUSE22 0 | ||
822 | #define MC_CMD_MMD_CLAUSE45_PMAPMD 1 | ||
823 | #define MC_CMD_MMD_CLAUSE45_WIS 2 | ||
824 | #define MC_CMD_MMD_CLAUSE45_PCS 3 | ||
825 | #define MC_CMD_MMD_CLAUSE45_PHYXS 4 | ||
826 | #define MC_CMD_MMD_CLAUSE45_DTEXS 5 | ||
827 | #define MC_CMD_MMD_CLAUSE45_TC 6 | ||
828 | #define MC_CMD_MMD_CLAUSE45_AN 7 | ||
829 | /* Clause22 proxied over clause45 by PHY */ | ||
830 | #define MC_CMD_MMD_CLAUSE45_C22EXT 29 | ||
831 | #define MC_CMD_MMD_CLAUSE45_VEND1 30 | ||
832 | #define MC_CMD_MMD_CLAUSE45_VEND2 31 | ||
833 | /* PHY stepping version */ | ||
834 | #define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 | ||
835 | #define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 | ||
836 | |||
837 | /* MC_CMD_START_BIST: | ||
838 | * Start a BIST test on the PHY. | ||
839 | * | ||
840 | * Locks required: PHY_LOCK if doing a PHY BIST | ||
841 | * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) | ||
842 | */ | ||
843 | #define MC_CMD_START_BIST 0x25 | ||
844 | #define MC_CMD_START_BIST_IN_LEN 4 | ||
845 | #define MC_CMD_START_BIST_IN_TYPE_OFST 0 | ||
846 | #define MC_CMD_START_BIST_OUT_LEN 0 | ||
847 | |||
848 | /* Run the PHY's short cable BIST */ | ||
849 | #define MC_CMD_PHY_BIST_CABLE_SHORT 1 | ||
850 | /* Run the PHY's long cable BIST */ | ||
851 | #define MC_CMD_PHY_BIST_CABLE_LONG 2 | ||
852 | /* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */ | ||
853 | #define MC_CMD_BPX_SERDES_BIST 3 | ||
854 | /* Run the MC loopback tests */ | ||
855 | #define MC_CMD_MC_LOOPBACK_BIST 4 | ||
856 | /* Run the PHY's standard BIST */ | ||
857 | #define MC_CMD_PHY_BIST 5 | ||
858 | |||
859 | /* MC_CMD_POLL_BIST: (variadic output) | ||
860 | * Poll for BIST completion | ||
861 | * | ||
862 | * Returns a single status code, and optionally some PHY specific | ||
863 | * bist output. The driver should only consume the BIST output | ||
864 | * after validating OUTLEN and PHY_CFG.PHY_TYPE. | ||
865 | * | ||
866 | * If a driver can't succesfully parse the BIST output, it should | ||
867 | * still respect the pass/Fail in OUT.RESULT | ||
868 | * | ||
869 | * Locks required: PHY_LOCK if doing a PHY BIST | ||
870 | * Return code: 0, EACCES (if PHY_LOCK is not held) | ||
871 | */ | ||
872 | #define MC_CMD_POLL_BIST 0x26 | ||
873 | #define MC_CMD_POLL_BIST_IN_LEN 0 | ||
874 | #define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN | ||
875 | #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40 | ||
876 | #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 | ||
877 | #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 | ||
878 | #define MC_CMD_POLL_BIST_RUNNING 1 | ||
879 | #define MC_CMD_POLL_BIST_PASSED 2 | ||
880 | #define MC_CMD_POLL_BIST_FAILED 3 | ||
881 | #define MC_CMD_POLL_BIST_TIMEOUT 4 | ||
882 | /* Generic: */ | ||
883 | #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 | ||
884 | /* SFT9001-specific: */ | ||
885 | /* (offset 4 unused?) */ | ||
886 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8 | ||
887 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12 | ||
888 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16 | ||
889 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20 | ||
890 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24 | ||
891 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28 | ||
892 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32 | ||
893 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36 | ||
894 | #define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1 | ||
895 | #define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2 | ||
896 | #define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3 | ||
897 | #define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4 | ||
898 | #define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9 | ||
899 | /* mrsfp "PHY" driver: */ | ||
900 | #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 | ||
901 | #define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0 | ||
902 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1 | ||
903 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2 | ||
904 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3 | ||
905 | #define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4 | ||
906 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5 | ||
907 | #define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6 | ||
908 | #define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7 | ||
909 | #define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8 | ||
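Given the RUNNING/PASSED/FAILED/TIMEOUT result codes above, a driver typically loops on MC_CMD_POLL_BIST until the result leaves the RUNNING state. A hypothetical sketch (the retry count and msleep() interval are arbitrary, and real code must also validate OUTLEN before touching any PHY-specific output; <linux/delay.h> assumed):

    /* Hypothetical sketch: poll for BIST completion after MC_CMD_START_BIST. */
    static int example_poll_bist(struct efx_nic *efx)
    {
    	u8 outbuf[MC_CMD_POLL_BIST_OUT_SFT9001_LEN];
    	size_t outlen;
    	int retries, rc;
    	u32 result;

    	for (retries = 0; retries < 100; retries++) {
    		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
    				  outbuf, sizeof(outbuf), &outlen);
    		if (rc)
    			return rc;
    		if (outlen < 4)
    			return -EMSGSIZE;

    		result = get_unaligned_le32(
    			outbuf + MC_CMD_POLL_BIST_OUT_RESULT_OFST);
    		if (result != MC_CMD_POLL_BIST_RUNNING)
    			return (result == MC_CMD_POLL_BIST_PASSED) ? 0 : -EIO;

    		msleep(10);
    	}
    	return -ETIMEDOUT;
    }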
910 | |||
911 | /* MC_CMD_PHY_SPI: (variadic in, variadic out) | ||
912 | * Read/Write/Erase the PHY SPI device | ||
913 | * | ||
914 | * Locks required: PHY_LOCK | ||
915 | * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held) | ||
916 | */ | ||
917 | #define MC_CMD_PHY_SPI 0x27 | ||
918 | #define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes)) | ||
919 | #define MC_CMD_PHY_SPI_IN_ARGS_OFST 0 | ||
920 | #define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0 | ||
921 | #define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4 | ||
922 | #define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8 | ||
923 | /* Data to write here */ | ||
924 | #define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12 | ||
925 | #define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes) | ||
926 | /* Data read here */ | ||
927 | #define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0 | ||
928 | |||
929 | |||
930 | /* MC_CMD_GET_LOOPBACK_MODES: | ||
931 | * Returns a bitmask of loopback modes available at each speed. | ||
932 | * | ||
933 | * Locks required: None | ||
934 | * Return code: 0 | ||
935 | */ | ||
936 | #define MC_CMD_GET_LOOPBACK_MODES 0x28 | ||
937 | #define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 | ||
938 | #define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32 | ||
939 | #define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0 | ||
940 | #define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8 | ||
941 | #define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16 | ||
942 | #define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24 | ||
943 | |||
944 | /* Flow control enumeration */ | ||
945 | #define MC_CMD_FCNTL_OFF 0 | ||
946 | #define MC_CMD_FCNTL_RESPOND 1 | ||
947 | #define MC_CMD_FCNTL_BIDIR 2 | ||
948 | /* Auto - Use what the link has autonegotiated | ||
949 | * - The driver should modify the advertised capabilities via SET_LINK.CAP | ||
950 | * to control the negotiated flow control mode. | ||
951 | * - Can only be set if the PHY supports PAUSE+ASYM capabilities | ||
952 | * - Never returned by GET_LINK as the value programmed into the MAC | ||
953 | */ | ||
954 | #define MC_CMD_FCNTL_AUTO 3 | ||
955 | |||
956 | /* Generic mac fault bitmask */ | ||
957 | #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 | ||
958 | #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 | ||
959 | #define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 | ||
960 | #define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 | ||
961 | #define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 | ||
962 | #define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 | ||
963 | |||
964 | /* MC_CMD_GET_LINK: | ||
965 | * Read the unified MAC/PHY link state | ||
966 | * | ||
967 | * Locks required: None | ||
968 | * Return code: 0, ETIME | ||
969 | */ | ||
970 | #define MC_CMD_GET_LINK 0x29 | ||
971 | #define MC_CMD_GET_LINK_IN_LEN 0 | ||
972 | #define MC_CMD_GET_LINK_OUT_LEN 28 | ||
973 | /* near-side and link-partner advertised capabilities */ | ||
974 | #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 | ||
975 | #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 | ||
976 | /* Autonegotiated speed in mbit/s. The link may still be down | ||
977 | * even if this reads non-zero */ | ||
978 | #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 | ||
979 | #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 | ||
980 | #define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 | ||
981 | /* Whether we have overall link up */ | ||
982 | #define MC_CMD_GET_LINK_LINK_UP_LBN 0 | ||
983 | #define MC_CMD_GET_LINK_LINK_UP_WIDTH 1 | ||
984 | #define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1 | ||
985 | #define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1 | ||
986 | /* Whether we have link at the layers provided by the BPX */ | ||
987 | #define MC_CMD_GET_LINK_BPX_LINK_LBN 2 | ||
988 | #define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1 | ||
989 | /* Whether the PHY has external link */ | ||
990 | #define MC_CMD_GET_LINK_PHY_LINK_LBN 3 | ||
991 | #define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1 | ||
992 | #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 | ||
993 | #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 | ||
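A hypothetical sketch of reading the unified link state and decoding the pieces most drivers care about (link-up flag, speed and flow control). This again mirrors the efx_mcdi_rpc() usage in mcdi_phy.c and uses raw offsets only for clarity:

    /* Hypothetical sketch: fetch and decode MC_CMD_GET_LINK. */
    static int example_get_link(struct efx_nic *efx, bool *link_up,
    			    u32 *speed_mbps, u32 *fcntl)
    {
    	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
    	size_t outlen;
    	u32 flags;
    	int rc;

    	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
    			  outbuf, sizeof(outbuf), &outlen);
    	if (rc)
    		return rc;
    	if (outlen < MC_CMD_GET_LINK_OUT_LEN)
    		return -EMSGSIZE;

    	flags = get_unaligned_le32(outbuf + MC_CMD_GET_LINK_OUT_FLAGS_OFST);
    	*link_up = (flags >> MC_CMD_GET_LINK_LINK_UP_LBN) & 1;
    	*speed_mbps = get_unaligned_le32(
    		outbuf + MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST);
    	*fcntl = get_unaligned_le32(outbuf + MC_CMD_GET_LINK_OUT_FCNTL_OFST);
    	return 0;
    }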
994 | |||
995 | /* MC_CMD_SET_LINK: | ||
996 | * Write the unified MAC/PHY link configuration | ||
997 | * | ||
998 | * A loopback speed of "0" is supported, and means | ||
999 | * "choose any available speed". | ||
1000 | * | ||
1001 | * Locks required: None | ||
1002 | * Return code: 0, EINVAL, ETIME | ||
1003 | */ | ||
1004 | #define MC_CMD_SET_LINK 0x2a | ||
1005 | #define MC_CMD_SET_LINK_IN_LEN 16 | ||
1006 | #define MC_CMD_SET_LINK_IN_CAP_OFST 0 | ||
1007 | #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 | ||
1008 | #define MC_CMD_SET_LINK_LOWPOWER_LBN 0 | ||
1009 | #define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1 | ||
1010 | #define MC_CMD_SET_LINK_POWEROFF_LBN 1 | ||
1011 | #define MC_CMD_SET_LINK_POWEROFF_WIDTH 1 | ||
1012 | #define MC_CMD_SET_LINK_TXDIS_LBN 2 | ||
1013 | #define MC_CMD_SET_LINK_TXDIS_WIDTH 1 | ||
1014 | #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 | ||
1015 | #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 | ||
1016 | #define MC_CMD_SET_LINK_OUT_LEN 0 | ||
1017 | |||
1018 | /* MC_CMD_SET_ID_LED: | ||
1019 | * Set identification LED state | ||
1020 | * | ||
1021 | * Locks required: None | ||
1022 | * Return code: 0, EINVAL | ||
1023 | */ | ||
1024 | #define MC_CMD_SET_ID_LED 0x2b | ||
1025 | #define MC_CMD_SET_ID_LED_IN_LEN 4 | ||
1026 | #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 | ||
1027 | #define MC_CMD_LED_OFF 0 | ||
1028 | #define MC_CMD_LED_ON 1 | ||
1029 | #define MC_CMD_LED_DEFAULT 2 | ||
1030 | #define MC_CMD_SET_ID_LED_OUT_LEN 0 | ||
1031 | |||
1032 | /* MC_CMD_SET_MAC: | ||
1033 | * Set MAC configuration | ||
1034 | * | ||
1035 | * The MTU is the MTU programmed directly into the XMAC/GMAC | ||
1036 | * (inclusive of EtherII, VLAN, bug16011 padding) | ||
1037 | * | ||
1038 | * Locks required: None | ||
1039 | * Return code: 0, EINVAL | ||
1040 | */ | ||
1041 | #define MC_CMD_SET_MAC 0x2c | ||
1042 | #define MC_CMD_SET_MAC_IN_LEN 24 | ||
1043 | #define MC_CMD_SET_MAC_IN_MTU_OFST 0 | ||
1044 | #define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 | ||
1045 | #define MC_CMD_SET_MAC_IN_ADDR_OFST 8 | ||
1046 | #define MC_CMD_SET_MAC_IN_REJECT_OFST 16 | ||
1047 | #define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 | ||
1048 | #define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 | ||
1049 | #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 | ||
1050 | #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 | ||
1051 | #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 | ||
1052 | #define MC_CMD_SET_MAC_OUT_LEN 0 | ||
1053 | |||
1054 | /* MC_CMD_PHY_STATS: | ||
1055 | * Get generic PHY statistics | ||
1056 | * | ||
1057 | * This call returns the statistics for a generic PHY, by direct DMA | ||
1058 | * into host memory, in a sparse array (indexed by the enumeration above). | ||
1059 | * Each value is represented by a 32bit number. | ||
1060 | * | ||
1061 | * Locks required: None | ||
1062 | * Returns: 0, ETIME | ||
1063 | * Response methods: shared memory, event | ||
1064 | */ | ||
1065 | #define MC_CMD_PHY_STATS 0x2d | ||
1066 | #define MC_CMD_PHY_STATS_IN_LEN 8 | ||
1067 | #define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 | ||
1068 | #define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 | ||
1069 | #define MC_CMD_PHY_STATS_OUT_LEN 0 | ||
1070 | |||
1071 | /* Unified MAC statistics enumeration */ | ||
1072 | #define MC_CMD_MAC_GENERATION_START 0 | ||
1073 | #define MC_CMD_MAC_TX_PKTS 1 | ||
1074 | #define MC_CMD_MAC_TX_PAUSE_PKTS 2 | ||
1075 | #define MC_CMD_MAC_TX_CONTROL_PKTS 3 | ||
1076 | #define MC_CMD_MAC_TX_UNICAST_PKTS 4 | ||
1077 | #define MC_CMD_MAC_TX_MULTICAST_PKTS 5 | ||
1078 | #define MC_CMD_MAC_TX_BROADCAST_PKTS 6 | ||
1079 | #define MC_CMD_MAC_TX_BYTES 7 | ||
1080 | #define MC_CMD_MAC_TX_BAD_BYTES 8 | ||
1081 | #define MC_CMD_MAC_TX_LT64_PKTS 9 | ||
1082 | #define MC_CMD_MAC_TX_64_PKTS 10 | ||
1083 | #define MC_CMD_MAC_TX_65_TO_127_PKTS 11 | ||
1084 | #define MC_CMD_MAC_TX_128_TO_255_PKTS 12 | ||
1085 | #define MC_CMD_MAC_TX_256_TO_511_PKTS 13 | ||
1086 | #define MC_CMD_MAC_TX_512_TO_1023_PKTS 14 | ||
1087 | #define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15 | ||
1088 | #define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16 | ||
1089 | #define MC_CMD_MAC_TX_GTJUMBO_PKTS 17 | ||
1090 | #define MC_CMD_MAC_TX_BAD_FCS_PKTS 18 | ||
1091 | #define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19 | ||
1092 | #define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20 | ||
1093 | #define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21 | ||
1094 | #define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22 | ||
1095 | #define MC_CMD_MAC_TX_DEFERRED_PKTS 23 | ||
1096 | #define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24 | ||
1097 | #define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25 | ||
1098 | #define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26 | ||
1099 | #define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27 | ||
1100 | #define MC_CMD_MAC_RX_PKTS 28 | ||
1101 | #define MC_CMD_MAC_RX_PAUSE_PKTS 29 | ||
1102 | #define MC_CMD_MAC_RX_GOOD_PKTS 30 | ||
1103 | #define MC_CMD_MAC_RX_CONTROL_PKTS 31 | ||
1104 | #define MC_CMD_MAC_RX_UNICAST_PKTS 32 | ||
1105 | #define MC_CMD_MAC_RX_MULTICAST_PKTS 33 | ||
1106 | #define MC_CMD_MAC_RX_BROADCAST_PKTS 34 | ||
1107 | #define MC_CMD_MAC_RX_BYTES 35 | ||
1108 | #define MC_CMD_MAC_RX_BAD_BYTES 36 | ||
1109 | #define MC_CMD_MAC_RX_64_PKTS 37 | ||
1110 | #define MC_CMD_MAC_RX_65_TO_127_PKTS 38 | ||
1111 | #define MC_CMD_MAC_RX_128_TO_255_PKTS 39 | ||
1112 | #define MC_CMD_MAC_RX_256_TO_511_PKTS 40 | ||
1113 | #define MC_CMD_MAC_RX_512_TO_1023_PKTS 41 | ||
1114 | #define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42 | ||
1115 | #define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43 | ||
1116 | #define MC_CMD_MAC_RX_GTJUMBO_PKTS 44 | ||
1117 | #define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45 | ||
1118 | #define MC_CMD_MAC_RX_BAD_FCS_PKTS 46 | ||
1119 | #define MC_CMD_MAC_RX_OVERFLOW_PKTS 47 | ||
1120 | #define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48 | ||
1121 | #define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49 | ||
1122 | #define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50 | ||
1123 | #define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51 | ||
1124 | #define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52 | ||
1125 | #define MC_CMD_MAC_RX_JABBER_PKTS 53 | ||
1126 | #define MC_CMD_MAC_RX_NODESC_DROPS 54 | ||
1127 | #define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55 | ||
1128 | #define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56 | ||
1129 | #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 | ||
1130 | #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 | ||
1131 | #define MC_CMD_MAC_RX_MATCH_FAULT 59 | ||
1132 | #define MC_CMD_GMAC_DMABUF_START 64 | ||
1133 | #define MC_CMD_GMAC_DMABUF_END 95 | ||
1134 | /* Insert new members here. */ | ||
1135 | #define MC_CMD_MAC_GENERATION_END 96 | ||
1136 | #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) | ||
1137 | |||
1138 | /* MC_CMD_MAC_STATS: | ||
1139 | * Get unified GMAC/XMAC statistics | ||
1140 | * | ||
1141 | * This call returns unified statistics maintained by the MC as it | ||
1142 | * switches between the GMAC and XMAC. The MC will write out all | ||
1143 | * supported stats. The driver should zero initialise the buffer to | ||
1144 | * guarantee consistent results. | ||
1145 | * | ||
1146 | * Locks required: None | ||
1147 | * Returns: 0 | ||
1148 | * Response methods: shared memory, event | ||
1149 | */ | ||
1150 | #define MC_CMD_MAC_STATS 0x2e | ||
1151 | #define MC_CMD_MAC_STATS_IN_LEN 16 | ||
1152 | #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 | ||
1153 | #define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 | ||
1154 | #define MC_CMD_MAC_STATS_IN_CMD_OFST 8 | ||
1155 | #define MC_CMD_MAC_STATS_CMD_DMA_LBN 0 | ||
1156 | #define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1 | ||
1157 | #define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1 | ||
1158 | #define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1 | ||
1159 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2 | ||
1160 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1 | ||
1161 | /* Fields only relevant when PERIODIC_CHANGE is set */ | ||
1162 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3 | ||
1163 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1 | ||
1164 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4 | ||
1165 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1 | ||
1166 | #define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16 | ||
1167 | #define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16 | ||
1168 | #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 | ||
1169 | |||
1170 | #define MC_CMD_MAC_STATS_OUT_LEN 0 | ||
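The CMD word at offset 8 packs several single-bit flags plus a 16-bit period. A hypothetical sketch of requesting periodic DMA of the statistics, splitting the DMA address across the LO/HI dwords; the DMA buffer is assumed to have been allocated elsewhere and (an assumption, not stated in this header) to hold one 64-bit counter per statistic:

    /* Hypothetical sketch: enable periodic MAC statistics DMA via
     * MC_CMD_MAC_STATS.  dma_addr/dma_len describe a pre-allocated buffer. */
    static int example_mac_stats_periodic(struct efx_nic *efx,
    				      dma_addr_t dma_addr, u32 dma_len,
    				      u16 period_ms)
    {
    	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
    	u32 cmd;

    	put_unaligned_le32((u32)dma_addr,
    			   inbuf + MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST);
    	put_unaligned_le32((u64)dma_addr >> 32,
    			   inbuf + MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST);

    	cmd = (1 << MC_CMD_MAC_STATS_CMD_DMA_LBN) |
    	      (1 << MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN) |
    	      (1 << MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN) |
    	      ((u32)period_ms << MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN);
    	put_unaligned_le32(cmd, inbuf + MC_CMD_MAC_STATS_IN_CMD_OFST);
    	put_unaligned_le32(dma_len, inbuf + MC_CMD_MAC_STATS_IN_DMA_LEN_OFST);

    	return efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
    			    NULL, 0, NULL);
    }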
1171 | |||
1172 | /* Callisto flags */ | ||
1173 | #define MC_CMD_SFT9001_ROBUST_LBN 0 | ||
1174 | #define MC_CMD_SFT9001_ROBUST_WIDTH 1 | ||
1175 | #define MC_CMD_SFT9001_SHORT_REACH_LBN 1 | ||
1176 | #define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1 | ||
1177 | |||
1178 | /* MC_CMD_SFT9001_GET: | ||
1179 | * Read the current Callisto-specific settings | ||
1180 | * | ||
1181 | * Locks required: None | ||
1182 | * Returns: 0, ETIME | ||
1183 | */ | ||
1184 | #define MC_CMD_SFT9001_GET 0x30 | ||
1185 | #define MC_CMD_SFT9001_GET_IN_LEN 0 | ||
1186 | #define MC_CMD_SFT9001_GET_OUT_LEN 4 | ||
1187 | #define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0 | ||
1188 | |||
1189 | /* MC_CMD_SFT9001_SET: | ||
1190 | * Write the current Callisto-specific settings | ||
1191 | * | ||
1192 | * Locks required: None | ||
1193 | * Returns: 0, ETIME, EINVAL | ||
1194 | */ | ||
1195 | #define MC_CMD_SFT9001_SET 0x31 | ||
1196 | #define MC_CMD_SFT9001_SET_IN_LEN 4 | ||
1197 | #define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0 | ||
1198 | #define MC_CMD_SFT9001_SET_OUT_LEN 0 | ||
1199 | |||
1200 | |||
1201 | /* MC_CMD_WOL_FILTER_SET: | ||
1202 | * Set a WoL filter | ||
1203 | * | ||
1204 | * Locks required: None | ||
1205 | * Returns: 0, EBUSY, EINVAL, ENOSYS | ||
1206 | */ | ||
1207 | #define MC_CMD_WOL_FILTER_SET 0x32 | ||
1208 | #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */ | ||
1209 | #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 | ||
1210 | #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 | ||
1211 | |||
1212 | /* There is a union at offset 8; the following defines overlap | ||
1213 | * because of this */ | ||
1214 | #define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 | ||
1215 | |||
1216 | #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \ | ||
1217 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1218 | |||
1219 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \ | ||
1220 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1221 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \ | ||
1222 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4) | ||
1223 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \ | ||
1224 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8) | ||
1225 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \ | ||
1226 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10) | ||
1227 | |||
1228 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \ | ||
1229 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1230 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \ | ||
1231 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16) | ||
1232 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \ | ||
1233 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32) | ||
1234 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \ | ||
1235 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34) | ||
1236 | |||
1237 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \ | ||
1238 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1239 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \ | ||
1240 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48) | ||
1241 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \ | ||
1242 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176) | ||
1243 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \ | ||
1244 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177) | ||
1245 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \ | ||
1246 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178) | ||
1247 | |||
1248 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \ | ||
1249 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1250 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 | ||
1251 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 | ||
1252 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 | ||
1253 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 | ||
1254 | |||
1255 | #define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 | ||
1256 | #define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 | ||
1257 | |||
1258 | /* WOL Filter types enumeration */ | ||
1259 | #define MC_CMD_WOL_TYPE_MAGIC 0x0 | ||
1260 | /* unused 0x1 */ | ||
1261 | #define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 | ||
1262 | #define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 | ||
1263 | #define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 | ||
1264 | #define MC_CMD_WOL_TYPE_BITMAP 0x5 | ||
1265 | #define MC_CMD_WOL_TYPE_LINK 0x6 | ||
1266 | #define MC_CMD_WOL_TYPE_MAX 0x7 | ||
1267 | |||
1268 | #define MC_CMD_FILTER_MODE_SIMPLE 0x0 | ||
1269 | #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff | ||
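Because the defines from the DATA offset onwards describe a union, only the variant selected by WOL_TYPE is meaningful in any given request. A hypothetical sketch of packing an IPv4 SYN wake filter; the byte order of the IP address and port fields is an assumption here, not something this header states:

    /* Hypothetical sketch: build an IPv4 SYN wake-on-LAN filter request. */
    static void example_pack_wol_ipv4_syn(u8 *inbuf, __be32 src_ip, __be32 dst_ip,
    				       __be16 src_port, __be16 dst_port)
    {
    	memset(inbuf, 0, MC_CMD_WOL_FILTER_SET_IN_LEN);
    	put_unaligned_le32(MC_CMD_FILTER_MODE_SIMPLE,
    			   inbuf + MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST);
    	put_unaligned_le32(MC_CMD_WOL_TYPE_IPV4_SYN,
    			   inbuf + MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST);
    	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST,
    	       &src_ip, 4);
    	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST,
    	       &dst_ip, 4);
    	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST,
    	       &src_port, 2);
    	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST,
    	       &dst_port, 2);
    	/* The FILTER_ID dword returned in the response identifies this
    	 * filter for a later MC_CMD_WOL_FILTER_REMOVE. */
    }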
1270 | |||
1271 | /* MC_CMD_WOL_FILTER_REMOVE: | ||
1272 | * Remove a WoL filter | ||
1273 | * | ||
1274 | * Locks required: None | ||
1275 | * Returns: 0, EINVAL, ENOSYS | ||
1276 | */ | ||
1277 | #define MC_CMD_WOL_FILTER_REMOVE 0x33 | ||
1278 | #define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 | ||
1279 | #define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 | ||
1280 | #define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 | ||
1281 | |||
1282 | |||
1283 | /* MC_CMD_WOL_FILTER_RESET: | ||
1284 | * Reset (i.e. remove all) WoL filters | ||
1285 | * | ||
1286 | * Locks required: None | ||
1287 | * Returns: 0, ENOSYS | ||
1288 | */ | ||
1289 | #define MC_CMD_WOL_FILTER_RESET 0x34 | ||
1290 | #define MC_CMD_WOL_FILTER_RESET_IN_LEN 0 | ||
1291 | #define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 | ||
1292 | |||
1293 | /* MC_CMD_SET_MCAST_HASH: | ||
1294 | * Set the multicast hash value without otherwise | ||
1295 | * reconfiguring the MAC | ||
1296 | */ | ||
1297 | #define MC_CMD_SET_MCAST_HASH 0x35 | ||
1298 | #define MC_CMD_SET_MCAST_HASH_IN_LEN 32 | ||
1299 | #define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 | ||
1300 | #define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 | ||
1301 | #define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 | ||
1302 | |||
1303 | /* MC_CMD_NVRAM_TYPES: | ||
1304 | * Return bitfield indicating available types of virtual NVRAM partitions | ||
1305 | * | ||
1306 | * Locks required: none | ||
1307 | * Returns: 0 | ||
1308 | */ | ||
1309 | #define MC_CMD_NVRAM_TYPES 0x36 | ||
1310 | #define MC_CMD_NVRAM_TYPES_IN_LEN 0 | ||
1311 | #define MC_CMD_NVRAM_TYPES_OUT_LEN 4 | ||
1312 | #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 | ||
1313 | |||
1314 | /* Supported NVRAM types */ | ||
1315 | #define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0 | ||
1316 | #define MC_CMD_NVRAM_TYPE_MC_FW 1 | ||
1317 | #define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2 | ||
1318 | #define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3 | ||
1319 | #define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4 | ||
1320 | #define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5 | ||
1321 | #define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6 | ||
1322 | #define MC_CMD_NVRAM_TYPE_EXP_ROM 7 | ||
1323 | #define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8 | ||
1324 | #define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9 | ||
1325 | #define MC_CMD_NVRAM_TYPE_PHY_PORT0 10 | ||
1326 | #define MC_CMD_NVRAM_TYPE_PHY_PORT1 11 | ||
1327 | #define MC_CMD_NVRAM_TYPE_LOG 12 | ||
1328 | |||
1329 | /* MC_CMD_NVRAM_INFO: | ||
1330 | * Read info about a virtual NVRAM partition | ||
1331 | * | ||
1332 | * Locks required: none | ||
1333 | * Returns: 0, EINVAL (bad type) | ||
1334 | */ | ||
1335 | #define MC_CMD_NVRAM_INFO 0x37 | ||
1336 | #define MC_CMD_NVRAM_INFO_IN_LEN 4 | ||
1337 | #define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 | ||
1338 | #define MC_CMD_NVRAM_INFO_OUT_LEN 24 | ||
1339 | #define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 | ||
1340 | #define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 | ||
1341 | #define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 | ||
1342 | #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 | ||
1343 | #define MC_CMD_NVRAM_PROTECTED_LBN 0 | ||
1344 | #define MC_CMD_NVRAM_PROTECTED_WIDTH 1 | ||
1345 | #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 | ||
1346 | #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 | ||
1347 | |||
1348 | /* MC_CMD_NVRAM_UPDATE_START: | ||
1349 | * Start a group of update operations on a virtual NVRAM partition | ||
1350 | * | ||
1351 | * Locks required: PHY_LOCK if type==*PHY* | ||
1352 | * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held) | ||
1353 | */ | ||
1354 | #define MC_CMD_NVRAM_UPDATE_START 0x38 | ||
1355 | #define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 | ||
1356 | #define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 | ||
1357 | #define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0 | ||
1358 | |||
1359 | /* MC_CMD_NVRAM_READ: | ||
1360 | * Read data from a virtual NVRAM partition | ||
1361 | * | ||
1362 | * Locks required: PHY_LOCK if type==*PHY* | ||
1363 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1364 | */ | ||
1365 | #define MC_CMD_NVRAM_READ 0x39 | ||
1366 | #define MC_CMD_NVRAM_READ_IN_LEN 12 | ||
1367 | #define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 | ||
1368 | #define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 | ||
1369 | #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 | ||
1370 | #define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes) | ||
1371 | #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 | ||
1372 | |||
1373 | /* MC_CMD_NVRAM_WRITE: | ||
1374 | * Write data to a virtual NVRAM partition | ||
1375 | * | ||
1376 | * Locks required: PHY_LOCK if type==*PHY* | ||
1377 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1378 | */ | ||
1379 | #define MC_CMD_NVRAM_WRITE 0x3a | ||
1380 | #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 | ||
1381 | #define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 | ||
1382 | #define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 | ||
1383 | #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 | ||
1384 | #define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes) | ||
1385 | #define MC_CMD_NVRAM_WRITE_OUT_LEN 0 | ||
1386 | |||
1387 | /* MC_CMD_NVRAM_ERASE: | ||
1388 | * Erase sector(s) from a virtual NVRAM partition | ||
1389 | * | ||
1390 | * Locks required: PHY_LOCK if type==*PHY* | ||
1391 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1392 | */ | ||
1393 | #define MC_CMD_NVRAM_ERASE 0x3b | ||
1394 | #define MC_CMD_NVRAM_ERASE_IN_LEN 12 | ||
1395 | #define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 | ||
1396 | #define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 | ||
1397 | #define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 | ||
1398 | #define MC_CMD_NVRAM_ERASE_OUT_LEN 0 | ||
1399 | |||
1400 | /* MC_CMD_NVRAM_UPDATE_FINISH: | ||
1401 | * Finish a group of update operations on a virtual NVRAM partition | ||
1402 | * | ||
1403 | * Locks required: PHY_LOCK if type==*PHY* | ||
1404 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1405 | */ | ||
1406 | #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c | ||
1407 | #define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 | ||
1408 | #define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 | ||
1409 | #define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 | ||
1410 | #define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 | ||
1411 | |||
1412 | /* MC_CMD_REBOOT: | ||
1413 | * Reboot the MC. | ||
1414 | * | ||
1415 | * The AFTER_ASSERTION flag is intended to be used when the driver notices | ||
1416 | * an assertion failure (at which point it is expected to perform a complete | ||
1417 | * tear down and reinitialise), to allow both ports to reset the MC once | ||
1418 | * in an atomic fashion. | ||
1419 | * | ||
1420 | * Production MC firmware is generally compiled with REBOOT_ON_ASSERT=1, | ||
1421 | * which means that it will automatically reboot out of the assertion | ||
1422 | * handler, so this is in practice an optional operation. It is still | ||
1423 | * recommended that drivers execute this to support custom firmwares | ||
1424 | * with REBOOT_ON_ASSERT=0. | ||
1425 | * | ||
1426 | * Locks required: NONE | ||
1427 | * Returns: Nothing. You get back a response with ERR=1, DATALEN=0 | ||
1428 | */ | ||
1429 | #define MC_CMD_REBOOT 0x3d | ||
1430 | #define MC_CMD_REBOOT_IN_LEN 4 | ||
1431 | #define MC_CMD_REBOOT_IN_FLAGS_OFST 0 | ||
1432 | #define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1 | ||
1433 | #define MC_CMD_REBOOT_OUT_LEN 0 | ||
1434 | |||
1435 | /* MC_CMD_SCHEDINFO: | ||
1436 | * Request scheduler info from the MC. | ||
1437 | * | ||
1438 | * Locks required: NONE | ||
1439 | * Returns: An array of (timeslice,maximum overrun), one for each thread, | ||
1440 | * in ascending order of thread address. | ||
1441 | */ | ||
1442 | #define MC_CMD_SCHEDINFO 0x3e | ||
1443 | #define MC_CMD_SCHEDINFO_IN_LEN 0 | ||
1444 | |||
1445 | |||
1446 | /* MC_CMD_REBOOT_MODE: (debug) | ||
1447 | * Set the mode for the next MC reboot. | ||
1448 | * | ||
1449 | * Locks required: NONE | ||
1450 | * | ||
1451 | * Sets the reboot mode to the specified value. Returns the old mode. | ||
1452 | */ | ||
1453 | #define MC_CMD_REBOOT_MODE 0x3f | ||
1454 | #define MC_CMD_REBOOT_MODE_IN_LEN 4 | ||
1455 | #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 | ||
1456 | #define MC_CMD_REBOOT_MODE_OUT_LEN 4 | ||
1457 | #define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 | ||
1458 | #define MC_CMD_REBOOT_MODE_NORMAL 0 | ||
1459 | #define MC_CMD_REBOOT_MODE_SNAPPER 3 | ||
1460 | |||
1461 | /* MC_CMD_DEBUG_LOG: | ||
1462 | * Null request/response command (debug) | ||
1463 | * - sequence number is always zero | ||
1464 | * - only supported on the UART interface | ||
1465 | * (the same set of bytes is delivered as an | ||
1466 | * event over PCI) | ||
1467 | */ | ||
1468 | #define MC_CMD_DEBUG_LOG 0x40 | ||
1469 | #define MC_CMD_DEBUG_LOG_IN_LEN 0 | ||
1470 | #define MC_CMD_DEBUG_LOG_OUT_LEN 0 | ||
1471 | |||
1472 | /* Generic sensor enumeration. Note that a dual port NIC | ||
1473 | * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and | ||
1474 | * PHY1_TEMP depending on whether there is a single sensor | ||
1475 | * in the vicinity of the two ports, or one per port. | ||
1476 | */ | ||
1477 | #define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */ | ||
1478 | #define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */ | ||
1479 | #define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */ | ||
1480 | #define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */ | ||
1481 | #define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */ | ||
1482 | #define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */ | ||
1483 | #define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */ | ||
1484 | #define MC_CMD_SENSOR_IN_1V0 7 /* mV */ | ||
1485 | #define MC_CMD_SENSOR_IN_1V2 8 /* mV */ | ||
1486 | #define MC_CMD_SENSOR_IN_1V8 9 /* mV */ | ||
1487 | #define MC_CMD_SENSOR_IN_2V5 10 /* mV */ | ||
1488 | #define MC_CMD_SENSOR_IN_3V3 11 /* mV */ | ||
1489 | #define MC_CMD_SENSOR_IN_12V0 12 /* mV */ | ||
1490 | |||
1491 | |||
1492 | /* Sensor state */ | ||
1493 | #define MC_CMD_SENSOR_STATE_OK 0 | ||
1494 | #define MC_CMD_SENSOR_STATE_WARNING 1 | ||
1495 | #define MC_CMD_SENSOR_STATE_FATAL 2 | ||
1496 | #define MC_CMD_SENSOR_STATE_BROKEN 3 | ||
1497 | |||
1498 | /* MC_CMD_SENSOR_INFO: | ||
1499 | * Returns information about every available sensor. | ||
1500 | * | ||
1501 | * Each sensor has a single (16bit) value, and a corresponding state. | ||
1502 | * The mapping between value and sensor is nominally determined by the | ||
1503 | * MC, but in practice is implemented as zero (BROKEN), one (TEMPERATURE), | ||
1504 | * or two (VOLTAGE) ranges per sensor per state. | ||
1505 | * | ||
1506 | * This call returns a mask (32bit) of the sensors that are supported | ||
1507 | * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte | ||
1508 | * offsets to the per-sensor arrays. Each sensor array has four 16bit | ||
1509 | * numbers, min1, max1, min2, max2. | ||
1510 | * | ||
1511 | * Locks required: None | ||
1512 | * Returns: 0 | ||
1513 | */ | ||
1514 | #define MC_CMD_SENSOR_INFO 0x41 | ||
1515 | #define MC_CMD_SENSOR_INFO_IN_LEN 0 | ||
1516 | #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 | ||
1517 | #define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ | ||
1518 | (4 + (_x)) | ||
1519 | #define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \ | ||
1520 | ((_ofst) + 0) | ||
1521 | #define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \ | ||
1522 | ((_ofst) + 2) | ||
1523 | #define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \ | ||
1524 | ((_ofst) + 4) | ||
1525 | #define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \ | ||
1526 | ((_ofst) + 6) | ||
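The response is a supported-sensor mask followed by a table of byte offsets, each pointing at a block of four 16-bit limits. A hypothetical sketch of decoding one sensor's limit block given its byte offset; 16-bit little-endian fields are assumed, which is a reading of the comment above rather than something the header states:

    /* Hypothetical sketch: given the byte offset of one sensor's limit block
     * within a MC_CMD_SENSOR_INFO response, decode its four 16-bit limits. */
    static void example_decode_sensor_limits(const u8 *outbuf, unsigned int ofst,
    					 u16 *min1, u16 *max1,
    					 u16 *min2, u16 *max2)
    {
    	*min1 = get_unaligned_le16(outbuf + MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(ofst));
    	*max1 = get_unaligned_le16(outbuf + MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(ofst));
    	*min2 = get_unaligned_le16(outbuf + MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(ofst));
    	*max2 = get_unaligned_le16(outbuf + MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(ofst));
    }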
1527 | |||
1528 | /* MC_CMD_READ_SENSORS | ||
1529 | * Returns the current reading from each sensor | ||
1530 | * | ||
1531 | * Returns a sparse array of sensor readings (indexed by the sensor | ||
1532 | * type) into host memory. Each array element is a dword. | ||
1533 | * | ||
1534 | * The MC will send a SENSOREVT event every time any sensor changes state. The | ||
1535 | * driver is responsible for ensuring that it doesn't miss any events. The board | ||
1536 | * will function normally if all sensors are in STATE_OK or STATE_WARNING. | ||
1537 | * Otherwise the board should not be expected to function. | ||
1538 | */ | ||
1539 | #define MC_CMD_READ_SENSORS 0x42 | ||
1540 | #define MC_CMD_READ_SENSORS_IN_LEN 8 | ||
1541 | #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 | ||
1542 | #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 | ||
1543 | #define MC_CMD_READ_SENSORS_OUT_LEN 0 | ||
1544 | |||
1545 | /* Sensor reading fields */ | ||
1546 | #define MC_CMD_READ_SENSOR_VALUE_LBN 0 | ||
1547 | #define MC_CMD_READ_SENSOR_VALUE_WIDTH 16 | ||
1548 | #define MC_CMD_READ_SENSOR_STATE_LBN 16 | ||
1549 | #define MC_CMD_READ_SENSOR_STATE_WIDTH 8 | ||
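Each DMA'd reading is a single dword combining a 16-bit value and an 8-bit state, per the field defines above. A hypothetical two-line decode:

    /* Hypothetical sketch: split a sensor reading dword into value and state. */
    static void example_decode_sensor_reading(u32 reading, u16 *value, u8 *state)
    {
    	*value = (reading >> MC_CMD_READ_SENSOR_VALUE_LBN) & 0xffff;
    	*state = (reading >> MC_CMD_READ_SENSOR_STATE_LBN) & 0xff;
    }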
1550 | |||
1551 | |||
1552 | /* MC_CMD_GET_PHY_STATE: | ||
1553 | * Report current state of PHY. A "zombie" PHY is a PHY that has failed to | ||
1554 | * boot (e.g. due to missing or corrupted firmware). | ||
1555 | * | ||
1556 | * Locks required: None | ||
1557 | * Return code: 0 | ||
1558 | */ | ||
1559 | #define MC_CMD_GET_PHY_STATE 0x43 | ||
1560 | |||
1561 | #define MC_CMD_GET_PHY_STATE_IN_LEN 0 | ||
1562 | #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 | ||
1563 | #define MC_CMD_GET_PHY_STATE_STATE_OFST 0 | ||
1564 | /* PHY state enumeration: */ | ||
1565 | #define MC_CMD_PHY_STATE_OK 1 | ||
1566 | #define MC_CMD_PHY_STATE_ZOMBIE 2 | ||
1567 | |||
1568 | |||
1569 | /* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to | ||
1570 | * disable 802.1Qbb for a given priority. */ | ||
1571 | #define MC_CMD_SETUP_8021QBB 0x44 | ||
1572 | #define MC_CMD_SETUP_8021QBB_IN_LEN 32 | ||
1573 | #define MC_CMD_SETUP_8021QBB_OUT_LEN 0 | ||
1574 | #define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0 | ||
1575 | |||
1576 | |||
1577 | /* MC_CMD_WOL_FILTER_GET: | ||
1578 | * Retrieve ID of any WoL filters | ||
1579 | * | ||
1580 | * Locks required: None | ||
1581 | * Returns: 0, ENOSYS | ||
1582 | */ | ||
1583 | #define MC_CMD_WOL_FILTER_GET 0x45 | ||
1584 | #define MC_CMD_WOL_FILTER_GET_IN_LEN 0 | ||
1585 | #define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 | ||
1586 | #define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 | ||
1587 | |||
1588 | |||
1589 | /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD: | ||
1590 | * Offload a protocol to NIC for lights-out state | ||
1591 | * | ||
1592 | * Locks required: None | ||
1593 | * Returns: 0, ENOSYS | ||
1594 | */ | ||
1595 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 | ||
1596 | |||
1597 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16 | ||
1598 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 | ||
1599 | |||
1600 | /* There is a union at offset 4; the following defines overlap | ||
1601 | * because of this */ | ||
1602 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 | ||
1603 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4 | ||
1604 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10 | ||
1605 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4 | ||
1606 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10 | ||
1607 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26 | ||
1608 | |||
1609 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 | ||
1610 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 | ||
1611 | |||
1612 | |||
1613 | /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD: | ||
1614 | * Remove a lights-out protocol offload from the NIC | ||
1615 | * | ||
1616 | * Locks required: None | ||
1617 | * Returns: 0, ENOSYS | ||
1618 | */ | ||
1619 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 | ||
1620 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 | ||
1621 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 | ||
1622 | |||
1623 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 | ||
1624 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 | ||
1625 | |||
1626 | /* Lights-out offload protocols enumeration */ | ||
1627 | #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 | ||
1628 | #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 | ||
1629 | |||
1630 | |||
1631 | /* MC_CMD_MAC_RESET_RESTORE: | ||
1632 | * Restore MAC after block reset | ||
1633 | * | ||
1634 | * Locks required: None | ||
1635 | * Returns: 0 | ||
1636 | */ | ||
1637 | |||
1638 | #define MC_CMD_MAC_RESET_RESTORE 0x48 | ||
1639 | #define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 | ||
1640 | #define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 | ||
1641 | |||
1642 | |||
1643 | /* MC_CMD_TESTASSERT: | ||
1644 | * Deliberately trigger an assert-detonation in the firmware for testing | ||
1645 | * purposes (i.e. to allow testing that the driver copes gracefully). | ||
1646 | * | ||
1647 | * Locks required: None | ||
1648 | * Returns: 0 | ||
1649 | */ | ||
1650 | |||
1651 | #define MC_CMD_TESTASSERT 0x49 | ||
1652 | #define MC_CMD_TESTASSERT_IN_LEN 0 | ||
1653 | #define MC_CMD_TESTASSERT_OUT_LEN 0 | ||
1654 | |||
1655 | /* MC_CMD_WORKAROUND 0x4a | ||
1656 | * | ||
1657 | * Enable/Disable a given workaround. The mcfw will return EINVAL if it | ||
1658 | * doesn't understand the given workaround number - which should not | ||
1659 | * be treated as a hard error by client code. | ||
1660 | * | ||
1661 | * This op does not imply any semantics about each workaround, that's between | ||
1662 | * the driver and the mcfw on a per-workaround basis. | ||
1663 | * | ||
1664 | * Locks required: None | ||
1665 | * Returns: 0, EINVAL | ||
1666 | */ | ||
1667 | #define MC_CMD_WORKAROUND 0x4a | ||
1668 | #define MC_CMD_WORKAROUND_IN_LEN 8 | ||
1669 | #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 | ||
1670 | #define MC_CMD_WORKAROUND_BUG17230 1 | ||
1671 | #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 | ||
1672 | #define MC_CMD_WORKAROUND_OUT_LEN 0 | ||
1673 | |||
1674 | /* MC_CMD_GET_PHY_MEDIA_INFO: | ||
1675 | * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for | ||
1676 | * SFP+ PHYs). | ||
1677 | * | ||
1678 | * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE); | ||
1679 | * the valid "page number" input values, and the output data, are interpreted | ||
1680 | * on a per-type basis. | ||
1681 | * | ||
1682 | * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address | ||
1683 | * 0xA0 offset 0 or 0x80. | ||
1684 | * Anything else: currently undefined. | ||
1685 | * | ||
1686 | * Locks required: None | ||
1687 | * Return code: 0 | ||
1688 | */ | ||
1689 | #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b | ||
1690 | #define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 | ||
1691 | #define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 | ||
1692 | #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes)) | ||
1693 | #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 | ||
1694 | #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 | ||
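A hypothetical sketch of fetching one 128-byte SFP+ module page as described above; the DATALEN dword at offset 0 reports how much data actually follows. Same assumptions as the earlier sketches (hypothetical helper name, little-endian dwords, efx_mcdi_rpc() as used in mcdi_phy.c):

    /* Hypothetical sketch: read SFP+ module EEPROM page 0 via
     * MC_CMD_GET_PHY_MEDIA_INFO (128 data bytes expected for SFP+). */
    static int example_read_sfp_page0(struct efx_nic *efx, u8 data[128])
    {
    	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
    	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128)];
    	size_t outlen;
    	u32 datalen;
    	int rc;

    	put_unaligned_le32(0, inbuf + MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST);
    	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
    			  outbuf, sizeof(outbuf), &outlen);
    	if (rc)
    		return rc;

    	datalen = get_unaligned_le32(
    		outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST);
    	if (outlen < MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128) || datalen < 128)
    		return -EMSGSIZE;

    	memcpy(data, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, 128);
    	return 0;
    }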
1695 | |||
1696 | /* MC_CMD_NVRAM_TEST: | ||
1697 | * Test a particular NVRAM partition for valid contents (where "valid" | ||
1698 | * depends on the type of partition). | ||
1699 | * | ||
1700 | * Locks required: None | ||
1701 | * Return code: 0 | ||
1702 | */ | ||
1703 | #define MC_CMD_NVRAM_TEST 0x4c | ||
1704 | #define MC_CMD_NVRAM_TEST_IN_LEN 4 | ||
1705 | #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 | ||
1706 | #define MC_CMD_NVRAM_TEST_OUT_LEN 4 | ||
1707 | #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 | ||
1708 | #define MC_CMD_NVRAM_TEST_PASS 0 | ||
1709 | #define MC_CMD_NVRAM_TEST_FAIL 1 | ||
1710 | #define MC_CMD_NVRAM_TEST_NOTSUPP 2 | ||
1711 | |||
1712 | /* MC_CMD_MRSFP_TWEAK: (debug) | ||
1713 | * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds. | ||
1714 | * I2C I/O expander bits are always read; if equaliser parameters are supplied, | ||
1715 | * they are configured first. | ||
1716 | * | ||
1717 | * Locks required: None | ||
1718 | * Return code: 0, EINVAL | ||
1719 | */ | ||
1720 | #define MC_CMD_MRSFP_TWEAK 0x4d | ||
1721 | #define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0 | ||
1722 | #define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16 | ||
1723 | #define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */ | ||
1724 | #define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */ | ||
1725 | #define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */ | ||
1726 | #define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */ | ||
1727 | #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 | ||
1728 | #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */ | ||
1729 | #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */ | ||
1730 | #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */ | ||
1731 | |||
1732 | /* Do NOT add new commands beyond 0x4f as part of 3.0: 0x50 - 0x7f will be | ||
1733 | * used for post-3.0 extensions. If you run out of space, look for gaps or | ||
1734 | * commands that are unused in the existing range. */ | ||
1735 | |||
1736 | #endif /* MCDI_PCOL_H */ | ||
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c new file mode 100644 index 000000000000..2f2354696663 --- /dev/null +++ b/drivers/net/sfc/mcdi_phy.c | |||
@@ -0,0 +1,609 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Driver for PHY-related operations via MCDI. | ||
12 | */ | ||
13 | |||
14 | #include <linux/slab.h> | ||
15 | #include "efx.h" | ||
16 | #include "phy.h" | ||
17 | #include "mcdi.h" | ||
18 | #include "mcdi_pcol.h" | ||
19 | #include "mdio_10g.h" | ||
20 | |||
21 | struct efx_mcdi_phy_cfg { | ||
22 | u32 flags; | ||
23 | u32 type; | ||
24 | u32 supported_cap; | ||
25 | u32 channel; | ||
26 | u32 port; | ||
27 | u32 stats_mask; | ||
28 | u8 name[20]; | ||
29 | u32 media; | ||
30 | u32 mmd_mask; | ||
31 | u8 revision[20]; | ||
32 | u32 forced_cap; | ||
33 | }; | ||
34 | |||
35 | static int | ||
36 | efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg) | ||
37 | { | ||
38 | u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; | ||
39 | size_t outlen; | ||
40 | int rc; | ||
41 | |||
42 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); | ||
43 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); | ||
44 | |||
45 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, | ||
46 | outbuf, sizeof(outbuf), &outlen); | ||
47 | if (rc) | ||
48 | goto fail; | ||
49 | |||
50 | if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { | ||
51 | rc = -EMSGSIZE; | ||
52 | goto fail; | ||
53 | } | ||
54 | |||
55 | cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); | ||
56 | cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); | ||
57 | cfg->supported_cap = | ||
58 | MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); | ||
59 | cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); | ||
60 | cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); | ||
61 | cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); | ||
62 | memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), | ||
63 | sizeof(cfg->name)); | ||
64 | cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); | ||
65 | cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); | ||
66 | memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), | ||
67 | sizeof(cfg->revision)); | ||
68 | |||
69 | return 0; | ||
70 | |||
71 | fail: | ||
72 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
73 | return rc; | ||
74 | } | ||
75 | |||
76 | static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, | ||
77 | u32 flags, u32 loopback_mode, | ||
78 | u32 loopback_speed) | ||
79 | { | ||
80 | u8 inbuf[MC_CMD_SET_LINK_IN_LEN]; | ||
81 | int rc; | ||
82 | |||
83 | BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); | ||
84 | |||
85 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); | ||
86 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); | ||
87 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); | ||
88 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); | ||
89 | |||
90 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), | ||
91 | NULL, 0, NULL); | ||
92 | if (rc) | ||
93 | goto fail; | ||
94 | |||
95 | return 0; | ||
96 | |||
97 | fail: | ||
98 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
99 | return rc; | ||
100 | } | ||
101 | |||
102 | static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) | ||
103 | { | ||
104 | u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN]; | ||
105 | size_t outlen; | ||
106 | int rc; | ||
107 | |||
108 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, | ||
109 | outbuf, sizeof(outbuf), &outlen); | ||
110 | if (rc) | ||
111 | goto fail; | ||
112 | |||
113 | if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { | ||
114 | rc = -EMSGSIZE; | ||
115 | goto fail; | ||
116 | } | ||
117 | |||
118 | *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED); | ||
119 | |||
120 | return 0; | ||
121 | |||
122 | fail: | ||
123 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
124 | return rc; | ||
125 | } | ||
126 | |||
127 | int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, | ||
128 | unsigned int prtad, unsigned int devad, u16 addr, | ||
129 | u16 *value_out, u32 *status_out) | ||
130 | { | ||
131 | u8 inbuf[MC_CMD_MDIO_READ_IN_LEN]; | ||
132 | u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN]; | ||
133 | size_t outlen; | ||
134 | int rc; | ||
135 | |||
136 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus); | ||
137 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); | ||
138 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); | ||
139 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); | ||
140 | |||
141 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), | ||
142 | outbuf, sizeof(outbuf), &outlen); | ||
143 | if (rc) | ||
144 | goto fail; | ||
145 | |||
146 | *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); | ||
147 | *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS); | ||
148 | return 0; | ||
149 | |||
150 | fail: | ||
151 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
152 | return rc; | ||
153 | } | ||
154 | |||
155 | int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, | ||
156 | unsigned int prtad, unsigned int devad, u16 addr, | ||
157 | u16 value, u32 *status_out) | ||
158 | { | ||
159 | u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN]; | ||
160 | u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN]; | ||
161 | size_t outlen; | ||
162 | int rc; | ||
163 | |||
164 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus); | ||
165 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); | ||
166 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); | ||
167 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); | ||
168 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); | ||
169 | |||
170 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), | ||
171 | outbuf, sizeof(outbuf), &outlen); | ||
172 | if (rc) | ||
173 | goto fail; | ||
174 | |||
175 | *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS); | ||
176 | return 0; | ||
177 | |||
178 | fail: | ||
179 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
180 | return rc; | ||
181 | } | ||
182 | |||
183 | static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | ||
184 | { | ||
185 | u32 result = 0; | ||
186 | |||
187 | switch (media) { | ||
188 | case MC_CMD_MEDIA_KX4: | ||
189 | result |= SUPPORTED_Backplane; | ||
190 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
191 | result |= SUPPORTED_1000baseKX_Full; | ||
192 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
193 | result |= SUPPORTED_10000baseKX4_Full; | ||
194 | break; | ||
195 | |||
196 | case MC_CMD_MEDIA_XFP: | ||
197 | case MC_CMD_MEDIA_SFP_PLUS: | ||
198 | result |= SUPPORTED_FIBRE; | ||
199 | break; | ||
200 | |||
201 | case MC_CMD_MEDIA_BASE_T: | ||
202 | result |= SUPPORTED_TP; | ||
203 | if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) | ||
204 | result |= SUPPORTED_10baseT_Half; | ||
205 | if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) | ||
206 | result |= SUPPORTED_10baseT_Full; | ||
207 | if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) | ||
208 | result |= SUPPORTED_100baseT_Half; | ||
209 | if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) | ||
210 | result |= SUPPORTED_100baseT_Full; | ||
211 | if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) | ||
212 | result |= SUPPORTED_1000baseT_Half; | ||
213 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
214 | result |= SUPPORTED_1000baseT_Full; | ||
215 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
216 | result |= SUPPORTED_10000baseT_Full; | ||
217 | break; | ||
218 | } | ||
219 | |||
220 | if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) | ||
221 | result |= SUPPORTED_Pause; | ||
222 | if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) | ||
223 | result |= SUPPORTED_Asym_Pause; | ||
224 | if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
225 | result |= SUPPORTED_Autoneg; | ||
226 | |||
227 | return result; | ||
228 | } | ||
229 | |||
230 | static u32 ethtool_to_mcdi_cap(u32 cap) | ||
231 | { | ||
232 | u32 result = 0; | ||
233 | |||
234 | if (cap & SUPPORTED_10baseT_Half) | ||
235 | result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); | ||
236 | if (cap & SUPPORTED_10baseT_Full) | ||
237 | result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); | ||
238 | if (cap & SUPPORTED_100baseT_Half) | ||
239 | result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); | ||
240 | if (cap & SUPPORTED_100baseT_Full) | ||
241 | result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); | ||
242 | if (cap & SUPPORTED_1000baseT_Half) | ||
243 | result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); | ||
244 | if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) | ||
245 | result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); | ||
246 | if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) | ||
247 | result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); | ||
248 | if (cap & SUPPORTED_Pause) | ||
249 | result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); | ||
250 | if (cap & SUPPORTED_Asym_Pause) | ||
251 | result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); | ||
252 | if (cap & SUPPORTED_Autoneg) | ||
253 | result |= (1 << MC_CMD_PHY_CAP_AN_LBN); | ||
254 | |||
255 | return result; | ||
256 | } | ||
257 | |||
258 | static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) | ||
259 | { | ||
260 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | ||
261 | enum efx_phy_mode mode, supported; | ||
262 | u32 flags; | ||
263 | |||
264 | /* TODO: Advertise the capabilities supported by this PHY */ | ||
265 | supported = 0; | ||
266 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN)) | ||
267 | supported |= PHY_MODE_TX_DISABLED; | ||
268 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN)) | ||
269 | supported |= PHY_MODE_LOW_POWER; | ||
270 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN)) | ||
271 | supported |= PHY_MODE_OFF; | ||
272 | |||
273 | mode = efx->phy_mode & supported; | ||
274 | |||
275 | flags = 0; | ||
276 | if (mode & PHY_MODE_TX_DISABLED) | ||
277 | flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN); | ||
278 | if (mode & PHY_MODE_LOW_POWER) | ||
279 | flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN); | ||
280 | if (mode & PHY_MODE_OFF) | ||
281 | flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN); | ||
282 | |||
283 | return flags; | ||
284 | } | ||
285 | |||
286 | static u32 mcdi_to_ethtool_media(u32 media) | ||
287 | { | ||
288 | switch (media) { | ||
289 | case MC_CMD_MEDIA_XAUI: | ||
290 | case MC_CMD_MEDIA_CX4: | ||
291 | case MC_CMD_MEDIA_KX4: | ||
292 | return PORT_OTHER; | ||
293 | |||
294 | case MC_CMD_MEDIA_XFP: | ||
295 | case MC_CMD_MEDIA_SFP_PLUS: | ||
296 | return PORT_FIBRE; | ||
297 | |||
298 | case MC_CMD_MEDIA_BASE_T: | ||
299 | return PORT_TP; | ||
300 | |||
301 | default: | ||
302 | return PORT_OTHER; | ||
303 | } | ||
304 | } | ||
305 | |||
306 | static int efx_mcdi_phy_probe(struct efx_nic *efx) | ||
307 | { | ||
308 | struct efx_mcdi_phy_cfg *phy_data; | ||
309 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
310 | u32 caps; | ||
311 | int rc; | ||
312 | |||
313 | /* Initialise and populate phy_data */ | ||
314 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
315 | if (phy_data == NULL) | ||
316 | return -ENOMEM; | ||
317 | |||
318 | rc = efx_mcdi_get_phy_cfg(efx, phy_data); | ||
319 | if (rc != 0) | ||
320 | goto fail; | ||
321 | |||
322 | /* Read initial link advertisement */ | ||
323 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
324 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
325 | outbuf, sizeof(outbuf), NULL); | ||
326 | if (rc) | ||
327 | goto fail; | ||
328 | |||
329 | /* Fill out nic state */ | ||
330 | efx->phy_data = phy_data; | ||
331 | efx->phy_type = phy_data->type; | ||
332 | |||
333 | efx->mdio_bus = phy_data->channel; | ||
334 | efx->mdio.prtad = phy_data->port; | ||
335 | efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); | ||
336 | efx->mdio.mode_support = 0; | ||
337 | if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) | ||
338 | efx->mdio.mode_support |= MDIO_SUPPORTS_C22; | ||
339 | if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) | ||
340 | efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
341 | |||
342 | caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); | ||
343 | if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
344 | efx->link_advertising = | ||
345 | mcdi_to_ethtool_cap(phy_data->media, caps); | ||
346 | else | ||
347 | phy_data->forced_cap = caps; | ||
348 | |||
349 | /* Assert that we can map efx -> mcdi loopback modes */ | ||
350 | BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); | ||
351 | BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); | ||
352 | BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); | ||
353 | BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); | ||
354 | BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); | ||
355 | BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); | ||
356 | BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); | ||
357 | BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); | ||
358 | BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); | ||
359 | BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); | ||
360 | BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); | ||
361 | BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); | ||
362 | BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); | ||
363 | BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); | ||
364 | BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); | ||
365 | BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); | ||
366 | BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); | ||
367 | BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); | ||
368 | BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); | ||
369 | BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); | ||
370 | BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); | ||
371 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); | ||
372 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); | ||
373 | BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); | ||
374 | BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); | ||
375 | BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); | ||
376 | BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); | ||
377 | |||
378 | rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); | ||
379 | if (rc != 0) | ||
380 | goto fail; | ||
381 | /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, | ||
382 | * but by convention we don't include it */ | ||
383 | efx->loopback_modes &= ~(1 << LOOPBACK_NONE); | ||
384 | |||
385 | /* Set the initial link mode */ | ||
386 | efx_mcdi_phy_decode_link( | ||
387 | efx, &efx->link_state, | ||
388 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), | ||
389 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), | ||
390 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); | ||
391 | |||
392 | /* Default to autonegotiated flow control if the PHY supports it */ | ||
393 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; | ||
394 | if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
395 | efx->wanted_fc |= EFX_FC_AUTO; | ||
396 | |||
397 | return 0; | ||
398 | |||
399 | fail: | ||
400 | kfree(phy_data); | ||
401 | return rc; | ||
402 | } | ||
403 | |||
404 | int efx_mcdi_phy_reconfigure(struct efx_nic *efx) | ||
405 | { | ||
406 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | ||
407 | u32 caps = (efx->link_advertising ? | ||
408 | ethtool_to_mcdi_cap(efx->link_advertising) : | ||
409 | phy_cfg->forced_cap); | ||
410 | |||
411 | return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), | ||
412 | efx->loopback_mode, 0); | ||
413 | } | ||
414 | |||
415 | void efx_mcdi_phy_decode_link(struct efx_nic *efx, | ||
416 | struct efx_link_state *link_state, | ||
417 | u32 speed, u32 flags, u32 fcntl) | ||
418 | { | ||
419 | switch (fcntl) { | ||
420 | case MC_CMD_FCNTL_AUTO: | ||
421 | WARN_ON(1); /* This is not a link mode */ | ||
422 | link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; | ||
423 | break; | ||
424 | case MC_CMD_FCNTL_BIDIR: | ||
425 | link_state->fc = EFX_FC_TX | EFX_FC_RX; | ||
426 | break; | ||
427 | case MC_CMD_FCNTL_RESPOND: | ||
428 | link_state->fc = EFX_FC_RX; | ||
429 | break; | ||
430 | default: | ||
431 | WARN_ON(1); | ||
432 | case MC_CMD_FCNTL_OFF: | ||
433 | link_state->fc = 0; | ||
434 | break; | ||
435 | } | ||
436 | |||
437 | link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN)); | ||
438 | link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN)); | ||
439 | link_state->speed = speed; | ||
440 | } | ||
441 | |||
442 | /* Verify that the forced flow control settings (!EFX_FC_AUTO) are | ||
443 | * supported by the link partner. Warn the user if this isn't the case. | ||
444 | */ | ||
445 | void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) | ||
446 | { | ||
447 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | ||
448 | u32 rmtadv; | ||
449 | |||
450 | /* The link partner capabilities are only relevant if the | ||
451 | * link supports flow control autonegotiation */ | ||
452 | if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
453 | return; | ||
454 | |||
455 | /* If flow control autoneg is supported and enabled, then fine */ | ||
456 | if (efx->wanted_fc & EFX_FC_AUTO) | ||
457 | return; | ||
458 | |||
459 | rmtadv = 0; | ||
460 | if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) | ||
461 | rmtadv |= ADVERTISED_Pause; | ||
462 | if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) | ||
463 | rmtadv |= ADVERTISED_Asym_Pause; | ||
464 | |||
465 | if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) | ||
466 | EFX_ERR(efx, "warning: link partner doesn't support " | ||
467 | "pause frames\n"); | ||
468 | } | ||
469 | |||
470 | static bool efx_mcdi_phy_poll(struct efx_nic *efx) | ||
471 | { | ||
472 | struct efx_link_state old_state = efx->link_state; | ||
473 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
474 | int rc; | ||
475 | |||
476 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
477 | |||
478 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
479 | |||
480 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
481 | outbuf, sizeof(outbuf), NULL); | ||
482 | if (rc) { | ||
483 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
484 | efx->link_state.up = false; | ||
485 | } else { | ||
486 | efx_mcdi_phy_decode_link( | ||
487 | efx, &efx->link_state, | ||
488 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), | ||
489 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), | ||
490 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); | ||
491 | } | ||
492 | |||
493 | return !efx_link_state_equal(&efx->link_state, &old_state); | ||
494 | } | ||
495 | |||
496 | static void efx_mcdi_phy_remove(struct efx_nic *efx) | ||
497 | { | ||
498 | struct efx_mcdi_phy_cfg *phy_data = efx->phy_data; | ||
499 | |||
500 | efx->phy_data = NULL; | ||
501 | kfree(phy_data); | ||
502 | } | ||
503 | |||
504 | static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
505 | { | ||
506 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | ||
507 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
508 | int rc; | ||
509 | |||
510 | ecmd->supported = | ||
511 | mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); | ||
512 | ecmd->advertising = efx->link_advertising; | ||
513 | ecmd->speed = efx->link_state.speed; | ||
514 | ecmd->duplex = efx->link_state.fd; | ||
515 | ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); | ||
516 | ecmd->phy_address = phy_cfg->port; | ||
517 | ecmd->transceiver = XCVR_INTERNAL; | ||
518 | ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); | ||
519 | ecmd->mdio_support = (efx->mdio.mode_support & | ||
520 | (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); | ||
521 | |||
522 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
523 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
524 | outbuf, sizeof(outbuf), NULL); | ||
525 | if (rc) { | ||
526 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
527 | return; | ||
528 | } | ||
529 | ecmd->lp_advertising = | ||
530 | mcdi_to_ethtool_cap(phy_cfg->media, | ||
531 | MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); | ||
532 | } | ||
533 | |||
534 | static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
535 | { | ||
536 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | ||
537 | u32 caps; | ||
538 | int rc; | ||
539 | |||
540 | if (ecmd->autoneg) { | ||
541 | caps = (ethtool_to_mcdi_cap(ecmd->advertising) | | ||
542 | 1 << MC_CMD_PHY_CAP_AN_LBN); | ||
543 | } else if (ecmd->duplex) { | ||
544 | switch (ecmd->speed) { | ||
545 | case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; | ||
546 | case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; | ||
547 | case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; | ||
548 | case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; | ||
549 | default: return -EINVAL; | ||
550 | } | ||
551 | } else { | ||
552 | switch (ecmd->speed) { | ||
553 | case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; | ||
554 | case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; | ||
555 | case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; | ||
556 | default: return -EINVAL; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), | ||
561 | efx->loopback_mode, 0); | ||
562 | if (rc) | ||
563 | return rc; | ||
564 | |||
565 | if (ecmd->autoneg) { | ||
566 | efx_link_set_advertising( | ||
567 | efx, ecmd->advertising | ADVERTISED_Autoneg); | ||
568 | phy_cfg->forced_cap = 0; | ||
569 | } else { | ||
570 | efx_link_set_advertising(efx, 0); | ||
571 | phy_cfg->forced_cap = caps; | ||
572 | } | ||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static int efx_mcdi_phy_test_alive(struct efx_nic *efx) | ||
577 | { | ||
578 | u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN]; | ||
579 | size_t outlen; | ||
580 | int rc; | ||
581 | |||
582 | BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); | ||
583 | |||
584 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, | ||
585 | outbuf, sizeof(outbuf), &outlen); | ||
586 | if (rc) | ||
587 | return rc; | ||
588 | |||
589 | if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) | ||
590 | return -EMSGSIZE; | ||
591 | if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) | ||
592 | return -EINVAL; | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | struct efx_phy_operations efx_mcdi_phy_ops = { | ||
598 | .probe = efx_mcdi_phy_probe, | ||
599 | .init = efx_port_dummy_op_int, | ||
600 | .reconfigure = efx_mcdi_phy_reconfigure, | ||
601 | .poll = efx_mcdi_phy_poll, | ||
602 | .fini = efx_port_dummy_op_void, | ||
603 | .remove = efx_mcdi_phy_remove, | ||
604 | .get_settings = efx_mcdi_phy_get_settings, | ||
605 | .set_settings = efx_mcdi_phy_set_settings, | ||
606 | .test_alive = efx_mcdi_phy_test_alive, | ||
607 | .run_tests = NULL, | ||
608 | .test_name = NULL, | ||
609 | }; | ||
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index 6c33459f9ea9..0548fcbbdcd0 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2006-2008 Solarflare Communications Inc. | 3 | * Copyright 2006-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -14,8 +14,8 @@ | |||
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include "net_driver.h" | 15 | #include "net_driver.h" |
16 | #include "mdio_10g.h" | 16 | #include "mdio_10g.h" |
17 | #include "boards.h" | ||
18 | #include "workarounds.h" | 17 | #include "workarounds.h" |
18 | #include "nic.h" | ||
19 | 19 | ||
20 | unsigned efx_mdio_id_oui(u32 id) | 20 | unsigned efx_mdio_id_oui(u32 id) |
21 | { | 21 | { |
@@ -174,7 +174,7 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
174 | * of mmd's */ | 174 | * of mmd's */ |
175 | if (LOOPBACK_INTERNAL(efx)) | 175 | if (LOOPBACK_INTERNAL(efx)) |
176 | return true; | 176 | return true; |
177 | else if (efx->loopback_mode == LOOPBACK_NETWORK) | 177 | else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS) |
178 | return false; | 178 | return false; |
179 | else if (efx_phy_mode_disabled(efx->phy_mode)) | 179 | else if (efx_phy_mode_disabled(efx->phy_mode)) |
180 | return false; | 180 | return false; |
@@ -211,7 +211,7 @@ void efx_mdio_phy_reconfigure(struct efx_nic *efx) | |||
211 | efx->loopback_mode == LOOPBACK_PCS); | 211 | efx->loopback_mode == LOOPBACK_PCS); |
212 | efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, | 212 | efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, |
213 | MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK, | 213 | MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK, |
214 | efx->loopback_mode == LOOPBACK_NETWORK); | 214 | efx->loopback_mode == LOOPBACK_PHYXS_WS); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, | 217 | static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, |
@@ -249,8 +249,6 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx, | |||
249 | int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 249 | int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) |
250 | { | 250 | { |
251 | struct ethtool_cmd prev; | 251 | struct ethtool_cmd prev; |
252 | u32 required; | ||
253 | int reg; | ||
254 | 252 | ||
255 | efx->phy_op->get_settings(efx, &prev); | 253 | efx->phy_op->get_settings(efx, &prev); |
256 | 254 | ||
@@ -266,86 +264,98 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | |||
266 | return -EINVAL; | 264 | return -EINVAL; |
267 | 265 | ||
268 | /* Check that PHY supports these settings */ | 266 | /* Check that PHY supports these settings */ |
269 | if (ecmd->autoneg) { | 267 | if (!ecmd->autoneg || |
270 | required = SUPPORTED_Autoneg; | 268 | (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported) |
271 | } else if (ecmd->duplex) { | ||
272 | switch (ecmd->speed) { | ||
273 | case SPEED_10: required = SUPPORTED_10baseT_Full; break; | ||
274 | case SPEED_100: required = SUPPORTED_100baseT_Full; break; | ||
275 | default: return -EINVAL; | ||
276 | } | ||
277 | } else { | ||
278 | switch (ecmd->speed) { | ||
279 | case SPEED_10: required = SUPPORTED_10baseT_Half; break; | ||
280 | case SPEED_100: required = SUPPORTED_100baseT_Half; break; | ||
281 | default: return -EINVAL; | ||
282 | } | ||
283 | } | ||
284 | required |= ecmd->advertising; | ||
285 | if (required & ~prev.supported) | ||
286 | return -EINVAL; | 269 | return -EINVAL; |
287 | 270 | ||
288 | if (ecmd->autoneg) { | 271 | efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg); |
289 | bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full | 272 | efx_mdio_an_reconfigure(efx); |
290 | || EFX_WORKAROUND_13204(efx)); | ||
291 | |||
292 | /* Set up the base page */ | ||
293 | reg = ADVERTISE_CSMA; | ||
294 | if (ecmd->advertising & ADVERTISED_10baseT_Half) | ||
295 | reg |= ADVERTISE_10HALF; | ||
296 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | ||
297 | reg |= ADVERTISE_10FULL; | ||
298 | if (ecmd->advertising & ADVERTISED_100baseT_Half) | ||
299 | reg |= ADVERTISE_100HALF; | ||
300 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | ||
301 | reg |= ADVERTISE_100FULL; | ||
302 | if (xnp) | ||
303 | reg |= ADVERTISE_RESV; | ||
304 | else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | | ||
305 | ADVERTISED_1000baseT_Full)) | ||
306 | reg |= ADVERTISE_NPAGE; | ||
307 | reg |= mii_advertise_flowctrl(efx->wanted_fc); | ||
308 | efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); | ||
309 | |||
310 | /* Set up the (extended) next page if necessary */ | ||
311 | if (efx->phy_op->set_npage_adv) | ||
312 | efx->phy_op->set_npage_adv(efx, ecmd->advertising); | ||
313 | |||
314 | /* Enable and restart AN */ | ||
315 | reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); | ||
316 | reg |= MDIO_AN_CTRL1_ENABLE; | ||
317 | if (!(EFX_WORKAROUND_15195(efx) && | ||
318 | LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) | ||
319 | reg |= MDIO_AN_CTRL1_RESTART; | ||
320 | if (xnp) | ||
321 | reg |= MDIO_AN_CTRL1_XNP; | ||
322 | else | ||
323 | reg &= ~MDIO_AN_CTRL1_XNP; | ||
324 | efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg); | ||
325 | } else { | ||
326 | /* Disable AN */ | ||
327 | efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1, | ||
328 | MDIO_AN_CTRL1_ENABLE, false); | ||
329 | |||
330 | /* Set the basic control bits */ | ||
331 | reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1); | ||
332 | reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX); | ||
333 | if (ecmd->speed == SPEED_100) | ||
334 | reg |= MDIO_PMA_CTRL1_SPEED100; | ||
335 | if (ecmd->duplex) | ||
336 | reg |= MDIO_CTRL1_FULLDPLX; | ||
337 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg); | ||
338 | } | ||
339 | |||
340 | return 0; | 273 | return 0; |
341 | } | 274 | } |
342 | 275 | ||
276 | /** | ||
277 | * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation | ||
278 | * @efx: Efx NIC | ||
279 | */ | ||
280 | void efx_mdio_an_reconfigure(struct efx_nic *efx) | ||
281 | { | ||
282 | bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full | ||
283 | || EFX_WORKAROUND_13204(efx)); | ||
284 | int reg; | ||
285 | |||
286 | WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); | ||
287 | |||
288 | /* Set up the base page */ | ||
289 | reg = ADVERTISE_CSMA; | ||
290 | if (efx->link_advertising & ADVERTISED_10baseT_Half) | ||
291 | reg |= ADVERTISE_10HALF; | ||
292 | if (efx->link_advertising & ADVERTISED_10baseT_Full) | ||
293 | reg |= ADVERTISE_10FULL; | ||
294 | if (efx->link_advertising & ADVERTISED_100baseT_Half) | ||
295 | reg |= ADVERTISE_100HALF; | ||
296 | if (efx->link_advertising & ADVERTISED_100baseT_Full) | ||
297 | reg |= ADVERTISE_100FULL; | ||
298 | if (xnp) | ||
299 | reg |= ADVERTISE_RESV; | ||
300 | else if (efx->link_advertising & (ADVERTISED_1000baseT_Half | | ||
301 | ADVERTISED_1000baseT_Full)) | ||
302 | reg |= ADVERTISE_NPAGE; | ||
303 | if (efx->link_advertising & ADVERTISED_Pause) | ||
304 | reg |= ADVERTISE_PAUSE_CAP; | ||
305 | if (efx->link_advertising & ADVERTISED_Asym_Pause) | ||
306 | reg |= ADVERTISE_PAUSE_ASYM; | ||
307 | efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); | ||
308 | |||
309 | /* Set up the (extended) next page if necessary */ | ||
310 | if (efx->phy_op->set_npage_adv) | ||
311 | efx->phy_op->set_npage_adv(efx, efx->link_advertising); | ||
312 | |||
313 | /* Enable and restart AN */ | ||
314 | reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); | ||
315 | reg |= MDIO_AN_CTRL1_ENABLE; | ||
316 | if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx))) | ||
317 | reg |= MDIO_AN_CTRL1_RESTART; | ||
318 | if (xnp) | ||
319 | reg |= MDIO_AN_CTRL1_XNP; | ||
320 | else | ||
321 | reg &= ~MDIO_AN_CTRL1_XNP; | ||
322 | efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg); | ||
323 | } | ||
324 | |||
343 | enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) | 325 | enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) |
344 | { | 326 | { |
345 | int lpa; | 327 | BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX)); |
346 | 328 | ||
347 | if (!(efx->phy_op->mmds & MDIO_DEVS_AN)) | 329 | if (!(efx->wanted_fc & EFX_FC_AUTO)) |
348 | return efx->wanted_fc; | 330 | return efx->wanted_fc; |
349 | lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA); | 331 | |
350 | return efx_fc_resolve(efx->wanted_fc, lpa); | 332 | WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); |
333 | |||
334 | return mii_resolve_flowctrl_fdx( | ||
335 | mii_advertise_flowctrl(efx->wanted_fc), | ||
336 | efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA)); | ||
337 | } | ||
338 | |||
339 | int efx_mdio_test_alive(struct efx_nic *efx) | ||
340 | { | ||
341 | int rc; | ||
342 | int devad = __ffs(efx->mdio.mmds); | ||
343 | u16 physid1, physid2; | ||
344 | |||
345 | mutex_lock(&efx->mac_lock); | ||
346 | |||
347 | physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1); | ||
348 | physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2); | ||
349 | |||
350 | if ((physid1 == 0x0000) || (physid1 == 0xffff) || | ||
351 | (physid2 == 0x0000) || (physid2 == 0xffff)) { | ||
352 | EFX_ERR(efx, "no MDIO PHY present with ID %d\n", | ||
353 | efx->mdio.prtad); | ||
354 | rc = -EINVAL; | ||
355 | } else { | ||
356 | rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); | ||
357 | } | ||
358 | |||
359 | mutex_unlock(&efx->mac_lock); | ||
360 | return rc; | ||
351 | } | 361 | } |
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 6b14421a7444..f89e71929603 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2006-2008 Solarflare Communications Inc. | 3 | * Copyright 2006-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -17,7 +17,6 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include "efx.h" | 19 | #include "efx.h" |
20 | #include "boards.h" | ||
21 | 20 | ||
22 | static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } | 21 | static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } |
23 | static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } | 22 | static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } |
@@ -87,6 +86,9 @@ extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx, | |||
87 | /* Set (some of) the PHY settings over MDIO */ | 86 | /* Set (some of) the PHY settings over MDIO */ |
88 | extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); | 87 | extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); |
89 | 88 | ||
89 | /* Push advertising flags and restart autonegotiation */ | ||
90 | extern void efx_mdio_an_reconfigure(struct efx_nic *efx); | ||
91 | |||
90 | /* Get pause parameters from AN if available (otherwise return | 92 | /* Get pause parameters from AN if available (otherwise return |
91 | * requested pause parameters) | 93 | * requested pause parameters) |
92 | */ | 94 | */ |
@@ -104,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr, | |||
104 | mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state); | 106 | mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state); |
105 | } | 107 | } |
106 | 108 | ||
109 | /* Liveness self-test for MDIO PHYs */ | ||
110 | extern int efx_mdio_test_alive(struct efx_nic *efx); | ||
111 | |||
107 | #endif /* EFX_MDIO_10G_H */ | 112 | #endif /* EFX_MDIO_10G_H */ |
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c index 820c233c3ea0..f3ac7f30b5e7 100644 --- a/drivers/net/sfc/mtd.c +++ b/drivers/net/sfc/mtd.c | |||
@@ -1,36 +1,80 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/bitops.h> | ||
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
12 | #include <linux/mtd/mtd.h> | 13 | #include <linux/mtd/mtd.h> |
13 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/slab.h> | ||
16 | #include <linux/rtnetlink.h> | ||
14 | 17 | ||
15 | #define EFX_DRIVER_NAME "sfc_mtd" | 18 | #define EFX_DRIVER_NAME "sfc_mtd" |
16 | #include "net_driver.h" | 19 | #include "net_driver.h" |
17 | #include "spi.h" | 20 | #include "spi.h" |
18 | #include "efx.h" | 21 | #include "efx.h" |
22 | #include "nic.h" | ||
23 | #include "mcdi.h" | ||
24 | #include "mcdi_pcol.h" | ||
19 | 25 | ||
20 | #define EFX_SPI_VERIFY_BUF_LEN 16 | 26 | #define EFX_SPI_VERIFY_BUF_LEN 16 |
21 | 27 | ||
22 | struct efx_mtd { | 28 | struct efx_mtd_partition { |
23 | const struct efx_spi_device *spi; | ||
24 | struct mtd_info mtd; | 29 | struct mtd_info mtd; |
30 | union { | ||
31 | struct { | ||
32 | bool updating; | ||
33 | u8 nvram_type; | ||
34 | u16 fw_subtype; | ||
35 | } mcdi; | ||
36 | size_t offset; | ||
37 | }; | ||
38 | const char *type_name; | ||
25 | char name[IFNAMSIZ + 20]; | 39 | char name[IFNAMSIZ + 20]; |
26 | }; | 40 | }; |
27 | 41 | ||
42 | struct efx_mtd_ops { | ||
43 | int (*read)(struct mtd_info *mtd, loff_t start, size_t len, | ||
44 | size_t *retlen, u8 *buffer); | ||
45 | int (*erase)(struct mtd_info *mtd, loff_t start, size_t len); | ||
46 | int (*write)(struct mtd_info *mtd, loff_t start, size_t len, | ||
47 | size_t *retlen, const u8 *buffer); | ||
48 | int (*sync)(struct mtd_info *mtd); | ||
49 | }; | ||
50 | |||
51 | struct efx_mtd { | ||
52 | struct list_head node; | ||
53 | struct efx_nic *efx; | ||
54 | const struct efx_spi_device *spi; | ||
55 | const char *name; | ||
56 | const struct efx_mtd_ops *ops; | ||
57 | size_t n_parts; | ||
58 | struct efx_mtd_partition part[0]; | ||
59 | }; | ||
60 | |||
61 | #define efx_for_each_partition(part, efx_mtd) \ | ||
62 | for ((part) = &(efx_mtd)->part[0]; \ | ||
63 | (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \ | ||
64 | (part)++) | ||
65 | |||
66 | #define to_efx_mtd_partition(mtd) \ | ||
67 | container_of(mtd, struct efx_mtd_partition, mtd) | ||
68 | |||
69 | static int falcon_mtd_probe(struct efx_nic *efx); | ||
70 | static int siena_mtd_probe(struct efx_nic *efx); | ||
71 | |||
28 | /* SPI utilities */ | 72 | /* SPI utilities */ |
29 | 73 | ||
30 | static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) | 74 | static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) |
31 | { | 75 | { |
32 | const struct efx_spi_device *spi = efx_mtd->spi; | 76 | const struct efx_spi_device *spi = efx_mtd->spi; |
33 | struct efx_nic *efx = spi->efx; | 77 | struct efx_nic *efx = efx_mtd->efx; |
34 | u8 status; | 78 | u8 status; |
35 | int rc, i; | 79 | int rc, i; |
36 | 80 | ||
@@ -39,7 +83,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) | |||
39 | __set_current_state(uninterruptible ? | 83 | __set_current_state(uninterruptible ? |
40 | TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); | 84 | TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); |
41 | schedule_timeout(HZ / 10); | 85 | schedule_timeout(HZ / 10); |
42 | rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, | 86 | rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, |
43 | &status, sizeof(status)); | 87 | &status, sizeof(status)); |
44 | if (rc) | 88 | if (rc) |
45 | return rc; | 89 | return rc; |
@@ -52,32 +96,35 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) | |||
52 | return -ETIMEDOUT; | 96 | return -ETIMEDOUT; |
53 | } | 97 | } |
54 | 98 | ||
55 | static int efx_spi_unlock(const struct efx_spi_device *spi) | 99 | static int |
100 | efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi) | ||
56 | { | 101 | { |
57 | const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | | 102 | const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | |
58 | SPI_STATUS_BP0); | 103 | SPI_STATUS_BP0); |
59 | u8 status; | 104 | u8 status; |
60 | int rc; | 105 | int rc; |
61 | 106 | ||
62 | rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, &status, sizeof(status)); | 107 | rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, |
108 | &status, sizeof(status)); | ||
63 | if (rc) | 109 | if (rc) |
64 | return rc; | 110 | return rc; |
65 | 111 | ||
66 | if (!(status & unlock_mask)) | 112 | if (!(status & unlock_mask)) |
67 | return 0; /* already unlocked */ | 113 | return 0; /* already unlocked */ |
68 | 114 | ||
69 | rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); | 115 | rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); |
70 | if (rc) | 116 | if (rc) |
71 | return rc; | 117 | return rc; |
72 | rc = falcon_spi_cmd(spi, SPI_SST_EWSR, -1, NULL, NULL, 0); | 118 | rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0); |
73 | if (rc) | 119 | if (rc) |
74 | return rc; | 120 | return rc; |
75 | 121 | ||
76 | status &= ~unlock_mask; | 122 | status &= ~unlock_mask; |
77 | rc = falcon_spi_cmd(spi, SPI_WRSR, -1, &status, NULL, sizeof(status)); | 123 | rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status, |
124 | NULL, sizeof(status)); | ||
78 | if (rc) | 125 | if (rc) |
79 | return rc; | 126 | return rc; |
80 | rc = falcon_spi_wait_write(spi); | 127 | rc = falcon_spi_wait_write(efx, spi); |
81 | if (rc) | 128 | if (rc) |
82 | return rc; | 129 | return rc; |
83 | 130 | ||
@@ -87,6 +134,7 @@ static int efx_spi_unlock(const struct efx_spi_device *spi) | |||
87 | static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) | 134 | static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) |
88 | { | 135 | { |
89 | const struct efx_spi_device *spi = efx_mtd->spi; | 136 | const struct efx_spi_device *spi = efx_mtd->spi; |
137 | struct efx_nic *efx = efx_mtd->efx; | ||
90 | unsigned pos, block_len; | 138 | unsigned pos, block_len; |
91 | u8 empty[EFX_SPI_VERIFY_BUF_LEN]; | 139 | u8 empty[EFX_SPI_VERIFY_BUF_LEN]; |
92 | u8 buffer[EFX_SPI_VERIFY_BUF_LEN]; | 140 | u8 buffer[EFX_SPI_VERIFY_BUF_LEN]; |
@@ -98,13 +146,14 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) | |||
98 | if (spi->erase_command == 0) | 146 | if (spi->erase_command == 0) |
99 | return -EOPNOTSUPP; | 147 | return -EOPNOTSUPP; |
100 | 148 | ||
101 | rc = efx_spi_unlock(spi); | 149 | rc = efx_spi_unlock(efx, spi); |
102 | if (rc) | 150 | if (rc) |
103 | return rc; | 151 | return rc; |
104 | rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); | 152 | rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); |
105 | if (rc) | 153 | if (rc) |
106 | return rc; | 154 | return rc; |
107 | rc = falcon_spi_cmd(spi, spi->erase_command, start, NULL, NULL, 0); | 155 | rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL, |
156 | NULL, 0); | ||
108 | if (rc) | 157 | if (rc) |
109 | return rc; | 158 | return rc; |
110 | rc = efx_spi_slow_wait(efx_mtd, false); | 159 | rc = efx_spi_slow_wait(efx_mtd, false); |
@@ -113,7 +162,8 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) | |||
113 | memset(empty, 0xff, sizeof(empty)); | 162 | memset(empty, 0xff, sizeof(empty)); |
114 | for (pos = 0; pos < len; pos += block_len) { | 163 | for (pos = 0; pos < len; pos += block_len) { |
115 | block_len = min(len - pos, sizeof(buffer)); | 164 | block_len = min(len - pos, sizeof(buffer)); |
116 | rc = falcon_spi_read(spi, start + pos, block_len, NULL, buffer); | 165 | rc = falcon_spi_read(efx, spi, start + pos, block_len, |
166 | NULL, buffer); | ||
117 | if (rc) | 167 | if (rc) |
118 | return rc; | 168 | return rc; |
119 | if (memcmp(empty, buffer, block_len)) | 169 | if (memcmp(empty, buffer, block_len)) |
@@ -130,140 +180,473 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) | |||
130 | 180 | ||
131 | /* MTD interface */ | 181 | /* MTD interface */ |
132 | 182 | ||
133 | static int efx_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, | 183 | static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) |
134 | size_t *retlen, u8 *buffer) | ||
135 | { | 184 | { |
136 | struct efx_mtd *efx_mtd = mtd->priv; | 185 | struct efx_mtd *efx_mtd = mtd->priv; |
186 | int rc; | ||
187 | |||
188 | rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len); | ||
189 | if (rc == 0) { | ||
190 | erase->state = MTD_ERASE_DONE; | ||
191 | } else { | ||
192 | erase->state = MTD_ERASE_FAILED; | ||
193 | erase->fail_addr = 0xffffffff; | ||
194 | } | ||
195 | mtd_erase_callback(erase); | ||
196 | return rc; | ||
197 | } | ||
198 | |||
199 | static void efx_mtd_sync(struct mtd_info *mtd) | ||
200 | { | ||
201 | struct efx_mtd *efx_mtd = mtd->priv; | ||
202 | struct efx_nic *efx = efx_mtd->efx; | ||
203 | int rc; | ||
204 | |||
205 | rc = efx_mtd->ops->sync(mtd); | ||
206 | if (rc) | ||
207 | EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc); | ||
208 | } | ||
209 | |||
210 | static void efx_mtd_remove_partition(struct efx_mtd_partition *part) | ||
211 | { | ||
212 | int rc; | ||
213 | |||
214 | for (;;) { | ||
215 | rc = del_mtd_device(&part->mtd); | ||
216 | if (rc != -EBUSY) | ||
217 | break; | ||
218 | ssleep(1); | ||
219 | } | ||
220 | WARN_ON(rc); | ||
221 | } | ||
222 | |||
223 | static void efx_mtd_remove_device(struct efx_mtd *efx_mtd) | ||
224 | { | ||
225 | struct efx_mtd_partition *part; | ||
226 | |||
227 | efx_for_each_partition(part, efx_mtd) | ||
228 | efx_mtd_remove_partition(part); | ||
229 | list_del(&efx_mtd->node); | ||
230 | kfree(efx_mtd); | ||
231 | } | ||
232 | |||
233 | static void efx_mtd_rename_device(struct efx_mtd *efx_mtd) | ||
234 | { | ||
235 | struct efx_mtd_partition *part; | ||
236 | |||
237 | efx_for_each_partition(part, efx_mtd) | ||
238 | if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0) | ||
239 | snprintf(part->name, sizeof(part->name), | ||
240 | "%s %s:%02x", efx_mtd->efx->name, | ||
241 | part->type_name, part->mcdi.fw_subtype); | ||
242 | else | ||
243 | snprintf(part->name, sizeof(part->name), | ||
244 | "%s %s", efx_mtd->efx->name, | ||
245 | part->type_name); | ||
246 | } | ||
247 | |||
248 | static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd) | ||
249 | { | ||
250 | struct efx_mtd_partition *part; | ||
251 | |||
252 | efx_mtd->efx = efx; | ||
253 | |||
254 | efx_mtd_rename_device(efx_mtd); | ||
255 | |||
256 | efx_for_each_partition(part, efx_mtd) { | ||
257 | part->mtd.writesize = 1; | ||
258 | |||
259 | part->mtd.owner = THIS_MODULE; | ||
260 | part->mtd.priv = efx_mtd; | ||
261 | part->mtd.name = part->name; | ||
262 | part->mtd.erase = efx_mtd_erase; | ||
263 | part->mtd.read = efx_mtd->ops->read; | ||
264 | part->mtd.write = efx_mtd->ops->write; | ||
265 | part->mtd.sync = efx_mtd_sync; | ||
266 | |||
267 | if (add_mtd_device(&part->mtd)) | ||
268 | goto fail; | ||
269 | } | ||
270 | |||
271 | list_add(&efx_mtd->node, &efx->mtd_list); | ||
272 | return 0; | ||
273 | |||
274 | fail: | ||
275 | while (part != &efx_mtd->part[0]) { | ||
276 | --part; | ||
277 | efx_mtd_remove_partition(part); | ||
278 | } | ||
279 | /* add_mtd_device() returns 1 if the MTD table is full */ | ||
280 | return -ENOMEM; | ||
281 | } | ||
282 | |||
283 | void efx_mtd_remove(struct efx_nic *efx) | ||
284 | { | ||
285 | struct efx_mtd *efx_mtd, *next; | ||
286 | |||
287 | WARN_ON(efx_dev_registered(efx)); | ||
288 | |||
289 | list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node) | ||
290 | efx_mtd_remove_device(efx_mtd); | ||
291 | } | ||
292 | |||
293 | void efx_mtd_rename(struct efx_nic *efx) | ||
294 | { | ||
295 | struct efx_mtd *efx_mtd; | ||
296 | |||
297 | ASSERT_RTNL(); | ||
298 | |||
299 | list_for_each_entry(efx_mtd, &efx->mtd_list, node) | ||
300 | efx_mtd_rename_device(efx_mtd); | ||
301 | } | ||
302 | |||
303 | int efx_mtd_probe(struct efx_nic *efx) | ||
304 | { | ||
305 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
306 | return siena_mtd_probe(efx); | ||
307 | else | ||
308 | return falcon_mtd_probe(efx); | ||
309 | } | ||
310 | |||
311 | /* Implementation of MTD operations for Falcon */ | ||
312 | |||
313 | static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, | ||
314 | size_t len, size_t *retlen, u8 *buffer) | ||
315 | { | ||
316 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
317 | struct efx_mtd *efx_mtd = mtd->priv; | ||
137 | const struct efx_spi_device *spi = efx_mtd->spi; | 318 | const struct efx_spi_device *spi = efx_mtd->spi; |
138 | struct efx_nic *efx = spi->efx; | 319 | struct efx_nic *efx = efx_mtd->efx; |
139 | int rc; | 320 | int rc; |
140 | 321 | ||
141 | rc = mutex_lock_interruptible(&efx->spi_lock); | 322 | rc = mutex_lock_interruptible(&efx->spi_lock); |
142 | if (rc) | 323 | if (rc) |
143 | return rc; | 324 | return rc; |
144 | rc = falcon_spi_read(spi, FALCON_FLASH_BOOTCODE_START + start, | 325 | rc = falcon_spi_read(efx, spi, part->offset + start, len, |
145 | len, retlen, buffer); | 326 | retlen, buffer); |
146 | mutex_unlock(&efx->spi_lock); | 327 | mutex_unlock(&efx->spi_lock); |
147 | return rc; | 328 | return rc; |
148 | } | 329 | } |
149 | 330 | ||
150 | static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) | 331 | static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) |
151 | { | 332 | { |
333 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
152 | struct efx_mtd *efx_mtd = mtd->priv; | 334 | struct efx_mtd *efx_mtd = mtd->priv; |
153 | struct efx_nic *efx = efx_mtd->spi->efx; | 335 | struct efx_nic *efx = efx_mtd->efx; |
154 | int rc; | 336 | int rc; |
155 | 337 | ||
156 | rc = mutex_lock_interruptible(&efx->spi_lock); | 338 | rc = mutex_lock_interruptible(&efx->spi_lock); |
157 | if (rc) | 339 | if (rc) |
158 | return rc; | 340 | return rc; |
159 | rc = efx_spi_erase(efx_mtd, FALCON_FLASH_BOOTCODE_START + erase->addr, | 341 | rc = efx_spi_erase(efx_mtd, part->offset + start, len); |
160 | erase->len); | ||
161 | mutex_unlock(&efx->spi_lock); | 342 | mutex_unlock(&efx->spi_lock); |
162 | |||
163 | if (rc == 0) { | ||
164 | erase->state = MTD_ERASE_DONE; | ||
165 | } else { | ||
166 | erase->state = MTD_ERASE_FAILED; | ||
167 | erase->fail_addr = 0xffffffff; | ||
168 | } | ||
169 | mtd_erase_callback(erase); | ||
170 | return rc; | 343 | return rc; |
171 | } | 344 | } |
172 | 345 | ||
173 | static int efx_mtd_write(struct mtd_info *mtd, loff_t start, | 346 | static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, |
174 | size_t len, size_t *retlen, const u8 *buffer) | 347 | size_t len, size_t *retlen, const u8 *buffer) |
175 | { | 348 | { |
349 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
176 | struct efx_mtd *efx_mtd = mtd->priv; | 350 | struct efx_mtd *efx_mtd = mtd->priv; |
177 | const struct efx_spi_device *spi = efx_mtd->spi; | 351 | const struct efx_spi_device *spi = efx_mtd->spi; |
178 | struct efx_nic *efx = spi->efx; | 352 | struct efx_nic *efx = efx_mtd->efx; |
179 | int rc; | 353 | int rc; |
180 | 354 | ||
181 | rc = mutex_lock_interruptible(&efx->spi_lock); | 355 | rc = mutex_lock_interruptible(&efx->spi_lock); |
182 | if (rc) | 356 | if (rc) |
183 | return rc; | 357 | return rc; |
184 | rc = falcon_spi_write(spi, FALCON_FLASH_BOOTCODE_START + start, | 358 | rc = falcon_spi_write(efx, spi, part->offset + start, len, |
185 | len, retlen, buffer); | 359 | retlen, buffer); |
186 | mutex_unlock(&efx->spi_lock); | 360 | mutex_unlock(&efx->spi_lock); |
187 | return rc; | 361 | return rc; |
188 | } | 362 | } |
189 | 363 | ||
190 | static void efx_mtd_sync(struct mtd_info *mtd) | 364 | static int falcon_mtd_sync(struct mtd_info *mtd) |
191 | { | 365 | { |
192 | struct efx_mtd *efx_mtd = mtd->priv; | 366 | struct efx_mtd *efx_mtd = mtd->priv; |
193 | struct efx_nic *efx = efx_mtd->spi->efx; | 367 | struct efx_nic *efx = efx_mtd->efx; |
194 | int rc; | 368 | int rc; |
195 | 369 | ||
196 | mutex_lock(&efx->spi_lock); | 370 | mutex_lock(&efx->spi_lock); |
197 | rc = efx_spi_slow_wait(efx_mtd, true); | 371 | rc = efx_spi_slow_wait(efx_mtd, true); |
198 | mutex_unlock(&efx->spi_lock); | 372 | mutex_unlock(&efx->spi_lock); |
373 | return rc; | ||
374 | } | ||
375 | |||
376 | static struct efx_mtd_ops falcon_mtd_ops = { | ||
377 | .read = falcon_mtd_read, | ||
378 | .erase = falcon_mtd_erase, | ||
379 | .write = falcon_mtd_write, | ||
380 | .sync = falcon_mtd_sync, | ||
381 | }; | ||
382 | |||
383 | static int falcon_mtd_probe(struct efx_nic *efx) | ||
384 | { | ||
385 | struct efx_spi_device *spi = efx->spi_flash; | ||
386 | struct efx_mtd *efx_mtd; | ||
387 | int rc; | ||
388 | |||
389 | ASSERT_RTNL(); | ||
199 | 390 | ||
391 | if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) | ||
392 | return -ENODEV; | ||
393 | |||
394 | efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), | ||
395 | GFP_KERNEL); | ||
396 | if (!efx_mtd) | ||
397 | return -ENOMEM; | ||
398 | |||
399 | efx_mtd->spi = spi; | ||
400 | efx_mtd->name = "flash"; | ||
401 | efx_mtd->ops = &falcon_mtd_ops; | ||
402 | |||
403 | efx_mtd->n_parts = 1; | ||
404 | efx_mtd->part[0].mtd.type = MTD_NORFLASH; | ||
405 | efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH; | ||
406 | efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; | ||
407 | efx_mtd->part[0].mtd.erasesize = spi->erase_size; | ||
408 | efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START; | ||
409 | efx_mtd->part[0].type_name = "sfc_flash_bootrom"; | ||
410 | |||
411 | rc = efx_mtd_probe_device(efx, efx_mtd); | ||
200 | if (rc) | 412 | if (rc) |
201 | EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc); | 413 | kfree(efx_mtd); |
202 | return; | 414 | return rc; |
203 | } | 415 | } |
204 | 416 | ||
205 | void efx_mtd_remove(struct efx_nic *efx) | 417 | /* Implementation of MTD operations for Siena */ |
418 | |||
419 | static int siena_mtd_read(struct mtd_info *mtd, loff_t start, | ||
420 | size_t len, size_t *retlen, u8 *buffer) | ||
206 | { | 421 | { |
207 | if (efx->spi_flash && efx->spi_flash->mtd) { | 422 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); |
208 | struct efx_mtd *efx_mtd = efx->spi_flash->mtd; | 423 | struct efx_mtd *efx_mtd = mtd->priv; |
209 | int rc; | 424 | struct efx_nic *efx = efx_mtd->efx; |
210 | 425 | loff_t offset = start; | |
211 | for (;;) { | 426 | loff_t end = min_t(loff_t, start + len, mtd->size); |
212 | rc = del_mtd_device(&efx_mtd->mtd); | 427 | size_t chunk; |
213 | if (rc != -EBUSY) | 428 | int rc = 0; |
214 | break; | 429 | |
215 | ssleep(1); | 430 | while (offset < end) { |
216 | } | 431 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); |
217 | WARN_ON(rc); | 432 | rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, |
218 | kfree(efx_mtd); | 433 | buffer, chunk); |
434 | if (rc) | ||
435 | goto out; | ||
436 | offset += chunk; | ||
437 | buffer += chunk; | ||
219 | } | 438 | } |
439 | out: | ||
440 | *retlen = offset - start; | ||
441 | return rc; | ||
220 | } | 442 | } |
221 | 443 | ||
222 | void efx_mtd_rename(struct efx_nic *efx) | 444 | static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) |
223 | { | 445 | { |
224 | if (efx->spi_flash && efx->spi_flash->mtd) { | 446 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); |
225 | struct efx_mtd *efx_mtd = efx->spi_flash->mtd; | 447 | struct efx_mtd *efx_mtd = mtd->priv; |
226 | snprintf(efx_mtd->name, sizeof(efx_mtd->name), | 448 | struct efx_nic *efx = efx_mtd->efx; |
227 | "%s sfc_flash_bootrom", efx->name); | 449 | loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); |
450 | loff_t end = min_t(loff_t, start + len, mtd->size); | ||
451 | size_t chunk = part->mtd.erasesize; | ||
452 | int rc = 0; | ||
453 | |||
454 | if (!part->mcdi.updating) { | ||
455 | rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); | ||
456 | if (rc) | ||
457 | goto out; | ||
458 | part->mcdi.updating = 1; | ||
459 | } | ||
460 | |||
461 | /* The MCDI interface can in fact do multiple erase blocks at once; | ||
462 | * but erasing may be slow, so we make multiple calls here to avoid | ||
463 | * tripping the MCDI RPC timeout. */ | ||
464 | while (offset < end) { | ||
465 | rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset, | ||
466 | chunk); | ||
467 | if (rc) | ||
468 | goto out; | ||
469 | offset += chunk; | ||
228 | } | 470 | } |
471 | out: | ||
472 | return rc; | ||
229 | } | 473 | } |
230 | 474 | ||
231 | int efx_mtd_probe(struct efx_nic *efx) | 475 | static int siena_mtd_write(struct mtd_info *mtd, loff_t start, |
476 | size_t len, size_t *retlen, const u8 *buffer) | ||
232 | { | 477 | { |
233 | struct efx_spi_device *spi = efx->spi_flash; | 478 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); |
234 | struct efx_mtd *efx_mtd; | 479 | struct efx_mtd *efx_mtd = mtd->priv; |
480 | struct efx_nic *efx = efx_mtd->efx; | ||
481 | loff_t offset = start; | ||
482 | loff_t end = min_t(loff_t, start + len, mtd->size); | ||
483 | size_t chunk; | ||
484 | int rc = 0; | ||
485 | |||
486 | if (!part->mcdi.updating) { | ||
487 | rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); | ||
488 | if (rc) | ||
489 | goto out; | ||
490 | part->mcdi.updating = 1; | ||
491 | } | ||
235 | 492 | ||
236 | if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) | 493 | while (offset < end) { |
494 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); | ||
495 | rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, | ||
496 | buffer, chunk); | ||
497 | if (rc) | ||
498 | goto out; | ||
499 | offset += chunk; | ||
500 | buffer += chunk; | ||
501 | } | ||
502 | out: | ||
503 | *retlen = offset - start; | ||
504 | return rc; | ||
505 | } | ||
506 | |||
507 | static int siena_mtd_sync(struct mtd_info *mtd) | ||
508 | { | ||
509 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
510 | struct efx_mtd *efx_mtd = mtd->priv; | ||
511 | struct efx_nic *efx = efx_mtd->efx; | ||
512 | int rc = 0; | ||
513 | |||
514 | if (part->mcdi.updating) { | ||
515 | part->mcdi.updating = 0; | ||
516 | rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type); | ||
517 | } | ||
518 | |||
519 | return rc; | ||
520 | } | ||
521 | |||
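Taken together, the erase, write and sync handlers bracket every NVRAM modification between a single MCDI update_start and update_finish. The resulting call sequence for one partition looks roughly like this (offsets and lengths are placeholders, not values from this patch):

        efx_mcdi_nvram_update_start(efx, type);         /* first erase or write */
        efx_mcdi_nvram_erase(efx, type, offset, erase_size);
        efx_mcdi_nvram_write(efx, type, offset, buffer, chunk);
        efx_mcdi_nvram_update_finish(efx, type);        /* from siena_mtd_sync() */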
522 | static struct efx_mtd_ops siena_mtd_ops = { | ||
523 | .read = siena_mtd_read, | ||
524 | .erase = siena_mtd_erase, | ||
525 | .write = siena_mtd_write, | ||
526 | .sync = siena_mtd_sync, | ||
527 | }; | ||
528 | |||
529 | struct siena_nvram_type_info { | ||
530 | int port; | ||
531 | const char *name; | ||
532 | }; | ||
533 | |||
534 | static struct siena_nvram_type_info siena_nvram_types[] = { | ||
535 | [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, | ||
536 | [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, | ||
537 | [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, | ||
538 | [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, | ||
539 | [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, | ||
540 | [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, | ||
541 | [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, | ||
542 | [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, | ||
543 | [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, | ||
544 | [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, | ||
545 | [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, | ||
546 | [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, | ||
547 | }; | ||
548 | |||
549 | static int siena_mtd_probe_partition(struct efx_nic *efx, | ||
550 | struct efx_mtd *efx_mtd, | ||
551 | unsigned int part_id, | ||
552 | unsigned int type) | ||
553 | { | ||
554 | struct efx_mtd_partition *part = &efx_mtd->part[part_id]; | ||
555 | struct siena_nvram_type_info *info; | ||
556 | size_t size, erase_size; | ||
557 | bool protected; | ||
558 | int rc; | ||
559 | |||
560 | if (type >= ARRAY_SIZE(siena_nvram_types)) | ||
237 | return -ENODEV; | 561 | return -ENODEV; |
238 | 562 | ||
239 | efx_mtd = kzalloc(sizeof(*efx_mtd), GFP_KERNEL); | 563 | info = &siena_nvram_types[type]; |
564 | |||
565 | if (info->port != efx_port_num(efx)) | ||
566 | return -ENODEV; | ||
567 | |||
568 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); | ||
569 | if (rc) | ||
570 | return rc; | ||
571 | if (protected) | ||
572 | return -ENODEV; /* hide it */ | ||
573 | |||
574 | part->mcdi.nvram_type = type; | ||
575 | part->type_name = info->name; | ||
576 | |||
577 | part->mtd.type = MTD_NORFLASH; | ||
578 | part->mtd.flags = MTD_CAP_NORFLASH; | ||
579 | part->mtd.size = size; | ||
580 | part->mtd.erasesize = erase_size; | ||
581 | |||
582 | return 0; | ||
583 | } | ||
584 | |||
585 | static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, | ||
586 | struct efx_mtd *efx_mtd) | ||
587 | { | ||
588 | struct efx_mtd_partition *part; | ||
589 | uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / | ||
590 | sizeof(uint16_t)]; | ||
591 | int rc; | ||
592 | |||
593 | rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list); | ||
594 | if (rc) | ||
595 | return rc; | ||
596 | |||
597 | efx_for_each_partition(part, efx_mtd) | ||
598 | part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type]; | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static int siena_mtd_probe(struct efx_nic *efx) | ||
604 | { | ||
605 | struct efx_mtd *efx_mtd; | ||
606 | int rc = -ENODEV; | ||
607 | u32 nvram_types; | ||
608 | unsigned int type; | ||
609 | |||
610 | ASSERT_RTNL(); | ||
611 | |||
612 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | ||
613 | if (rc) | ||
614 | return rc; | ||
615 | |||
616 | efx_mtd = kzalloc(sizeof(*efx_mtd) + | ||
617 | hweight32(nvram_types) * sizeof(efx_mtd->part[0]), | ||
618 | GFP_KERNEL); | ||
240 | if (!efx_mtd) | 619 | if (!efx_mtd) |
241 | return -ENOMEM; | 620 | return -ENOMEM; |
242 | 621 | ||
243 | efx_mtd->spi = spi; | 622 | efx_mtd->name = "Siena NVRAM manager"; |
244 | spi->mtd = efx_mtd; | 623 | |
245 | 624 | efx_mtd->ops = &siena_mtd_ops; | |
246 | efx_mtd->mtd.type = MTD_NORFLASH; | 625 | |
247 | efx_mtd->mtd.flags = MTD_CAP_NORFLASH; | 626 | type = 0; |
248 | efx_mtd->mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; | 627 | efx_mtd->n_parts = 0; |
249 | efx_mtd->mtd.erasesize = spi->erase_size; | 628 | |
250 | efx_mtd->mtd.writesize = 1; | 629 | while (nvram_types != 0) { |
251 | efx_mtd_rename(efx); | 630 | if (nvram_types & 1) { |
252 | 631 | rc = siena_mtd_probe_partition(efx, efx_mtd, | |
253 | efx_mtd->mtd.owner = THIS_MODULE; | 632 | efx_mtd->n_parts, type); |
254 | efx_mtd->mtd.priv = efx_mtd; | 633 | if (rc == 0) |
255 | efx_mtd->mtd.name = efx_mtd->name; | 634 | efx_mtd->n_parts++; |
256 | efx_mtd->mtd.erase = efx_mtd_erase; | 635 | else if (rc != -ENODEV) |
257 | efx_mtd->mtd.read = efx_mtd_read; | 636 | goto fail; |
258 | efx_mtd->mtd.write = efx_mtd_write; | 637 | } |
259 | efx_mtd->mtd.sync = efx_mtd_sync; | 638 | type++; |
260 | 639 | nvram_types >>= 1; | |
261 | if (add_mtd_device(&efx_mtd->mtd)) { | ||
262 | kfree(efx_mtd); | ||
263 | spi->mtd = NULL; | ||
264 | /* add_mtd_device() returns 1 if the MTD table is full */ | ||
265 | return -ENOMEM; | ||
266 | } | 640 | } |
267 | 641 | ||
268 | return 0; | 642 | rc = siena_mtd_get_fw_subtypes(efx, efx_mtd); |
643 | if (rc) | ||
644 | goto fail; | ||
645 | |||
646 | rc = efx_mtd_probe_device(efx, efx_mtd); | ||
647 | fail: | ||
648 | if (rc) | ||
649 | kfree(efx_mtd); | ||
650 | return rc; | ||
269 | } | 651 | } |
652 | |||
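Throughout the Siena handlers above, the per-partition state is recovered from the generic struct mtd_info via to_efx_mtd_partition() and mtd->priv, whose definitions are elsewhere in mtd.c. A plausible layout, inferred from the fields these handlers touch (struct and field names here are assumptions, not copied from the patch):

        struct efx_mtd_partition {
                struct mtd_info mtd;            /* embedded so container_of() works */
                union {
                        struct {
                                bool updating;
                                u8 nvram_type;
                                u16 fw_subtype;
                        } mcdi;
                        size_t offset;          /* Falcon SPI bootrom partition */
                };
                const char *type_name;
        };

        static struct efx_mtd_partition *to_efx_mtd_partition(struct mtd_info *mtd)
        {
                return container_of(mtd, struct efx_mtd_partition, mtd);
        }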
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 298566da638b..cb018e272097 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2008 Solarflare Communications Inc. | 4 | * Copyright 2005-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/etherdevice.h> | 18 | #include <linux/etherdevice.h> |
19 | #include <linux/ethtool.h> | 19 | #include <linux/ethtool.h> |
20 | #include <linux/if_vlan.h> | 20 | #include <linux/if_vlan.h> |
21 | #include <linux/timer.h> | ||
22 | #include <linux/mdio.h> | 21 | #include <linux/mdio.h> |
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
24 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
@@ -38,7 +37,7 @@ | |||
38 | #ifndef EFX_DRIVER_NAME | 37 | #ifndef EFX_DRIVER_NAME |
39 | #define EFX_DRIVER_NAME "sfc" | 38 | #define EFX_DRIVER_NAME "sfc" |
40 | #endif | 39 | #endif |
41 | #define EFX_DRIVER_VERSION "2.3" | 40 | #define EFX_DRIVER_VERSION "3.0" |
42 | 41 | ||
43 | #ifdef EFX_ENABLE_DEBUG | 42 | #ifdef EFX_ENABLE_DEBUG |
44 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 43 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | |||
101 | * Special buffers are used for the event queues and the TX and RX | 100 | * Special buffers are used for the event queues and the TX and RX |
102 | * descriptor queues for each channel. They are *not* used for the | 101 | * descriptor queues for each channel. They are *not* used for the |
103 | * actual transmit and receive buffers. | 102 | * actual transmit and receive buffers. |
104 | * | ||
105 | * Note that for Falcon, TX and RX descriptor queues live in host memory. | ||
106 | * Allocation and freeing procedures must take this into account. | ||
107 | */ | 103 | */ |
108 | struct efx_special_buffer { | 104 | struct efx_special_buffer { |
109 | void *addr; | 105 | void *addr; |
@@ -113,6 +109,13 @@ struct efx_special_buffer { | |||
113 | int entries; | 109 | int entries; |
114 | }; | 110 | }; |
115 | 111 | ||
112 | enum efx_flush_state { | ||
113 | FLUSH_NONE, | ||
114 | FLUSH_PENDING, | ||
115 | FLUSH_FAILED, | ||
116 | FLUSH_DONE, | ||
117 | }; | ||
118 | |||
116 | /** | 119 | /** |
117 | * struct efx_tx_buffer - An Efx TX buffer | 120 | * struct efx_tx_buffer - An Efx TX buffer |
118 | * @skb: The associated socket buffer. | 121 | * @skb: The associated socket buffer. |
@@ -189,7 +192,7 @@ struct efx_tx_queue { | |||
189 | struct efx_nic *nic; | 192 | struct efx_nic *nic; |
190 | struct efx_tx_buffer *buffer; | 193 | struct efx_tx_buffer *buffer; |
191 | struct efx_special_buffer txd; | 194 | struct efx_special_buffer txd; |
192 | bool flushed; | 195 | enum efx_flush_state flushed; |
193 | 196 | ||
194 | /* Members used mainly on the completion path */ | 197 | /* Members used mainly on the completion path */ |
195 | unsigned int read_count ____cacheline_aligned_in_smp; | 198 | unsigned int read_count ____cacheline_aligned_in_smp; |
@@ -284,7 +287,7 @@ struct efx_rx_queue { | |||
284 | struct page *buf_page; | 287 | struct page *buf_page; |
285 | dma_addr_t buf_dma_addr; | 288 | dma_addr_t buf_dma_addr; |
286 | char *buf_data; | 289 | char *buf_data; |
287 | bool flushed; | 290 | enum efx_flush_state flushed; |
288 | }; | 291 | }; |
289 | 292 | ||
290 | /** | 293 | /** |
@@ -293,7 +296,7 @@ struct efx_rx_queue { | |||
293 | * @dma_addr: DMA base address of the buffer | 296 | * @dma_addr: DMA base address of the buffer |
294 | * @len: Buffer length, in bytes | 297 | * @len: Buffer length, in bytes |
295 | * | 298 | * |
296 | * Falcon uses these buffers for its interrupt status registers and | 299 | * The NIC uses these buffers for its interrupt status registers and |
297 | * MAC stats dumps. | 300 | * MAC stats dumps. |
298 | */ | 301 | */ |
299 | struct efx_buffer { | 302 | struct efx_buffer { |
@@ -327,7 +330,7 @@ enum efx_rx_alloc_method { | |||
327 | * @used_flags: Channel is used by net driver | 330 | * @used_flags: Channel is used by net driver |
328 | * @enabled: Channel enabled indicator | 331 | * @enabled: Channel enabled indicator |
329 | * @irq: IRQ number (MSI and MSI-X only) | 332 | * @irq: IRQ number (MSI and MSI-X only) |
330 | * @irq_moderation: IRQ moderation value (in us) | 333 | * @irq_moderation: IRQ moderation value (in hardware ticks) |
331 | * @napi_dev: Net device used with NAPI | 334 | * @napi_dev: Net device used with NAPI |
332 | * @napi_str: NAPI control structure | 335 | * @napi_str: NAPI control structure |
333 | * @reset_work: Scheduled reset work thread | 336 | * @reset_work: Scheduled reset work thread |
@@ -343,9 +346,9 @@ enum efx_rx_alloc_method { | |||
343 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing | 346 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing |
344 | * descriptors | 347 | * descriptors |
345 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | 348 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors |
346 | * @n_rx_ip_frag_err: Count of RX IP fragment errors | ||
347 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | 349 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors |
348 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | 350 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors |
351 | * @n_rx_mcast_mismatch: Count of unmatched multicast frames | ||
349 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors | 352 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors |
350 | * @n_rx_overlength: Count of RX_OVERLENGTH errors | 353 | * @n_rx_overlength: Count of RX_OVERLENGTH errors |
351 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun | 354 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun |
@@ -373,9 +376,9 @@ struct efx_channel { | |||
373 | int rx_alloc_push_pages; | 376 | int rx_alloc_push_pages; |
374 | 377 | ||
375 | unsigned n_rx_tobe_disc; | 378 | unsigned n_rx_tobe_disc; |
376 | unsigned n_rx_ip_frag_err; | ||
377 | unsigned n_rx_ip_hdr_chksum_err; | 379 | unsigned n_rx_ip_hdr_chksum_err; |
378 | unsigned n_rx_tcp_udp_chksum_err; | 380 | unsigned n_rx_tcp_udp_chksum_err; |
381 | unsigned n_rx_mcast_mismatch; | ||
379 | unsigned n_rx_frm_trunc; | 382 | unsigned n_rx_frm_trunc; |
380 | unsigned n_rx_overlength; | 383 | unsigned n_rx_overlength; |
381 | unsigned n_skbuff_leaks; | 384 | unsigned n_skbuff_leaks; |
@@ -388,53 +391,29 @@ struct efx_channel { | |||
388 | 391 | ||
389 | }; | 392 | }; |
390 | 393 | ||
391 | /** | 394 | enum efx_led_mode { |
392 | * struct efx_blinker - S/W LED blinking context | 395 | EFX_LED_OFF = 0, |
393 | * @state: Current state - on or off | 396 | EFX_LED_ON = 1, |
394 | * @resubmit: Timer resubmission flag | 397 | EFX_LED_DEFAULT = 2 |
395 | * @timer: Control timer for blinking | ||
396 | */ | ||
397 | struct efx_blinker { | ||
398 | bool state; | ||
399 | bool resubmit; | ||
400 | struct timer_list timer; | ||
401 | }; | 398 | }; |
402 | 399 | ||
400 | #define STRING_TABLE_LOOKUP(val, member) \ | ||
401 | ((val) < member ## _max) ? member ## _names[val] : "(invalid)" | ||
403 | 402 | ||
404 | /** | 403 | extern const char *efx_loopback_mode_names[]; |
405 | * struct efx_board - board information | 404 | extern const unsigned int efx_loopback_mode_max; |
406 | * @type: Board model type | 405 | #define LOOPBACK_MODE(efx) \ |
407 | * @major: Major rev. ('A', 'B' ...) | 406 | STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) |
408 | * @minor: Minor rev. (0, 1, ...) | 407 | |
409 | * @init: Initialisation function | 408 | extern const char *efx_interrupt_mode_names[]; |
410 | * @init_leds: Sets up board LEDs. May be called repeatedly. | 409 | extern const unsigned int efx_interrupt_mode_max; |
411 | * @set_id_led: Turns the identification LED on or off | 410 | #define INT_MODE(efx) \ |
412 | * @blink: Starts/stops blinking | 411 | STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) |
413 | * @monitor: Board-specific health check function | ||
414 | * @fini: Cleanup function | ||
415 | * @blinker: used to blink LEDs in software | ||
416 | * @hwmon_client: I2C client for hardware monitor | ||
417 | * @ioexp_client: I2C client for power/port control | ||
418 | */ | ||
419 | struct efx_board { | ||
420 | int type; | ||
421 | int major; | ||
422 | int minor; | ||
423 | int (*init) (struct efx_nic *nic); | ||
424 | /* As the LEDs are typically attached to the PHY, LEDs | ||
425 | * have a separate init callback that happens later than | ||
426 | * board init. */ | ||
427 | void (*init_leds)(struct efx_nic *efx); | ||
428 | void (*set_id_led) (struct efx_nic *efx, bool state); | ||
429 | int (*monitor) (struct efx_nic *nic); | ||
430 | void (*blink) (struct efx_nic *efx, bool start); | ||
431 | void (*fini) (struct efx_nic *nic); | ||
432 | struct efx_blinker blinker; | ||
433 | struct i2c_client *hwmon_client, *ioexp_client; | ||
434 | }; | ||
435 | 412 | ||
436 | #define STRING_TABLE_LOOKUP(val, member) \ | 413 | extern const char *efx_reset_type_names[]; |
437 | member ## _names[val] | 414 | extern const unsigned int efx_reset_type_max; |
415 | #define RESET_TYPE(type) \ | ||
416 | STRING_TABLE_LOOKUP(type, efx_reset_type) | ||
438 | 417 | ||
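STRING_TABLE_LOOKUP now pastes the member name onto _max and _names, so each wrapper expands to a bounds-checked array lookup rather than an unchecked index. For the reset-type case the expansion is roughly:

        /* RESET_TYPE(method) expands to approximately: */
        ((method) < efx_reset_type_max) ? efx_reset_type_names[method] : "(invalid)"

        /* so a log line might use it as (usage assumed, not from this patch): */
        EFX_LOG(efx, "resetting (%s)\n", RESET_TYPE(method));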
439 | enum efx_int_mode { | 418 | enum efx_int_mode { |
440 | /* Be careful if altering to correct macro below */ | 419 | /* Be careful if altering to correct macro below */ |
@@ -445,20 +424,7 @@ enum efx_int_mode { | |||
445 | }; | 424 | }; |
446 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) | 425 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) |
447 | 426 | ||
448 | enum phy_type { | 427 | #define EFX_IS10G(efx) ((efx)->link_state.speed == 10000) |
449 | PHY_TYPE_NONE = 0, | ||
450 | PHY_TYPE_TXC43128 = 1, | ||
451 | PHY_TYPE_88E1111 = 2, | ||
452 | PHY_TYPE_SFX7101 = 3, | ||
453 | PHY_TYPE_QT2022C2 = 4, | ||
454 | PHY_TYPE_PM8358 = 6, | ||
455 | PHY_TYPE_SFT9001A = 8, | ||
456 | PHY_TYPE_QT2025C = 9, | ||
457 | PHY_TYPE_SFT9001B = 10, | ||
458 | PHY_TYPE_MAX /* Insert any new items before this */ | ||
459 | }; | ||
460 | |||
461 | #define EFX_IS10G(efx) ((efx)->link_speed == 10000) | ||
462 | 428 | ||
463 | enum nic_state { | 429 | enum nic_state { |
464 | STATE_INIT = 0, | 430 | STATE_INIT = 0, |
@@ -500,73 +466,72 @@ enum efx_fc_type { | |||
500 | EFX_FC_AUTO = 4, | 466 | EFX_FC_AUTO = 4, |
501 | }; | 467 | }; |
502 | 468 | ||
503 | /* Supported MAC bit-mask */ | 469 | /** |
504 | enum efx_mac_type { | 470 | * struct efx_link_state - Current state of the link |
505 | EFX_GMAC = 1, | 471 | * @up: Link is up |
506 | EFX_XMAC = 2, | 472 | * @fd: Link is full-duplex |
473 | * @fc: Actual flow control flags | ||
474 | * @speed: Link speed (Mbps) | ||
475 | */ | ||
476 | struct efx_link_state { | ||
477 | bool up; | ||
478 | bool fd; | ||
479 | enum efx_fc_type fc; | ||
480 | unsigned int speed; | ||
507 | }; | 481 | }; |
508 | 482 | ||
509 | static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc, | 483 | static inline bool efx_link_state_equal(const struct efx_link_state *left, |
510 | unsigned int lpa) | 484 | const struct efx_link_state *right) |
511 | { | 485 | { |
512 | BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX)); | 486 | return left->up == right->up && left->fd == right->fd && |
513 | 487 | left->fc == right->fc && left->speed == right->speed; | |
514 | if (!(wanted_fc & EFX_FC_AUTO)) | ||
515 | return wanted_fc; | ||
516 | |||
517 | return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa); | ||
518 | } | 488 | } |
519 | 489 | ||
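efx_link_state_equal() gives callers a single test for whether anything about the link changed, replacing the separate link_up/link_fd/link_fc/link_speed fields. A hypothetical poll path (the function and its surrounding logic are assumptions, not code from this patch) might use it as:

        static bool example_phy_poll(struct efx_nic *efx)
        {
                struct efx_link_state old_state = efx->link_state;

                /* ... refresh efx->link_state from the PHY/MAC here ... */

                return !efx_link_state_equal(&efx->link_state, &old_state);
        }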
520 | /** | 490 | /** |
521 | * struct efx_mac_operations - Efx MAC operations table | 491 | * struct efx_mac_operations - Efx MAC operations table |
522 | * @reconfigure: Reconfigure MAC. Serialised by the mac_lock | 492 | * @reconfigure: Reconfigure MAC. Serialised by the mac_lock |
523 | * @update_stats: Update statistics | 493 | * @update_stats: Update statistics |
524 | * @irq: Hardware MAC event callback. Serialised by the mac_lock | 494 | * @check_fault: Check fault state. True if fault present. |
525 | * @poll: Poll for hardware state. Serialised by the mac_lock | ||
526 | */ | 495 | */ |
527 | struct efx_mac_operations { | 496 | struct efx_mac_operations { |
528 | void (*reconfigure) (struct efx_nic *efx); | 497 | int (*reconfigure) (struct efx_nic *efx); |
529 | void (*update_stats) (struct efx_nic *efx); | 498 | void (*update_stats) (struct efx_nic *efx); |
530 | void (*irq) (struct efx_nic *efx); | 499 | bool (*check_fault)(struct efx_nic *efx); |
531 | void (*poll) (struct efx_nic *efx); | ||
532 | }; | 500 | }; |
533 | 501 | ||
534 | /** | 502 | /** |
535 | * struct efx_phy_operations - Efx PHY operations table | 503 | * struct efx_phy_operations - Efx PHY operations table |
504 | * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, | ||
505 | * efx->loopback_modes. | ||
536 | * @init: Initialise PHY | 506 | * @init: Initialise PHY |
537 | * @fini: Shut down PHY | 507 | * @fini: Shut down PHY |
538 | * @reconfigure: Reconfigure PHY (e.g. for new link parameters) | 508 | * @reconfigure: Reconfigure PHY (e.g. for new link parameters) |
539 | * @clear_interrupt: Clear down interrupt | 509 | * @poll: Update @link_state and report whether it changed. |
540 | * @blink: Blink LEDs | 510 | * Serialised by the mac_lock. |
541 | * @poll: Poll for hardware state. Serialised by the mac_lock. | ||
542 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. | 511 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. |
543 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. | 512 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. |
544 | * @set_npage_adv: Set abilities advertised in (Extended) Next Page | 513 | * @set_npage_adv: Set abilities advertised in (Extended) Next Page |
545 | * (only needed where AN bit is set in mmds) | 514 | * (only needed where AN bit is set in mmds) |
546 | * @num_tests: Number of PHY-specific tests/results | 515 | * @test_alive: Test that PHY is 'alive' (online) |
547 | * @test_names: Names of the tests/results | 516 | * @test_name: Get the name of a PHY-specific test/result |
548 | * @run_tests: Run tests and record results as appropriate. | 517 | * @run_tests: Run tests and record results as appropriate (offline). |
549 | * Flags are the ethtool tests flags. | 518 | * Flags are the ethtool tests flags. |
550 | * @mmds: MMD presence mask | ||
551 | * @loopbacks: Supported loopback modes mask | ||
552 | */ | 519 | */ |
553 | struct efx_phy_operations { | 520 | struct efx_phy_operations { |
554 | enum efx_mac_type macs; | 521 | int (*probe) (struct efx_nic *efx); |
555 | int (*init) (struct efx_nic *efx); | 522 | int (*init) (struct efx_nic *efx); |
556 | void (*fini) (struct efx_nic *efx); | 523 | void (*fini) (struct efx_nic *efx); |
557 | void (*reconfigure) (struct efx_nic *efx); | 524 | void (*remove) (struct efx_nic *efx); |
558 | void (*clear_interrupt) (struct efx_nic *efx); | 525 | int (*reconfigure) (struct efx_nic *efx); |
559 | void (*poll) (struct efx_nic *efx); | 526 | bool (*poll) (struct efx_nic *efx); |
560 | void (*get_settings) (struct efx_nic *efx, | 527 | void (*get_settings) (struct efx_nic *efx, |
561 | struct ethtool_cmd *ecmd); | 528 | struct ethtool_cmd *ecmd); |
562 | int (*set_settings) (struct efx_nic *efx, | 529 | int (*set_settings) (struct efx_nic *efx, |
563 | struct ethtool_cmd *ecmd); | 530 | struct ethtool_cmd *ecmd); |
564 | void (*set_npage_adv) (struct efx_nic *efx, u32); | 531 | void (*set_npage_adv) (struct efx_nic *efx, u32); |
565 | u32 num_tests; | 532 | int (*test_alive) (struct efx_nic *efx); |
566 | const char *const *test_names; | 533 | const char *(*test_name) (struct efx_nic *efx, unsigned int index); |
567 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); | 534 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); |
568 | int mmds; | ||
569 | unsigned loopbacks; | ||
570 | }; | 535 | }; |
571 | 536 | ||
572 | /** | 537 | /** |
@@ -690,36 +655,38 @@ union efx_multicast_hash { | |||
690 | * @interrupt_mode: Interrupt mode | 655 | * @interrupt_mode: Interrupt mode |
691 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues | 656 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues |
692 | * @irq_rx_moderation: IRQ moderation time for RX event queues | 657 | * @irq_rx_moderation: IRQ moderation time for RX event queues |
693 | * @i2c_adap: I2C adapter | ||
694 | * @board_info: Board-level information | ||
695 | * @state: Device state flag. Serialised by the rtnl_lock. | 658 | * @state: Device state flag. Serialised by the rtnl_lock. |
696 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) | 659 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) |
697 | * @tx_queue: TX DMA queues | 660 | * @tx_queue: TX DMA queues |
698 | * @rx_queue: RX DMA queues | 661 | * @rx_queue: RX DMA queues |
699 | * @channel: Channels | 662 | * @channel: Channels |
663 | * @next_buffer_table: First available buffer table id | ||
700 | * @n_rx_queues: Number of RX queues | 664 | * @n_rx_queues: Number of RX queues |
701 | * @n_channels: Number of channels in use | 665 | * @n_channels: Number of channels in use |
702 | * @rx_buffer_len: RX buffer length | 666 | * @rx_buffer_len: RX buffer length |
703 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | 667 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer |
668 | * @int_error_count: Number of internal errors seen recently | ||
669 | * @int_error_expire: Time at which error count will be expired | ||
704 | * @irq_status: Interrupt status buffer | 670 | * @irq_status: Interrupt status buffer |
705 | * @last_irq_cpu: Last CPU to handle interrupt. | 671 | * @last_irq_cpu: Last CPU to handle interrupt. |
706 | * This register is written with the SMP processor ID whenever an | 672 | * This register is written with the SMP processor ID whenever an |
707 | * interrupt is handled. It is used by falcon_test_interrupt() | 673 | * interrupt is handled. It is used by efx_nic_test_interrupt() |
708 | * to verify that an interrupt has occurred. | 674 | * to verify that an interrupt has occurred. |
709 | * @spi_flash: SPI flash device | 675 | * @spi_flash: SPI flash device |
710 | * This field will be %NULL if no flash device is present. | 676 | * This field will be %NULL if no flash device is present (or for Siena). |
711 | * @spi_eeprom: SPI EEPROM device | 677 | * @spi_eeprom: SPI EEPROM device |
712 | * This field will be %NULL if no EEPROM device is present. | 678 | * This field will be %NULL if no EEPROM device is present (or for Siena). |
713 | * @spi_lock: SPI bus lock | 679 | * @spi_lock: SPI bus lock |
680 | * @mtd_list: List of MTDs attached to the NIC | ||
714 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | 681 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count |
715 | * @nic_data: Hardware dependent state | 682 | * @nic_data: Hardware dependent state |
716 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, | 683 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, |
717 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() | 684 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() |
718 | * @port_enabled: Port enabled indicator. | 685 | * @port_enabled: Port enabled indicator. |
719 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor(), | 686 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and |
720 | * efx_phy_work(), and efx_mac_work() with kernel interfaces. Safe to read | 687 | * efx_mac_work() with kernel interfaces. Safe to read under any |
721 | * under any one of the rtnl_lock, mac_lock, or netif_tx_lock, but all | 688 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must |
722 | * three must be held to modify it. | 689 | * be held to modify it. |
723 | * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock | 690 | * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock |
724 | * @port_initialized: Port initialized? | 691 | * @port_initialized: Port initialized? |
725 | * @net_dev: Operating system network device. Consider holding the rtnl lock | 692 | * @net_dev: Operating system network device. Consider holding the rtnl lock |
@@ -731,32 +698,28 @@ union efx_multicast_hash { | |||
731 | * &struct net_device_stats. | 698 | * &struct net_device_stats. |
732 | * @stats_buffer: DMA buffer for statistics | 699 | * @stats_buffer: DMA buffer for statistics |
733 | * @stats_lock: Statistics update lock. Serialises statistics fetches | 700 | * @stats_lock: Statistics update lock. Serialises statistics fetches |
734 | * @stats_disable_count: Nest count for disabling statistics fetches | ||
735 | * @mac_op: MAC interface | 701 | * @mac_op: MAC interface |
736 | * @mac_address: Permanent MAC address | 702 | * @mac_address: Permanent MAC address |
737 | * @phy_type: PHY type | 703 | * @phy_type: PHY type |
738 | * @phy_lock: PHY access lock | 704 | * @mdio_lock: MDIO lock |
739 | * @phy_op: PHY interface | 705 | * @phy_op: PHY interface |
740 | * @phy_data: PHY private data (including PHY-specific stats) | 706 | * @phy_data: PHY private data (including PHY-specific stats) |
741 | * @mdio: PHY MDIO interface | 707 | * @mdio: PHY MDIO interface |
708 | * @mdio_bus: PHY MDIO bus ID (only used by Siena) | ||
742 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. | 709 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. |
743 | * @mac_up: MAC link state | 710 | * @xmac_poll_required: XMAC link state needs polling |
744 | * @link_up: Link status | 711 | * @link_advertising: Autonegotiation advertising flags |
745 | * @link_fd: Link is full duplex | 712 | * @link_state: Current state of the link |
746 | * @link_fc: Actual flow control flags | ||
747 | * @link_speed: Link speed (Mbps) | ||
748 | * @n_link_state_changes: Number of times the link has changed state | 713 | * @n_link_state_changes: Number of times the link has changed state |
749 | * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. | 714 | * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. |
750 | * @multicast_hash: Multicast hash table | 715 | * @multicast_hash: Multicast hash table |
751 | * @wanted_fc: Wanted flow control flags | 716 | * @wanted_fc: Wanted flow control flags |
752 | * @phy_work: work item for dealing with PHY events | 717 | * @mac_work: Work item for changing MAC promiscuity and multicast hash |
753 | * @mac_work: work item for dealing with MAC events | ||
754 | * @loopback_mode: Loopback status | 718 | * @loopback_mode: Loopback status |
755 | * @loopback_modes: Supported loopback mode bitmask | 719 | * @loopback_modes: Supported loopback mode bitmask |
756 | * @loopback_selftest: Offline self-test private state | 720 | * @loopback_selftest: Offline self-test private state |
757 | * | 721 | * |
758 | * The @priv field of the corresponding &struct net_device points to | 722 | * This is stored in the private area of the &struct net_device. |
759 | * this. | ||
760 | */ | 723 | */ |
761 | struct efx_nic { | 724 | struct efx_nic { |
762 | char name[IFNAMSIZ]; | 725 | char name[IFNAMSIZ]; |
@@ -774,9 +737,6 @@ struct efx_nic { | |||
774 | bool irq_rx_adaptive; | 737 | bool irq_rx_adaptive; |
775 | unsigned int irq_rx_moderation; | 738 | unsigned int irq_rx_moderation; |
776 | 739 | ||
777 | struct i2c_adapter i2c_adap; | ||
778 | struct efx_board board_info; | ||
779 | |||
780 | enum nic_state state; | 740 | enum nic_state state; |
781 | enum reset_type reset_pending; | 741 | enum reset_type reset_pending; |
782 | 742 | ||
@@ -784,21 +744,29 @@ struct efx_nic { | |||
784 | struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; | 744 | struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; |
785 | struct efx_channel channel[EFX_MAX_CHANNELS]; | 745 | struct efx_channel channel[EFX_MAX_CHANNELS]; |
786 | 746 | ||
747 | unsigned next_buffer_table; | ||
787 | int n_rx_queues; | 748 | int n_rx_queues; |
788 | int n_channels; | 749 | int n_channels; |
789 | unsigned int rx_buffer_len; | 750 | unsigned int rx_buffer_len; |
790 | unsigned int rx_buffer_order; | 751 | unsigned int rx_buffer_order; |
791 | 752 | ||
753 | unsigned int_error_count; | ||
754 | unsigned long int_error_expire; | ||
755 | |||
792 | struct efx_buffer irq_status; | 756 | struct efx_buffer irq_status; |
793 | volatile signed int last_irq_cpu; | 757 | volatile signed int last_irq_cpu; |
758 | unsigned long irq_zero_count; | ||
794 | 759 | ||
795 | struct efx_spi_device *spi_flash; | 760 | struct efx_spi_device *spi_flash; |
796 | struct efx_spi_device *spi_eeprom; | 761 | struct efx_spi_device *spi_eeprom; |
797 | struct mutex spi_lock; | 762 | struct mutex spi_lock; |
763 | #ifdef CONFIG_SFC_MTD | ||
764 | struct list_head mtd_list; | ||
765 | #endif | ||
798 | 766 | ||
799 | unsigned n_rx_nodesc_drop_cnt; | 767 | unsigned n_rx_nodesc_drop_cnt; |
800 | 768 | ||
801 | struct falcon_nic_data *nic_data; | 769 | void *nic_data; |
802 | 770 | ||
803 | struct mutex mac_lock; | 771 | struct mutex mac_lock; |
804 | struct work_struct mac_work; | 772 | struct work_struct mac_work; |
@@ -815,24 +783,21 @@ struct efx_nic { | |||
815 | struct efx_mac_stats mac_stats; | 783 | struct efx_mac_stats mac_stats; |
816 | struct efx_buffer stats_buffer; | 784 | struct efx_buffer stats_buffer; |
817 | spinlock_t stats_lock; | 785 | spinlock_t stats_lock; |
818 | unsigned int stats_disable_count; | ||
819 | 786 | ||
820 | struct efx_mac_operations *mac_op; | 787 | struct efx_mac_operations *mac_op; |
821 | unsigned char mac_address[ETH_ALEN]; | 788 | unsigned char mac_address[ETH_ALEN]; |
822 | 789 | ||
823 | enum phy_type phy_type; | 790 | unsigned int phy_type; |
824 | spinlock_t phy_lock; | 791 | struct mutex mdio_lock; |
825 | struct work_struct phy_work; | ||
826 | struct efx_phy_operations *phy_op; | 792 | struct efx_phy_operations *phy_op; |
827 | void *phy_data; | 793 | void *phy_data; |
828 | struct mdio_if_info mdio; | 794 | struct mdio_if_info mdio; |
795 | unsigned int mdio_bus; | ||
829 | enum efx_phy_mode phy_mode; | 796 | enum efx_phy_mode phy_mode; |
830 | 797 | ||
831 | bool mac_up; | 798 | bool xmac_poll_required; |
832 | bool link_up; | 799 | u32 link_advertising; |
833 | bool link_fd; | 800 | struct efx_link_state link_state; |
834 | enum efx_fc_type link_fc; | ||
835 | unsigned int link_speed; | ||
836 | unsigned int n_link_state_changes; | 801 | unsigned int n_link_state_changes; |
837 | 802 | ||
838 | bool promiscuous; | 803 | bool promiscuous; |
@@ -841,7 +806,7 @@ struct efx_nic { | |||
841 | 806 | ||
842 | atomic_t rx_reset; | 807 | atomic_t rx_reset; |
843 | enum efx_loopback_mode loopback_mode; | 808 | enum efx_loopback_mode loopback_mode; |
844 | unsigned int loopback_modes; | 809 | u64 loopback_modes; |
845 | 810 | ||
846 | void *loopback_selftest; | 811 | void *loopback_selftest; |
847 | }; | 812 | }; |
@@ -860,50 +825,95 @@ static inline const char *efx_dev_name(struct efx_nic *efx) | |||
860 | return efx_dev_registered(efx) ? efx->name : ""; | 825 | return efx_dev_registered(efx) ? efx->name : ""; |
861 | } | 826 | } |
862 | 827 | ||
828 | static inline unsigned int efx_port_num(struct efx_nic *efx) | ||
829 | { | ||
830 | return PCI_FUNC(efx->pci_dev->devfn); | ||
831 | } | ||
832 | |||
863 | /** | 833 | /** |
864 | * struct efx_nic_type - Efx device type definition | 834 | * struct efx_nic_type - Efx device type definition |
865 | * @mem_bar: Memory BAR number | 835 | * @probe: Probe the controller |
836 | * @remove: Free resources allocated by probe() | ||
837 | * @init: Initialise the controller | ||
838 | * @fini: Shut down the controller | ||
839 | * @monitor: Periodic function for polling link state and hardware monitor | ||
840 | * @reset: Reset the controller hardware and possibly the PHY. This will | ||
841 | * be called while the controller is uninitialised. | ||
842 | * @probe_port: Probe the MAC and PHY | ||
843 | * @remove_port: Free resources allocated by probe_port() | ||
844 | * @prepare_flush: Prepare the hardware for flushing the DMA queues | ||
845 | * @update_stats: Update statistics not provided by event handling | ||
846 | * @start_stats: Start the regular fetching of statistics | ||
847 | * @stop_stats: Stop the regular fetching of statistics | ||
848 | * @set_id_led: Set state of identifying LED or revert to automatic function | ||
849 | * @push_irq_moderation: Apply interrupt moderation value | ||
850 | * @push_multicast_hash: Apply multicast hash table | ||
851 | * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY | ||
852 | * @get_wol: Get WoL configuration from driver state | ||
853 | * @set_wol: Push WoL configuration to the NIC | ||
854 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) | ||
855 | * @test_registers: Test read/write functionality of control registers | ||
856 | * @test_nvram: Test validity of NVRAM contents | ||
857 | * @default_mac_ops: efx_mac_operations to set at startup | ||
858 | * @revision: Hardware architecture revision | ||
866 | * @mem_map_size: Memory BAR mapped size | 859 | * @mem_map_size: Memory BAR mapped size |
867 | * @txd_ptr_tbl_base: TX descriptor ring base address | 860 | * @txd_ptr_tbl_base: TX descriptor ring base address |
868 | * @rxd_ptr_tbl_base: RX descriptor ring base address | 861 | * @rxd_ptr_tbl_base: RX descriptor ring base address |
869 | * @buf_tbl_base: Buffer table base address | 862 | * @buf_tbl_base: Buffer table base address |
870 | * @evq_ptr_tbl_base: Event queue pointer table base address | 863 | * @evq_ptr_tbl_base: Event queue pointer table base address |
871 | * @evq_rptr_tbl_base: Event queue read-pointer table base address | 864 | * @evq_rptr_tbl_base: Event queue read-pointer table base address |
872 | * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1) | ||
873 | * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1) | ||
874 | * @evq_size: Event queue size (must be a power of two) | ||
875 | * @max_dma_mask: Maximum possible DMA mask | 865 | * @max_dma_mask: Maximum possible DMA mask |
876 | * @tx_dma_mask: TX DMA mask | ||
877 | * @bug5391_mask: Address mask for bug 5391 workaround | ||
878 | * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes) | ||
879 | * @rx_xon_thresh: RX FIFO XON watermark (bytes) | ||
880 | * @rx_buffer_padding: Padding added to each RX buffer | 866 | * @rx_buffer_padding: Padding added to each RX buffer |
881 | * @max_interrupt_mode: Highest capability interrupt mode supported | 867 | * @max_interrupt_mode: Highest capability interrupt mode supported |
882 | * from &enum efx_init_mode. | 868 | * from &enum efx_init_mode. |
883 | * @phys_addr_channels: Number of channels with physically addressed | 869 | * @phys_addr_channels: Number of channels with physically addressed |
884 | * descriptors | 870 | * descriptors |
871 | * @tx_dc_base: Base address in SRAM of TX queue descriptor caches | ||
872 | * @rx_dc_base: Base address in SRAM of RX queue descriptor caches | ||
873 | * @offload_features: net_device feature flags for protocol offload | ||
874 | * features implemented in hardware | ||
875 | * @reset_world_flags: Flags for additional components covered by | ||
876 | * reset method RESET_TYPE_WORLD | ||
885 | */ | 877 | */ |
886 | struct efx_nic_type { | 878 | struct efx_nic_type { |
887 | unsigned int mem_bar; | 879 | int (*probe)(struct efx_nic *efx); |
880 | void (*remove)(struct efx_nic *efx); | ||
881 | int (*init)(struct efx_nic *efx); | ||
882 | void (*fini)(struct efx_nic *efx); | ||
883 | void (*monitor)(struct efx_nic *efx); | ||
884 | int (*reset)(struct efx_nic *efx, enum reset_type method); | ||
885 | int (*probe_port)(struct efx_nic *efx); | ||
886 | void (*remove_port)(struct efx_nic *efx); | ||
887 | void (*prepare_flush)(struct efx_nic *efx); | ||
888 | void (*update_stats)(struct efx_nic *efx); | ||
889 | void (*start_stats)(struct efx_nic *efx); | ||
890 | void (*stop_stats)(struct efx_nic *efx); | ||
891 | void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); | ||
892 | void (*push_irq_moderation)(struct efx_channel *channel); | ||
893 | void (*push_multicast_hash)(struct efx_nic *efx); | ||
894 | int (*reconfigure_port)(struct efx_nic *efx); | ||
895 | void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); | ||
896 | int (*set_wol)(struct efx_nic *efx, u32 type); | ||
897 | void (*resume_wol)(struct efx_nic *efx); | ||
898 | int (*test_registers)(struct efx_nic *efx); | ||
899 | int (*test_nvram)(struct efx_nic *efx); | ||
900 | struct efx_mac_operations *default_mac_ops; | ||
901 | |||
902 | int revision; | ||
888 | unsigned int mem_map_size; | 903 | unsigned int mem_map_size; |
889 | unsigned int txd_ptr_tbl_base; | 904 | unsigned int txd_ptr_tbl_base; |
890 | unsigned int rxd_ptr_tbl_base; | 905 | unsigned int rxd_ptr_tbl_base; |
891 | unsigned int buf_tbl_base; | 906 | unsigned int buf_tbl_base; |
892 | unsigned int evq_ptr_tbl_base; | 907 | unsigned int evq_ptr_tbl_base; |
893 | unsigned int evq_rptr_tbl_base; | 908 | unsigned int evq_rptr_tbl_base; |
894 | |||
895 | unsigned int txd_ring_mask; | ||
896 | unsigned int rxd_ring_mask; | ||
897 | unsigned int evq_size; | ||
898 | u64 max_dma_mask; | 909 | u64 max_dma_mask; |
899 | unsigned int tx_dma_mask; | ||
900 | unsigned bug5391_mask; | ||
901 | |||
902 | int rx_xoff_thresh; | ||
903 | int rx_xon_thresh; | ||
904 | unsigned int rx_buffer_padding; | 910 | unsigned int rx_buffer_padding; |
905 | unsigned int max_interrupt_mode; | 911 | unsigned int max_interrupt_mode; |
906 | unsigned int phys_addr_channels; | 912 | unsigned int phys_addr_channels; |
913 | unsigned int tx_dc_base; | ||
914 | unsigned int rx_dc_base; | ||
915 | unsigned long offload_features; | ||
916 | u32 reset_world_flags; | ||
907 | }; | 917 | }; |
908 | 918 | ||
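With the Falcon-specific constants replaced by a method table, generic code reaches the hardware only through efx->type; Falcon and Siena differ only in which efx_nic_type they supply. A sketch of the resulting dispatch (hypothetical helper, not code from this patch):

        static void example_apply_multicast(struct efx_nic *efx)
        {
                /* The caller need not know which controller is present. */
                efx->type->push_multicast_hash(efx);
        }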
909 | /************************************************************************** | 919 | /************************************************************************** |
@@ -982,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr) | |||
982 | * that the net driver will program into the MAC as the maximum frame | 992 | * that the net driver will program into the MAC as the maximum frame |
983 | * length. | 993 | * length. |
984 | * | 994 | * |
985 | * The 10G MAC used in Falcon requires 8-byte alignment on the frame | 995 | * The 10G MAC requires 8-byte alignment on the frame |
986 | * length, so we round up to the nearest 8. | 996 | * length, so we round up to the nearest 8. |
987 | * | 997 | * |
988 | * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an | 998 | * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an |
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c new file mode 100644 index 000000000000..b06f8e348307 --- /dev/null +++ b/drivers/net/sfc/nic.c | |||
@@ -0,0 +1,1590 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include "net_driver.h" | ||
17 | #include "bitfield.h" | ||
18 | #include "efx.h" | ||
19 | #include "nic.h" | ||
20 | #include "regs.h" | ||
21 | #include "io.h" | ||
22 | #include "workarounds.h" | ||
23 | |||
24 | /************************************************************************** | ||
25 | * | ||
26 | * Configurable values | ||
27 | * | ||
28 | ************************************************************************** | ||
29 | */ | ||
30 | |||
31 | /* This is set to 16 for a good reason. In summary, if larger than | ||
32 | * 16, the descriptor cache holds more than a default socket | ||
33 | * buffer's worth of packets (for UDP we can only have at most one | ||
34 | * socket buffer's worth outstanding). This, combined with the fact | ||
35 | * that we only get 1 TX event per descriptor cache, means the NIC | ||
36 | * goes idle. | ||
37 | */ | ||
38 | #define TX_DC_ENTRIES 16 | ||
39 | #define TX_DC_ENTRIES_ORDER 1 | ||
40 | |||
41 | #define RX_DC_ENTRIES 64 | ||
42 | #define RX_DC_ENTRIES_ORDER 3 | ||
43 | |||
44 | /* RX FIFO XOFF watermark | ||
45 | * | ||
46 | * When the amount of the RX FIFO used increases past this | ||
47 | * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A) | ||
48 | * This also has an effect on RX/TX arbitration | ||
49 | */ | ||
50 | int efx_nic_rx_xoff_thresh = -1; | ||
51 | module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644); | ||
52 | MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold"); | ||
53 | |||
54 | /* RX FIFO XON watermark | ||
55 | * | ||
56 | * When the amount of the RX FIFO used decreases below this | ||
57 | * watermark, send XON. Only used if TX flow control is enabled (ethtool -A) | ||
58 | * This also has an effect on RX/TX arbitration | ||
59 | */ | ||
60 | int efx_nic_rx_xon_thresh = -1; | ||
61 | module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644); | ||
62 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | ||
63 | |||
64 | /* If EFX_MAX_INT_ERRORS internal errors occur within | ||
65 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | ||
66 | * disable it. | ||
67 | */ | ||
68 | #define EFX_INT_ERROR_EXPIRE 3600 | ||
69 | #define EFX_MAX_INT_ERRORS 5 | ||
70 | |||
71 | /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times | ||
72 | */ | ||
73 | #define EFX_FLUSH_INTERVAL 10 | ||
74 | #define EFX_FLUSH_POLL_COUNT 100 | ||
75 | |||
76 | /* Size and alignment of special buffers (4KB) */ | ||
77 | #define EFX_BUF_SIZE 4096 | ||
78 | |||
79 | /* Depth of RX flush request fifo */ | ||
80 | #define EFX_RX_FLUSH_COUNT 4 | ||
81 | |||
82 | /************************************************************************** | ||
83 | * | ||
84 | * Solarstorm hardware access | ||
85 | * | ||
86 | **************************************************************************/ | ||
87 | |||
88 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | ||
89 | unsigned int index) | ||
90 | { | ||
91 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | ||
92 | value, index); | ||
93 | } | ||
94 | |||
95 | /* Read the current event from the event queue */ | ||
96 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | ||
97 | unsigned int index) | ||
98 | { | ||
99 | return (((efx_qword_t *) (channel->eventq.addr)) + index); | ||
100 | } | ||
101 | |||
102 | /* See if an event is present | ||
103 | * | ||
104 | * We check both the high and low dword of the event for all ones. We | ||
105 | * wrote all ones when we cleared the event, and no valid event can | ||
106 | * have all ones in either its high or low dwords. This approach is | ||
107 | * robust against reordering. | ||
108 | * | ||
109 | * Note that using a single 64-bit comparison is incorrect; even | ||
110 | * though the CPU read will be atomic, the DMA write may not be. | ||
111 | */ | ||
112 | static inline int efx_event_present(efx_qword_t *event) | ||
113 | { | ||
114 | return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | ||
115 | EFX_DWORD_IS_ALL_ONES(event->dword[1]))); | ||
116 | } | ||
117 | |||
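The all-ones convention implies that whoever consumes an event must refill its slot with all ones, so the same slot reads as empty on the next pass. A sketch of that drain loop (EFX_EVQ_MASK, eventq_read_ptr and the dispatch step are assumed, not shown in this hunk):

        static void example_drain_eventq(struct efx_channel *channel)
        {
                unsigned int read_ptr = channel->eventq_read_ptr;
                efx_qword_t event, *p_event;

                for (;;) {
                        p_event = efx_event(channel, read_ptr);
                        if (!efx_event_present(p_event))
                                break;                  /* no more new events */
                        event = *p_event;
                        EFX_SET_QWORD(*p_event);        /* mark the slot as consumed */
                        read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
                        /* ... dispatch on the event code in 'event' ... */
                }
                channel->eventq_read_ptr = read_ptr;
        }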
118 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | ||
119 | const efx_oword_t *mask) | ||
120 | { | ||
121 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
122 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
123 | } | ||
124 | |||
125 | int efx_nic_test_registers(struct efx_nic *efx, | ||
126 | const struct efx_nic_register_test *regs, | ||
127 | size_t n_regs) | ||
128 | { | ||
129 | unsigned address = 0, i, j; | ||
130 | efx_oword_t mask, imask, original, reg, buf; | ||
131 | |||
132 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ | ||
133 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | ||
134 | |||
135 | for (i = 0; i < n_regs; ++i) { | ||
136 | address = regs[i].address; | ||
137 | mask = imask = regs[i].mask; | ||
138 | EFX_INVERT_OWORD(imask); | ||
139 | |||
140 | efx_reado(efx, &original, address); | ||
141 | |||
142 | /* bit sweep on and off */ | ||
143 | for (j = 0; j < 128; j++) { | ||
144 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | ||
145 | continue; | ||
146 | |||
147 | /* Test this testable bit can be set in isolation */ | ||
148 | EFX_AND_OWORD(reg, original, mask); | ||
149 | EFX_SET_OWORD32(reg, j, j, 1); | ||
150 | |||
151 | efx_writeo(efx, ®, address); | ||
152 | efx_reado(efx, &buf, address); | ||
153 | |||
154 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
155 | goto fail; | ||
156 | |||
157 | /* Test this testable bit can be cleared in isolation */ | ||
158 | EFX_OR_OWORD(reg, original, mask); | ||
159 | EFX_SET_OWORD32(reg, j, j, 0); | ||
160 | |||
161 | efx_writeo(efx, ®, address); | ||
162 | efx_reado(efx, &buf, address); | ||
163 | |||
164 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
165 | goto fail; | ||
166 | } | ||
167 | |||
168 | efx_writeo(efx, &original, address); | ||
169 | } | ||
170 | |||
171 | return 0; | ||
172 | |||
173 | fail: | ||
174 | EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | ||
175 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | ||
176 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
177 | return -EIO; | ||
178 | } | ||
179 | |||
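efx_nic_test_registers() walks a caller-supplied table, so each hardware revision only has to list which register bits are safe to toggle. A hypothetical entry and the ->test_registers() hook that would run it (register choice and mask value are purely illustrative):

        static const struct efx_nic_register_test example_register_tests[] = {
                { FR_AZ_RX_CFG, EFX_OWORD32(0xfffffffe, 0x0001ffff, 0, 0) },
        };

        static int example_test_registers(struct efx_nic *efx)
        {
                return efx_nic_test_registers(efx, example_register_tests,
                                              ARRAY_SIZE(example_register_tests));
        }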
180 | /************************************************************************** | ||
181 | * | ||
182 | * Special buffer handling | ||
183 | * Special buffers are used for event queues and the TX and RX | ||
184 | * descriptor rings. | ||
185 | * | ||
186 | *************************************************************************/ | ||
187 | |||
188 | /* | ||
189 | * Initialise a special buffer | ||
190 | * | ||
191 | * This will define a buffer (previously allocated via | ||
192 | * efx_alloc_special_buffer()) in the buffer table, allowing | ||
193 | * it to be used for event queues, descriptor rings etc. | ||
194 | */ | ||
195 | static void | ||
196 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
197 | { | ||
198 | efx_qword_t buf_desc; | ||
199 | int index; | ||
200 | dma_addr_t dma_addr; | ||
201 | int i; | ||
202 | |||
203 | EFX_BUG_ON_PARANOID(!buffer->addr); | ||
204 | |||
205 | /* Write buffer descriptors to NIC */ | ||
206 | for (i = 0; i < buffer->entries; i++) { | ||
207 | index = buffer->index + i; | ||
208 | dma_addr = buffer->dma_addr + (i * 4096); | ||
209 | EFX_LOG(efx, "mapping special buffer %d at %llx\n", | ||
210 | index, (unsigned long long)dma_addr); | ||
211 | EFX_POPULATE_QWORD_3(buf_desc, | ||
212 | FRF_AZ_BUF_ADR_REGION, 0, | ||
213 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | ||
214 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | ||
215 | efx_write_buf_tbl(efx, &buf_desc, index); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* Unmaps a buffer and clears the buffer table entries */ | ||
220 | static void | ||
221 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
222 | { | ||
223 | efx_oword_t buf_tbl_upd; | ||
224 | unsigned int start = buffer->index; | ||
225 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
226 | |||
227 | if (!buffer->entries) | ||
228 | return; | ||
229 | |||
230 | EFX_LOG(efx, "unmapping special buffers %d-%d\n", | ||
231 | buffer->index, buffer->index + buffer->entries - 1); | ||
232 | |||
233 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
234 | FRF_AZ_BUF_UPD_CMD, 0, | ||
235 | FRF_AZ_BUF_CLR_CMD, 1, | ||
236 | FRF_AZ_BUF_CLR_END_ID, end, | ||
237 | FRF_AZ_BUF_CLR_START_ID, start); | ||
238 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Allocate a new special buffer | ||
243 | * | ||
244 | * This allocates memory for a new buffer, clears it and allocates a | ||
245 | * new buffer ID range. It does not write into the buffer table. | ||
246 | * | ||
247 | * This call will allocate 4KB buffers, since 8KB buffers can't be | ||
248 | * used for event queues and descriptor rings. | ||
249 | */ | ||
250 | static int efx_alloc_special_buffer(struct efx_nic *efx, | ||
251 | struct efx_special_buffer *buffer, | ||
252 | unsigned int len) | ||
253 | { | ||
254 | len = ALIGN(len, EFX_BUF_SIZE); | ||
255 | |||
256 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
257 | &buffer->dma_addr); | ||
258 | if (!buffer->addr) | ||
259 | return -ENOMEM; | ||
260 | buffer->len = len; | ||
261 | buffer->entries = len / EFX_BUF_SIZE; | ||
262 | BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); | ||
263 | |||
264 | /* All zeros is a potentially valid event so memset to 0xff */ | ||
265 | memset(buffer->addr, 0xff, len); | ||
266 | |||
267 | /* Select new buffer ID */ | ||
268 | buffer->index = efx->next_buffer_table; | ||
269 | efx->next_buffer_table += buffer->entries; | ||
270 | |||
271 | EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " | ||
272 | "(virt %p phys %llx)\n", buffer->index, | ||
273 | buffer->index + buffer->entries - 1, | ||
274 | (u64)buffer->dma_addr, len, | ||
275 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static void | ||
281 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
282 | { | ||
283 | if (!buffer->addr) | ||
284 | return; | ||
285 | |||
286 | EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " | ||
287 | "(virt %p phys %llx)\n", buffer->index, | ||
288 | buffer->index + buffer->entries - 1, | ||
289 | (u64)buffer->dma_addr, buffer->len, | ||
290 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
291 | |||
292 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, | ||
293 | buffer->dma_addr); | ||
294 | buffer->addr = NULL; | ||
295 | buffer->entries = 0; | ||
296 | } | ||
297 | |||
298 | /************************************************************************** | ||
299 | * | ||
300 | * Generic buffer handling | ||
301 | * These buffers are used for interrupt status and MAC stats | ||
302 | * | ||
303 | **************************************************************************/ | ||
304 | |||
305 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | ||
306 | unsigned int len) | ||
307 | { | ||
308 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
309 | &buffer->dma_addr); | ||
310 | if (!buffer->addr) | ||
311 | return -ENOMEM; | ||
312 | buffer->len = len; | ||
313 | memset(buffer->addr, 0, len); | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | ||
318 | { | ||
319 | if (buffer->addr) { | ||
320 | pci_free_consistent(efx->pci_dev, buffer->len, | ||
321 | buffer->addr, buffer->dma_addr); | ||
322 | buffer->addr = NULL; | ||
323 | } | ||
324 | } | ||
325 | |||
326 | /************************************************************************** | ||
327 | * | ||
328 | * TX path | ||
329 | * | ||
330 | **************************************************************************/ | ||
331 | |||
332 | /* Returns a pointer to the specified transmit descriptor in the TX | ||
333 | * descriptor queue belonging to the specified channel. | ||
334 | */ | ||
335 | static inline efx_qword_t * | ||
336 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) | ||
337 | { | ||
338 | return (((efx_qword_t *) (tx_queue->txd.addr)) + index); | ||
339 | } | ||
340 | |||
341 | /* This writes to the TX_DESC_WPTR register, the write pointer for the TX descriptor ring */ | ||
342 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
343 | { | ||
344 | unsigned write_ptr; | ||
345 | efx_dword_t reg; | ||
346 | |||
347 | write_ptr = tx_queue->write_count & EFX_TXQ_MASK; | ||
348 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | ||
349 | efx_writed_page(tx_queue->efx, ®, | ||
350 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | ||
351 | } | ||
352 | |||
353 | |||
354 | /* For each entry inserted into the software descriptor ring, create a | ||
355 | * descriptor in the hardware TX descriptor ring (in host memory), and | ||
356 | * write a doorbell. | ||
357 | */ | ||
358 | void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | ||
359 | { | ||
360 | |||
361 | struct efx_tx_buffer *buffer; | ||
362 | efx_qword_t *txd; | ||
363 | unsigned write_ptr; | ||
364 | |||
365 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | ||
366 | |||
367 | do { | ||
368 | write_ptr = tx_queue->write_count & EFX_TXQ_MASK; | ||
369 | buffer = &tx_queue->buffer[write_ptr]; | ||
370 | txd = efx_tx_desc(tx_queue, write_ptr); | ||
371 | ++tx_queue->write_count; | ||
372 | |||
373 | /* Create TX descriptor ring entry */ | ||
374 | EFX_POPULATE_QWORD_4(*txd, | ||
375 | FSF_AZ_TX_KER_CONT, buffer->continuation, | ||
376 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, | ||
377 | FSF_AZ_TX_KER_BUF_REGION, 0, | ||
378 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); | ||
379 | } while (tx_queue->write_count != tx_queue->insert_count); | ||
380 | |||
381 | wmb(); /* Ensure descriptors are written before they are fetched */ | ||
382 | efx_notify_tx_desc(tx_queue); | ||
383 | } | ||
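A quick aside on the counter arithmetic: write_count and insert_count increase without bound, and only the final mask with EFX_TXQ_MASK (ring size minus one) turns them into ring slots, so the do/while loop above handles wrap-around naturally. A minimal sketch of that arithmetic, assuming a hypothetical 512-entry ring and made-up counter values:

    #include <stdio.h>

    #define TXQ_SIZE 512u                       /* hypothetical ring size (a power of two) */
    #define TXQ_MASK (TXQ_SIZE - 1u)

    int main(void)
    {
            unsigned int insert_count = 1030;   /* descriptors queued by the TX path */
            unsigned int write_count = 1027;    /* descriptors already pushed to hardware */

            while (write_count != insert_count) {
                    printf("fill hardware descriptor in ring slot %u\n",
                           write_count & TXQ_MASK);
                    ++write_count;
            }
            printf("doorbell written with pointer %u\n", write_count & TXQ_MASK);
            return 0;
    }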
384 | |||
385 | /* Allocate hardware resources for a TX queue */ | ||
386 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) | ||
387 | { | ||
388 | struct efx_nic *efx = tx_queue->efx; | ||
389 | BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 || | ||
390 | EFX_TXQ_SIZE & EFX_TXQ_MASK); | ||
391 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | ||
392 | EFX_TXQ_SIZE * sizeof(efx_qword_t)); | ||
393 | } | ||
394 | |||
395 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) | ||
396 | { | ||
397 | efx_oword_t tx_desc_ptr; | ||
398 | struct efx_nic *efx = tx_queue->efx; | ||
399 | |||
400 | tx_queue->flushed = FLUSH_NONE; | ||
401 | |||
402 | /* Pin TX descriptor ring */ | ||
403 | efx_init_special_buffer(efx, &tx_queue->txd); | ||
404 | |||
405 | /* Push TX descriptor ring to card */ | ||
406 | EFX_POPULATE_OWORD_10(tx_desc_ptr, | ||
407 | FRF_AZ_TX_DESCQ_EN, 1, | ||
408 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, | ||
409 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, | ||
410 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | ||
411 | FRF_AZ_TX_DESCQ_EVQ_ID, | ||
412 | tx_queue->channel->channel, | ||
413 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, | ||
414 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, | ||
415 | FRF_AZ_TX_DESCQ_SIZE, | ||
416 | __ffs(tx_queue->txd.entries), | ||
417 | FRF_AZ_TX_DESCQ_TYPE, 0, | ||
418 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | ||
419 | |||
420 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
421 | int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; | ||
422 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); | ||
423 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, | ||
424 | !csum); | ||
425 | } | ||
426 | |||
427 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
428 | tx_queue->queue); | ||
429 | |||
430 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { | ||
431 | efx_oword_t reg; | ||
432 | |||
433 | /* Only 128 bits in this register */ | ||
434 | BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); | ||
435 | |||
436 | efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
437 | if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) | ||
438 | clear_bit_le(tx_queue->queue, (void *)®); | ||
439 | else | ||
440 | set_bit_le(tx_queue->queue, (void *)®); | ||
441 | efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); | ||
442 | } | ||
443 | } | ||
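The DESCQ_SIZE field above is written as __ffs(entries); since the entry count is constrained to a power of two, __ffs() (the bit index of the lowest set bit) is simply log2 of the count. This is only an observation about the arithmetic, not a statement about the register encoding itself. A portable stand-in, for illustration:

    #include <stdio.h>

    /* Portable stand-in for the kernel's __ffs(): bit index of the lowest set
     * bit.  For a power of two this equals log2 of the value. */
    static unsigned int ffs0(unsigned long x)
    {
            unsigned int n = 0;

            while (!(x & 1ul)) {
                    x >>= 1;
                    n++;
            }
            return n;
    }

    int main(void)
    {
            unsigned long entries;

            for (entries = 1; entries <= 16; entries <<= 1)
                    printf("__ffs(%2lu) = %u\n", entries, ffs0(entries));
            return 0;
    }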
444 | |||
445 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
446 | { | ||
447 | struct efx_nic *efx = tx_queue->efx; | ||
448 | efx_oword_t tx_flush_descq; | ||
449 | |||
450 | tx_queue->flushed = FLUSH_PENDING; | ||
451 | |||
452 | /* Post a flush command */ | ||
453 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
454 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | ||
455 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | ||
456 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | ||
457 | } | ||
458 | |||
459 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) | ||
460 | { | ||
461 | struct efx_nic *efx = tx_queue->efx; | ||
462 | efx_oword_t tx_desc_ptr; | ||
463 | |||
464 | /* The queue should have been flushed */ | ||
465 | WARN_ON(tx_queue->flushed != FLUSH_DONE); | ||
466 | |||
467 | /* Remove TX descriptor ring from card */ | ||
468 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
469 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
470 | tx_queue->queue); | ||
471 | |||
472 | /* Unpin TX descriptor ring */ | ||
473 | efx_fini_special_buffer(efx, &tx_queue->txd); | ||
474 | } | ||
475 | |||
476 | /* Free buffers backing TX queue */ | ||
477 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) | ||
478 | { | ||
479 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
480 | } | ||
481 | |||
482 | /************************************************************************** | ||
483 | * | ||
484 | * RX path | ||
485 | * | ||
486 | **************************************************************************/ | ||
487 | |||
488 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | ||
489 | static inline efx_qword_t * | ||
490 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | ||
491 | { | ||
492 | return (((efx_qword_t *) (rx_queue->rxd.addr)) + index); | ||
493 | } | ||
494 | |||
495 | /* This creates an entry in the RX descriptor queue */ | ||
496 | static inline void | ||
497 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | ||
498 | { | ||
499 | struct efx_rx_buffer *rx_buf; | ||
500 | efx_qword_t *rxd; | ||
501 | |||
502 | rxd = efx_rx_desc(rx_queue, index); | ||
503 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
504 | EFX_POPULATE_QWORD_3(*rxd, | ||
505 | FSF_AZ_RX_KER_BUF_SIZE, | ||
506 | rx_buf->len - | ||
507 | rx_queue->efx->type->rx_buffer_padding, | ||
508 | FSF_AZ_RX_KER_BUF_REGION, 0, | ||
509 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | ||
510 | } | ||
511 | |||
512 | /* This writes to the RX_DESC_WPTR register for the specified receive | ||
513 | * descriptor ring. | ||
514 | */ | ||
515 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) | ||
516 | { | ||
517 | efx_dword_t reg; | ||
518 | unsigned write_ptr; | ||
519 | |||
520 | while (rx_queue->notified_count != rx_queue->added_count) { | ||
521 | efx_build_rx_desc(rx_queue, | ||
522 | rx_queue->notified_count & | ||
523 | EFX_RXQ_MASK); | ||
524 | ++rx_queue->notified_count; | ||
525 | } | ||
526 | |||
527 | wmb(); | ||
528 | write_ptr = rx_queue->added_count & EFX_RXQ_MASK; | ||
529 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | ||
530 | efx_writed_page(rx_queue->efx, ®, | ||
531 | FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue); | ||
532 | } | ||
533 | |||
534 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) | ||
535 | { | ||
536 | struct efx_nic *efx = rx_queue->efx; | ||
537 | BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 || | ||
538 | EFX_RXQ_SIZE & EFX_RXQ_MASK); | ||
539 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | ||
540 | EFX_RXQ_SIZE * sizeof(efx_qword_t)); | ||
541 | } | ||
542 | |||
543 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) | ||
544 | { | ||
545 | efx_oword_t rx_desc_ptr; | ||
546 | struct efx_nic *efx = rx_queue->efx; | ||
547 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | ||
548 | bool iscsi_digest_en = is_b0; | ||
549 | |||
550 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | ||
551 | rx_queue->queue, rx_queue->rxd.index, | ||
552 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
553 | |||
554 | rx_queue->flushed = FLUSH_NONE; | ||
555 | |||
556 | /* Pin RX descriptor ring */ | ||
557 | efx_init_special_buffer(efx, &rx_queue->rxd); | ||
558 | |||
559 | /* Push RX descriptor ring to card */ | ||
560 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | ||
561 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, | ||
562 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, | ||
563 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | ||
564 | FRF_AZ_RX_DESCQ_EVQ_ID, | ||
565 | rx_queue->channel->channel, | ||
566 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, | ||
567 | FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue, | ||
568 | FRF_AZ_RX_DESCQ_SIZE, | ||
569 | __ffs(rx_queue->rxd.entries), | ||
570 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
571 | /* For >=B0 this is scatter so disable */ | ||
572 | FRF_AZ_RX_DESCQ_JUMBO, !is_b0, | ||
573 | FRF_AZ_RX_DESCQ_EN, 1); | ||
574 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
575 | rx_queue->queue); | ||
576 | } | ||
577 | |||
578 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
579 | { | ||
580 | struct efx_nic *efx = rx_queue->efx; | ||
581 | efx_oword_t rx_flush_descq; | ||
582 | |||
583 | rx_queue->flushed = FLUSH_PENDING; | ||
584 | |||
585 | /* Post a flush command */ | ||
586 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
587 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | ||
588 | FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue); | ||
589 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | ||
590 | } | ||
591 | |||
592 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) | ||
593 | { | ||
594 | efx_oword_t rx_desc_ptr; | ||
595 | struct efx_nic *efx = rx_queue->efx; | ||
596 | |||
597 | /* The queue should already have been flushed */ | ||
598 | WARN_ON(rx_queue->flushed != FLUSH_DONE); | ||
599 | |||
600 | /* Remove RX descriptor ring from card */ | ||
601 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
602 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
603 | rx_queue->queue); | ||
604 | |||
605 | /* Unpin RX descriptor ring */ | ||
606 | efx_fini_special_buffer(efx, &rx_queue->rxd); | ||
607 | } | ||
608 | |||
609 | /* Free buffers backing RX queue */ | ||
610 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) | ||
611 | { | ||
612 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
613 | } | ||
614 | |||
615 | /************************************************************************** | ||
616 | * | ||
617 | * Event queue processing | ||
618 | * Event queues are processed by per-channel tasklets. | ||
619 | * | ||
620 | **************************************************************************/ | ||
621 | |||
622 | /* Update a channel's event queue's read pointer (RPTR) register | ||
623 | * | ||
624 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
625 | * event queue. | ||
626 | */ | ||
627 | void efx_nic_eventq_read_ack(struct efx_channel *channel) | ||
628 | { | ||
629 | efx_dword_t reg; | ||
630 | struct efx_nic *efx = channel->efx; | ||
631 | |||
632 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); | ||
633 | efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, | ||
634 | channel->channel); | ||
635 | } | ||
636 | |||
637 | /* Use HW to insert a SW defined event */ | ||
638 | void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) | ||
639 | { | ||
640 | efx_oword_t drv_ev_reg; | ||
641 | |||
642 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || | ||
643 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); | ||
644 | drv_ev_reg.u32[0] = event->u32[0]; | ||
645 | drv_ev_reg.u32[1] = event->u32[1]; | ||
646 | drv_ev_reg.u32[2] = 0; | ||
647 | drv_ev_reg.u32[3] = 0; | ||
648 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); | ||
649 | efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); | ||
650 | } | ||
651 | |||
652 | /* Handle a transmit completion event | ||
653 | * | ||
654 | * The NIC batches TX completion events; the message we receive is of | ||
655 | * the form "complete all TX events up to this index". | ||
656 | */ | ||
657 | static void | ||
658 | efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | ||
659 | { | ||
660 | unsigned int tx_ev_desc_ptr; | ||
661 | unsigned int tx_ev_q_label; | ||
662 | struct efx_tx_queue *tx_queue; | ||
663 | struct efx_nic *efx = channel->efx; | ||
664 | |||
665 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | ||
666 | /* Transmit completion */ | ||
667 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); | ||
668 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
669 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | ||
670 | channel->irq_mod_score += | ||
671 | (tx_ev_desc_ptr - tx_queue->read_count) & | ||
672 | EFX_TXQ_MASK; | ||
673 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | ||
674 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | ||
675 | /* Rewrite the FIFO write pointer */ | ||
676 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | ||
677 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | ||
678 | |||
679 | if (efx_dev_registered(efx)) | ||
680 | netif_tx_lock(efx->net_dev); | ||
681 | efx_notify_tx_desc(tx_queue); | ||
682 | if (efx_dev_registered(efx)) | ||
683 | netif_tx_unlock(efx->net_dev); | ||
684 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && | ||
685 | EFX_WORKAROUND_10727(efx)) { | ||
686 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
687 | } else { | ||
688 | EFX_ERR(efx, "channel %d unexpected TX event " | ||
689 | EFX_QWORD_FMT"\n", channel->channel, | ||
690 | EFX_QWORD_VAL(*event)); | ||
691 | } | ||
692 | } | ||
693 | |||
694 | /* Detect errors included in the rx_ev_pkt_ok bit. */ | ||
695 | static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | ||
696 | const efx_qword_t *event, | ||
697 | bool *rx_ev_pkt_ok, | ||
698 | bool *discard) | ||
699 | { | ||
700 | struct efx_nic *efx = rx_queue->efx; | ||
701 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | ||
702 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | ||
703 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | ||
704 | bool rx_ev_other_err, rx_ev_pause_frm; | ||
705 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
706 | unsigned rx_ev_pkt_type; | ||
707 | |||
708 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
709 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
710 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); | ||
711 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); | ||
712 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | ||
713 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); | ||
714 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | ||
715 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); | ||
716 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | ||
717 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); | ||
718 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); | ||
719 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); | ||
720 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? | ||
721 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); | ||
722 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); | ||
723 | |||
724 | /* Every error apart from tobe_disc and pause_frm */ | ||
725 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | ||
726 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | ||
727 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | ||
728 | |||
729 | /* Count errors that are not in MAC stats. Ignore expected | ||
730 | * checksum errors during self-test. */ | ||
731 | if (rx_ev_frm_trunc) | ||
732 | ++rx_queue->channel->n_rx_frm_trunc; | ||
733 | else if (rx_ev_tobe_disc) | ||
734 | ++rx_queue->channel->n_rx_tobe_disc; | ||
735 | else if (!efx->loopback_selftest) { | ||
736 | if (rx_ev_ip_hdr_chksum_err) | ||
737 | ++rx_queue->channel->n_rx_ip_hdr_chksum_err; | ||
738 | else if (rx_ev_tcp_udp_chksum_err) | ||
739 | ++rx_queue->channel->n_rx_tcp_udp_chksum_err; | ||
740 | } | ||
741 | |||
742 | /* The frame must be discarded if any of these are true. */ | ||
743 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | ||
744 | rx_ev_tobe_disc | rx_ev_pause_frm); | ||
745 | |||
746 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | ||
747 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | ||
748 | * to a FIFO overflow. | ||
749 | */ | ||
750 | #ifdef EFX_ENABLE_DEBUG | ||
751 | if (rx_ev_other_err) { | ||
752 | EFX_INFO_RL(efx, " RX queue %d unexpected RX event " | ||
753 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | ||
754 | rx_queue->queue, EFX_QWORD_VAL(*event), | ||
755 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | ||
756 | rx_ev_ip_hdr_chksum_err ? | ||
757 | " [IP_HDR_CHKSUM_ERR]" : "", | ||
758 | rx_ev_tcp_udp_chksum_err ? | ||
759 | " [TCP_UDP_CHKSUM_ERR]" : "", | ||
760 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | ||
761 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | ||
762 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | ||
763 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | ||
764 | rx_ev_pause_frm ? " [PAUSE]" : ""); | ||
765 | } | ||
766 | #endif | ||
767 | } | ||
768 | |||
769 | /* Handle receive events that are not in-order. */ | ||
770 | static void | ||
771 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | ||
772 | { | ||
773 | struct efx_nic *efx = rx_queue->efx; | ||
774 | unsigned expected, dropped; | ||
775 | |||
776 | expected = rx_queue->removed_count & EFX_RXQ_MASK; | ||
777 | dropped = (index - expected) & EFX_RXQ_MASK; | ||
778 | EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", | ||
779 | dropped, index, expected); | ||
780 | |||
781 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
782 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
783 | } | ||
784 | |||
785 | /* Handle a packet received event | ||
786 | * | ||
787 | * The NIC gives a "discard" flag if it's a unicast packet with the | ||
788 | * wrong destination address. | ||
789 | * Also "is multicast" and "matches multicast filter" flags can be used to | ||
790 | * discard non-matching multicast packets. | ||
791 | */ | ||
792 | static void | ||
793 | efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | ||
794 | { | ||
795 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | ||
796 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
797 | unsigned expected_ptr; | ||
798 | bool rx_ev_pkt_ok, discard = false, checksummed; | ||
799 | struct efx_rx_queue *rx_queue; | ||
800 | struct efx_nic *efx = channel->efx; | ||
801 | |||
802 | /* Basic packet information */ | ||
803 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); | ||
804 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); | ||
805 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | ||
806 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); | ||
807 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); | ||
808 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != | ||
809 | channel->channel); | ||
810 | |||
811 | rx_queue = &efx->rx_queue[channel->channel]; | ||
812 | |||
813 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | ||
814 | expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK; | ||
815 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) | ||
816 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | ||
817 | |||
818 | if (likely(rx_ev_pkt_ok)) { | ||
819 | /* If packet is marked as OK and packet type is TCP/IP or | ||
820 | * UDP/IP, then we can rely on the hardware checksum. | ||
821 | */ | ||
822 | checksummed = | ||
823 | likely(efx->rx_checksum_enabled) && | ||
824 | (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || | ||
825 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP); | ||
826 | } else { | ||
827 | efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); | ||
828 | checksummed = false; | ||
829 | } | ||
830 | |||
831 | /* Detect multicast packets that didn't match the filter */ | ||
832 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | ||
833 | if (rx_ev_mcast_pkt) { | ||
834 | unsigned int rx_ev_mcast_hash_match = | ||
835 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); | ||
836 | |||
837 | if (unlikely(!rx_ev_mcast_hash_match)) { | ||
838 | ++channel->n_rx_mcast_mismatch; | ||
839 | discard = true; | ||
840 | } | ||
841 | } | ||
842 | |||
843 | channel->irq_mod_score += 2; | ||
844 | |||
845 | /* Handle received packet */ | ||
846 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, | ||
847 | checksummed, discard); | ||
848 | } | ||
849 | |||
850 | /* Global events are basically PHY events */ | ||
851 | static void | ||
852 | efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) | ||
853 | { | ||
854 | struct efx_nic *efx = channel->efx; | ||
855 | bool handled = false; | ||
856 | |||
857 | if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || | ||
858 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || | ||
859 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) { | ||
860 | /* Ignored */ | ||
861 | handled = true; | ||
862 | } | ||
863 | |||
864 | if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) && | ||
865 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { | ||
866 | efx->xmac_poll_required = true; | ||
867 | handled = true; | ||
868 | } | ||
869 | |||
870 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? | ||
871 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : | ||
872 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { | ||
873 | EFX_ERR(efx, "channel %d seen global RX_RESET " | ||
874 | "event. Resetting.\n", channel->channel); | ||
875 | |||
876 | atomic_inc(&efx->rx_reset); | ||
877 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | ||
878 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
879 | handled = true; | ||
880 | } | ||
881 | |||
882 | if (!handled) | ||
883 | EFX_ERR(efx, "channel %d unknown global event " | ||
884 | EFX_QWORD_FMT "\n", channel->channel, | ||
885 | EFX_QWORD_VAL(*event)); | ||
886 | } | ||
887 | |||
888 | static void | ||
889 | efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | ||
890 | { | ||
891 | struct efx_nic *efx = channel->efx; | ||
892 | unsigned int ev_sub_code; | ||
893 | unsigned int ev_sub_data; | ||
894 | |||
895 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); | ||
896 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | ||
897 | |||
898 | switch (ev_sub_code) { | ||
899 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | ||
900 | EFX_TRACE(efx, "channel %d TXQ %d flushed\n", | ||
901 | channel->channel, ev_sub_data); | ||
902 | break; | ||
903 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | ||
904 | EFX_TRACE(efx, "channel %d RXQ %d flushed\n", | ||
905 | channel->channel, ev_sub_data); | ||
906 | break; | ||
907 | case FSE_AZ_EVQ_INIT_DONE_EV: | ||
908 | EFX_LOG(efx, "channel %d EVQ %d initialised\n", | ||
909 | channel->channel, ev_sub_data); | ||
910 | break; | ||
911 | case FSE_AZ_SRM_UPD_DONE_EV: | ||
912 | EFX_TRACE(efx, "channel %d SRAM update done\n", | ||
913 | channel->channel); | ||
914 | break; | ||
915 | case FSE_AZ_WAKE_UP_EV: | ||
916 | EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", | ||
917 | channel->channel, ev_sub_data); | ||
918 | break; | ||
919 | case FSE_AZ_TIMER_EV: | ||
920 | EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", | ||
921 | channel->channel, ev_sub_data); | ||
922 | break; | ||
923 | case FSE_AA_RX_RECOVER_EV: | ||
924 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " | ||
925 | "Resetting.\n", channel->channel); | ||
926 | atomic_inc(&efx->rx_reset); | ||
927 | efx_schedule_reset(efx, | ||
928 | EFX_WORKAROUND_6555(efx) ? | ||
929 | RESET_TYPE_RX_RECOVERY : | ||
930 | RESET_TYPE_DISABLE); | ||
931 | break; | ||
932 | case FSE_BZ_RX_DSC_ERROR_EV: | ||
933 | EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." | ||
934 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
935 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | ||
936 | break; | ||
937 | case FSE_BZ_TX_DSC_ERROR_EV: | ||
938 | EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." | ||
939 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
940 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
941 | break; | ||
942 | default: | ||
943 | EFX_TRACE(efx, "channel %d unknown driver event code %d " | ||
944 | "data %04x\n", channel->channel, ev_sub_code, | ||
945 | ev_sub_data); | ||
946 | break; | ||
947 | } | ||
948 | } | ||
949 | |||
950 | int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota) | ||
951 | { | ||
952 | unsigned int read_ptr; | ||
953 | efx_qword_t event, *p_event; | ||
954 | int ev_code; | ||
955 | int rx_packets = 0; | ||
956 | |||
957 | read_ptr = channel->eventq_read_ptr; | ||
958 | |||
959 | do { | ||
960 | p_event = efx_event(channel, read_ptr); | ||
961 | event = *p_event; | ||
962 | |||
963 | if (!efx_event_present(&event)) | ||
964 | /* End of events */ | ||
965 | break; | ||
966 | |||
967 | EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n", | ||
968 | channel->channel, EFX_QWORD_VAL(event)); | ||
969 | |||
970 | /* Clear this event by marking it all ones */ | ||
971 | EFX_SET_QWORD(*p_event); | ||
972 | |||
973 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | ||
974 | |||
975 | switch (ev_code) { | ||
976 | case FSE_AZ_EV_CODE_RX_EV: | ||
977 | efx_handle_rx_event(channel, &event); | ||
978 | ++rx_packets; | ||
979 | break; | ||
980 | case FSE_AZ_EV_CODE_TX_EV: | ||
981 | efx_handle_tx_event(channel, &event); | ||
982 | break; | ||
983 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | ||
984 | channel->eventq_magic = EFX_QWORD_FIELD( | ||
985 | event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
986 | EFX_LOG(channel->efx, "channel %d received generated " | ||
987 | "event "EFX_QWORD_FMT"\n", channel->channel, | ||
988 | EFX_QWORD_VAL(event)); | ||
989 | break; | ||
990 | case FSE_AZ_EV_CODE_GLOBAL_EV: | ||
991 | efx_handle_global_event(channel, &event); | ||
992 | break; | ||
993 | case FSE_AZ_EV_CODE_DRIVER_EV: | ||
994 | efx_handle_driver_event(channel, &event); | ||
995 | break; | ||
996 | case FSE_CZ_EV_CODE_MCDI_EV: | ||
997 | efx_mcdi_process_event(channel, &event); | ||
998 | break; | ||
999 | default: | ||
1000 | EFX_ERR(channel->efx, "channel %d unknown event type %d" | ||
1001 | " (data " EFX_QWORD_FMT ")\n", channel->channel, | ||
1002 | ev_code, EFX_QWORD_VAL(event)); | ||
1003 | } | ||
1004 | |||
1005 | /* Increment read pointer */ | ||
1006 | read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; | ||
1007 | |||
1008 | } while (rx_packets < rx_quota); | ||
1009 | |||
1010 | channel->eventq_read_ptr = read_ptr; | ||
1011 | return rx_packets; | ||
1012 | } | ||
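The loop above relies on the all-ones convention set up at allocation time: the queue memory starts out as 0xff bytes, a slot holds a new event once the hardware has written something else there, and the driver retires the slot by writing all-ones back. The sketch below assumes efx_event_present() reduces to a simple not-all-ones test on a 64-bit entry, which is a simplification of the real macro-based check.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EVQ_SIZE 8u
    #define EVQ_MASK (EVQ_SIZE - 1u)

    static int event_present(uint64_t ev)
    {
            return ev != ~(uint64_t)0;          /* all-ones means "empty slot" */
    }

    int main(void)
    {
            uint64_t ring[EVQ_SIZE];
            unsigned int read_ptr = 0;

            memset(ring, 0xff, sizeof(ring));   /* freshly initialised, empty queue */
            ring[0] = 0x123456;                 /* pretend hardware delivered one event */

            while (event_present(ring[read_ptr & EVQ_MASK])) {
                    printf("handling event %#llx in slot %u\n",
                           (unsigned long long)ring[read_ptr & EVQ_MASK],
                           read_ptr & EVQ_MASK);
                    ring[read_ptr & EVQ_MASK] = ~(uint64_t)0;  /* clear by marking all-ones */
                    read_ptr++;
            }
            printf("no event present at read_ptr=%u\n", read_ptr);
            return 0;
    }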
1013 | |||
1014 | |||
1015 | /* Allocate buffer table entries for event queue */ | ||
1016 | int efx_nic_probe_eventq(struct efx_channel *channel) | ||
1017 | { | ||
1018 | struct efx_nic *efx = channel->efx; | ||
1019 | BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 || | ||
1020 | EFX_EVQ_SIZE & EFX_EVQ_MASK); | ||
1021 | return efx_alloc_special_buffer(efx, &channel->eventq, | ||
1022 | EFX_EVQ_SIZE * sizeof(efx_qword_t)); | ||
1023 | } | ||
1024 | |||
1025 | void efx_nic_init_eventq(struct efx_channel *channel) | ||
1026 | { | ||
1027 | efx_oword_t reg; | ||
1028 | struct efx_nic *efx = channel->efx; | ||
1029 | |||
1030 | EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", | ||
1031 | channel->channel, channel->eventq.index, | ||
1032 | channel->eventq.index + channel->eventq.entries - 1); | ||
1033 | |||
1034 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | ||
1035 | EFX_POPULATE_OWORD_3(reg, | ||
1036 | FRF_CZ_TIMER_Q_EN, 1, | ||
1037 | FRF_CZ_HOST_NOTIFY_MODE, 0, | ||
1038 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); | ||
1039 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1040 | } | ||
1041 | |||
1042 | /* Pin event queue buffer */ | ||
1043 | efx_init_special_buffer(efx, &channel->eventq); | ||
1044 | |||
1045 | /* Fill event queue with all ones (i.e. empty events) */ | ||
1046 | memset(channel->eventq.addr, 0xff, channel->eventq.len); | ||
1047 | |||
1048 | /* Push event queue to card */ | ||
1049 | EFX_POPULATE_OWORD_3(reg, | ||
1050 | FRF_AZ_EVQ_EN, 1, | ||
1051 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), | ||
1052 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); | ||
1053 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1054 | channel->channel); | ||
1055 | |||
1056 | efx->type->push_irq_moderation(channel); | ||
1057 | } | ||
1058 | |||
1059 | void efx_nic_fini_eventq(struct efx_channel *channel) | ||
1060 | { | ||
1061 | efx_oword_t reg; | ||
1062 | struct efx_nic *efx = channel->efx; | ||
1063 | |||
1064 | /* Remove event queue from card */ | ||
1065 | EFX_ZERO_OWORD(reg); | ||
1066 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | ||
1067 | channel->channel); | ||
1068 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
1069 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | ||
1070 | |||
1071 | /* Unpin event queue */ | ||
1072 | efx_fini_special_buffer(efx, &channel->eventq); | ||
1073 | } | ||
1074 | |||
1075 | /* Free buffers backing event queue */ | ||
1076 | void efx_nic_remove_eventq(struct efx_channel *channel) | ||
1077 | { | ||
1078 | efx_free_special_buffer(channel->efx, &channel->eventq); | ||
1079 | } | ||
1080 | |||
1081 | |||
1082 | /* Generates a test event on the event queue. A subsequent call to | ||
1083 | * process_eventq() should pick up the event and place the value of | ||
1084 | * "magic" into channel->eventq_magic; | ||
1085 | */ | ||
1086 | void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic) | ||
1087 | { | ||
1088 | efx_qword_t test_event; | ||
1089 | |||
1090 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | ||
1091 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
1092 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
1093 | efx_generate_event(channel, &test_event); | ||
1094 | } | ||
1095 | |||
1096 | /************************************************************************** | ||
1097 | * | ||
1098 | * Flush handling | ||
1099 | * | ||
1100 | **************************************************************************/ | ||
1101 | |||
1102 | |||
1103 | static void efx_poll_flush_events(struct efx_nic *efx) | ||
1104 | { | ||
1105 | struct efx_channel *channel = &efx->channel[0]; | ||
1106 | struct efx_tx_queue *tx_queue; | ||
1107 | struct efx_rx_queue *rx_queue; | ||
1108 | unsigned int read_ptr = channel->eventq_read_ptr; | ||
1109 | unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK; | ||
1110 | |||
1111 | do { | ||
1112 | efx_qword_t *event = efx_event(channel, read_ptr); | ||
1113 | int ev_code, ev_sub_code, ev_queue; | ||
1114 | bool ev_failed; | ||
1115 | |||
1116 | if (!efx_event_present(event)) | ||
1117 | break; | ||
1118 | |||
1119 | ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); | ||
1120 | ev_sub_code = EFX_QWORD_FIELD(*event, | ||
1121 | FSF_AZ_DRIVER_EV_SUBCODE); | ||
1122 | if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && | ||
1123 | ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { | ||
1124 | ev_queue = EFX_QWORD_FIELD(*event, | ||
1125 | FSF_AZ_DRIVER_EV_SUBDATA); | ||
1126 | if (ev_queue < EFX_TX_QUEUE_COUNT) { | ||
1127 | tx_queue = efx->tx_queue + ev_queue; | ||
1128 | tx_queue->flushed = FLUSH_DONE; | ||
1129 | } | ||
1130 | } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && | ||
1131 | ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { | ||
1132 | ev_queue = EFX_QWORD_FIELD( | ||
1133 | *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); | ||
1134 | ev_failed = EFX_QWORD_FIELD( | ||
1135 | *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); | ||
1136 | if (ev_queue < efx->n_rx_queues) { | ||
1137 | rx_queue = efx->rx_queue + ev_queue; | ||
1138 | rx_queue->flushed = | ||
1139 | ev_failed ? FLUSH_FAILED : FLUSH_DONE; | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | /* We're about to destroy the queue anyway, so | ||
1144 | * it's ok to throw away every non-flush event */ | ||
1145 | EFX_SET_QWORD(*event); | ||
1146 | |||
1147 | read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; | ||
1148 | } while (read_ptr != end_ptr); | ||
1149 | |||
1150 | channel->eventq_read_ptr = read_ptr; | ||
1151 | } | ||
1152 | |||
1153 | /* Handle tx and rx flushes at the same time, since they run in | ||
1154 | * parallel in the hardware and there's no reason for us to | ||
1155 | * serialise them */ | ||
1156 | int efx_nic_flush_queues(struct efx_nic *efx) | ||
1157 | { | ||
1158 | struct efx_rx_queue *rx_queue; | ||
1159 | struct efx_tx_queue *tx_queue; | ||
1160 | int i, tx_pending, rx_pending; | ||
1161 | |||
1162 | /* If necessary prepare the hardware for flushing */ | ||
1163 | efx->type->prepare_flush(efx); | ||
1164 | |||
1165 | /* Flush all tx queues in parallel */ | ||
1166 | efx_for_each_tx_queue(tx_queue, efx) | ||
1167 | efx_flush_tx_queue(tx_queue); | ||
1168 | |||
1169 | /* The hardware supports four concurrent rx flushes, each of which may | ||
1170 | * need to be retried if there is an outstanding descriptor fetch */ | ||
1171 | for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { | ||
1172 | rx_pending = tx_pending = 0; | ||
1173 | efx_for_each_rx_queue(rx_queue, efx) { | ||
1174 | if (rx_queue->flushed == FLUSH_PENDING) | ||
1175 | ++rx_pending; | ||
1176 | } | ||
1177 | efx_for_each_rx_queue(rx_queue, efx) { | ||
1178 | if (rx_pending == EFX_RX_FLUSH_COUNT) | ||
1179 | break; | ||
1180 | if (rx_queue->flushed == FLUSH_FAILED || | ||
1181 | rx_queue->flushed == FLUSH_NONE) { | ||
1182 | efx_flush_rx_queue(rx_queue); | ||
1183 | ++rx_pending; | ||
1184 | } | ||
1185 | } | ||
1186 | efx_for_each_tx_queue(tx_queue, efx) { | ||
1187 | if (tx_queue->flushed != FLUSH_DONE) | ||
1188 | ++tx_pending; | ||
1189 | } | ||
1190 | |||
1191 | if (rx_pending == 0 && tx_pending == 0) | ||
1192 | return 0; | ||
1193 | |||
1194 | msleep(EFX_FLUSH_INTERVAL); | ||
1195 | efx_poll_flush_events(efx); | ||
1196 | } | ||
1197 | |||
1198 | /* Mark the queues as all flushed. We're going to return failure | ||
1199 | * leading to a reset, or fake up success anyway */ | ||
1200 | efx_for_each_tx_queue(tx_queue, efx) { | ||
1201 | if (tx_queue->flushed != FLUSH_DONE) | ||
1202 | EFX_ERR(efx, "tx queue %d flush command timed out\n", | ||
1203 | tx_queue->queue); | ||
1204 | tx_queue->flushed = FLUSH_DONE; | ||
1205 | } | ||
1206 | efx_for_each_rx_queue(rx_queue, efx) { | ||
1207 | if (rx_queue->flushed != FLUSH_DONE) | ||
1208 | EFX_ERR(efx, "rx queue %d flush command timed out\n", | ||
1209 | rx_queue->queue); | ||
1210 | rx_queue->flushed = FLUSH_DONE; | ||
1211 | } | ||
1212 | |||
1213 | if (EFX_WORKAROUND_7803(efx)) | ||
1214 | return 0; | ||
1215 | |||
1216 | return -ETIMEDOUT; | ||
1217 | } | ||
1218 | |||
1219 | /************************************************************************** | ||
1220 | * | ||
1221 | * Hardware interrupts | ||
1222 | * The hardware interrupt handler does very little work; all the event | ||
1223 | * queue processing is carried out by per-channel tasklets. | ||
1224 | * | ||
1225 | **************************************************************************/ | ||
1226 | |||
1227 | /* Enable/disable/generate interrupts */ | ||
1228 | static inline void efx_nic_interrupts(struct efx_nic *efx, | ||
1229 | bool enabled, bool force) | ||
1230 | { | ||
1231 | efx_oword_t int_en_reg_ker; | ||
1232 | unsigned int level = 0; | ||
1233 | |||
1234 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | ||
1235 | /* Always set the level, even if we're generating a test | ||
1236 | * interrupt, because our legacy interrupt handler is safe */ | ||
1237 | level = 0x1f; | ||
1238 | |||
1239 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | ||
1240 | FRF_AZ_KER_INT_LEVE_SEL, level, | ||
1241 | FRF_AZ_KER_INT_KER, force, | ||
1242 | FRF_AZ_DRV_INT_EN_KER, enabled); | ||
1243 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | ||
1244 | } | ||
1245 | |||
1246 | void efx_nic_enable_interrupts(struct efx_nic *efx) | ||
1247 | { | ||
1248 | struct efx_channel *channel; | ||
1249 | |||
1250 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | ||
1251 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | ||
1252 | |||
1253 | /* Enable interrupts */ | ||
1254 | efx_nic_interrupts(efx, true, false); | ||
1255 | |||
1256 | /* Force processing of all the channels to get the EVQ RPTRs up to | ||
1257 | date */ | ||
1258 | efx_for_each_channel(channel, efx) | ||
1259 | efx_schedule_channel(channel); | ||
1260 | } | ||
1261 | |||
1262 | void efx_nic_disable_interrupts(struct efx_nic *efx) | ||
1263 | { | ||
1264 | /* Disable interrupts */ | ||
1265 | efx_nic_interrupts(efx, false, false); | ||
1266 | } | ||
1267 | |||
1268 | /* Generate a test interrupt | ||
1269 | * Interrupts must already have been enabled, otherwise nasty things | ||
1270 | * may happen. | ||
1271 | */ | ||
1272 | void efx_nic_generate_interrupt(struct efx_nic *efx) | ||
1273 | { | ||
1274 | efx_nic_interrupts(efx, true, true); | ||
1275 | } | ||
1276 | |||
1277 | /* Process a fatal interrupt | ||
1278 | * Disable bus mastering ASAP and schedule a reset | ||
1279 | */ | ||
1280 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | ||
1281 | { | ||
1282 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1283 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1284 | efx_oword_t fatal_intr; | ||
1285 | int error, mem_perr; | ||
1286 | |||
1287 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | ||
1288 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | ||
1289 | |||
1290 | EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " | ||
1291 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
1292 | EFX_OWORD_VAL(fatal_intr), | ||
1293 | error ? "disabling bus mastering" : "no recognised error"); | ||
1294 | if (error == 0) | ||
1295 | goto out; | ||
1296 | |||
1297 | /* If this is a memory parity error dump which blocks are offending */ | ||
1298 | mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER); | ||
1299 | if (mem_perr) { | ||
1300 | efx_oword_t reg; | ||
1301 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | ||
1302 | EFX_ERR(efx, "SYSTEM ERROR: memory parity error " | ||
1303 | EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); | ||
1304 | } | ||
1305 | |||
1306 | /* Disable both devices */ | ||
1307 | pci_clear_master(efx->pci_dev); | ||
1308 | if (efx_nic_is_dual_func(efx)) | ||
1309 | pci_clear_master(nic_data->pci_dev2); | ||
1310 | efx_nic_disable_interrupts(efx); | ||
1311 | |||
1312 | /* Count errors and reset or disable the NIC accordingly */ | ||
1313 | if (efx->int_error_count == 0 || | ||
1314 | time_after(jiffies, efx->int_error_expire)) { | ||
1315 | efx->int_error_count = 0; | ||
1316 | efx->int_error_expire = | ||
1317 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | ||
1318 | } | ||
1319 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | ||
1320 | EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); | ||
1321 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
1322 | } else { | ||
1323 | EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. " | ||
1324 | "NIC will be disabled\n"); | ||
1325 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
1326 | } | ||
1327 | out: | ||
1328 | return IRQ_HANDLED; | ||
1329 | } | ||
1330 | |||
1331 | /* Handle a legacy interrupt | ||
1332 | * Acknowledges the interrupt and schedules event queue processing. | ||
1333 | */ | ||
1334 | static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | ||
1335 | { | ||
1336 | struct efx_nic *efx = dev_id; | ||
1337 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1338 | irqreturn_t result = IRQ_NONE; | ||
1339 | struct efx_channel *channel; | ||
1340 | efx_dword_t reg; | ||
1341 | u32 queues; | ||
1342 | int syserr; | ||
1343 | |||
1344 | /* Read the ISR which also ACKs the interrupts */ | ||
1345 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | ||
1346 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | ||
1347 | |||
1348 | /* Check to see if we have a serious error condition */ | ||
1349 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1350 | if (unlikely(syserr)) | ||
1351 | return efx_nic_fatal_interrupt(efx); | ||
1352 | |||
1353 | if (queues != 0) { | ||
1354 | if (EFX_WORKAROUND_15783(efx)) | ||
1355 | efx->irq_zero_count = 0; | ||
1356 | |||
1357 | /* Schedule processing of any interrupting queues */ | ||
1358 | efx_for_each_channel(channel, efx) { | ||
1359 | if (queues & 1) | ||
1360 | efx_schedule_channel(channel); | ||
1361 | queues >>= 1; | ||
1362 | } | ||
1363 | result = IRQ_HANDLED; | ||
1364 | |||
1365 | } else if (EFX_WORKAROUND_15783(efx) && | ||
1366 | efx->irq_zero_count++ == 0) { | ||
1367 | efx_qword_t *event; | ||
1368 | |||
1369 | /* Ensure we rearm all event queues */ | ||
1370 | efx_for_each_channel(channel, efx) { | ||
1371 | event = efx_event(channel, channel->eventq_read_ptr); | ||
1372 | if (efx_event_present(event)) | ||
1373 | efx_schedule_channel(channel); | ||
1374 | } | ||
1375 | |||
1376 | result = IRQ_HANDLED; | ||
1377 | } | ||
1378 | |||
1379 | if (result == IRQ_HANDLED) { | ||
1380 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1381 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | ||
1382 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
1383 | } else if (EFX_WORKAROUND_15783(efx)) { | ||
1384 | /* We can't return IRQ_HANDLED more than once on seeing ISR0=0 | ||
1385 | * because this might be a shared interrupt, but we do need to | ||
1386 | * check the channel every time and preemptively rearm it if | ||
1387 | * it's idle. */ | ||
1388 | efx_for_each_channel(channel, efx) { | ||
1389 | if (!channel->work_pending) | ||
1390 | efx_nic_eventq_read_ack(channel); | ||
1391 | } | ||
1392 | } | ||
1393 | |||
1394 | return result; | ||
1395 | } | ||
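The queues value read from ISR0 above is treated as a plain bitmask: bit n set means channel n's event queue has raised the interrupt, and each set bit schedules the matching channel. A trivial sketch of that walk, using a made-up ISR value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int queues = 0x15;         /* hypothetical ISR0 value: bits 0, 2, 4 */
            unsigned int channel = 0;

            while (queues) {
                    if (queues & 1)
                            printf("schedule channel %u\n", channel);
                    queues >>= 1;
                    channel++;
            }
            return 0;
    }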
1396 | |||
1397 | /* Handle an MSI interrupt | ||
1398 | * | ||
1399 | * Handle an MSI hardware interrupt. This routine schedules event | ||
1400 | * queue processing. No interrupt acknowledgement cycle is necessary. | ||
1401 | * Also, we never need to check that the interrupt is for us, since | ||
1402 | * MSI interrupts cannot be shared. | ||
1403 | */ | ||
1404 | static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) | ||
1405 | { | ||
1406 | struct efx_channel *channel = dev_id; | ||
1407 | struct efx_nic *efx = channel->efx; | ||
1408 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1409 | int syserr; | ||
1410 | |||
1411 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1412 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
1413 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
1414 | |||
1415 | /* Check to see if we have a serious error condition */ | ||
1416 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | ||
1417 | if (unlikely(syserr)) | ||
1418 | return efx_nic_fatal_interrupt(efx); | ||
1419 | |||
1420 | /* Schedule processing of the channel */ | ||
1421 | efx_schedule_channel(channel); | ||
1422 | |||
1423 | return IRQ_HANDLED; | ||
1424 | } | ||
1425 | |||
1426 | |||
1427 | /* Setup RSS indirection table. | ||
1428 | * This maps from the hash value of the packet to RXQ | ||
1429 | */ | ||
1430 | static void efx_setup_rss_indir_table(struct efx_nic *efx) | ||
1431 | { | ||
1432 | int i = 0; | ||
1433 | unsigned long offset; | ||
1434 | efx_dword_t dword; | ||
1435 | |||
1436 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
1437 | return; | ||
1438 | |||
1439 | for (offset = FR_BZ_RX_INDIRECTION_TBL; | ||
1440 | offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; | ||
1441 | offset += 0x10) { | ||
1442 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | ||
1443 | i % efx->n_rx_queues); | ||
1444 | efx_writed(efx, &dword, offset); | ||
1445 | i++; | ||
1446 | } | ||
1447 | } | ||
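For reference, the loop above covers 0x800 bytes of table in 0x10-byte steps, i.e. 128 entries, filled round-robin across the RX queues. The sketch below reproduces that fill in plain C; how the hardware indexes the table with the packet hash is not shown in this listing, so the lookup line is an assumption for illustration only.

    #include <stdio.h>

    #define RSS_TABLE_ENTRIES 128u              /* 0x800 bytes / 0x10-byte stride */

    int main(void)
    {
            unsigned int n_rx_queues = 4;       /* hypothetical queue count */
            unsigned int table[RSS_TABLE_ENTRIES];
            unsigned int i;

            for (i = 0; i < RSS_TABLE_ENTRIES; i++)
                    table[i] = i % n_rx_queues;

            /* Assumed lookup: low bits of the RX hash select the table entry */
            printf("hash 0x1234 -> RX queue %u\n", table[0x1234 % RSS_TABLE_ENTRIES]);
            return 0;
    }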
1448 | |||
1449 | /* Hook interrupt handler(s) | ||
1450 | * Try MSI and then legacy interrupts. | ||
1451 | */ | ||
1452 | int efx_nic_init_interrupt(struct efx_nic *efx) | ||
1453 | { | ||
1454 | struct efx_channel *channel; | ||
1455 | int rc; | ||
1456 | |||
1457 | if (!EFX_INT_MODE_USE_MSI(efx)) { | ||
1458 | irq_handler_t handler; | ||
1459 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1460 | handler = efx_legacy_interrupt; | ||
1461 | else | ||
1462 | handler = falcon_legacy_interrupt_a1; | ||
1463 | |||
1464 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | ||
1465 | efx->name, efx); | ||
1466 | if (rc) { | ||
1467 | EFX_ERR(efx, "failed to hook legacy IRQ %d\n", | ||
1468 | efx->pci_dev->irq); | ||
1469 | goto fail1; | ||
1470 | } | ||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | /* Hook MSI or MSI-X interrupt */ | ||
1475 | efx_for_each_channel(channel, efx) { | ||
1476 | rc = request_irq(channel->irq, efx_msi_interrupt, | ||
1477 | IRQF_PROBE_SHARED, /* Not shared */ | ||
1478 | channel->name, channel); | ||
1479 | if (rc) { | ||
1480 | EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); | ||
1481 | goto fail2; | ||
1482 | } | ||
1483 | } | ||
1484 | |||
1485 | return 0; | ||
1486 | |||
1487 | fail2: | ||
1488 | efx_for_each_channel(channel, efx) | ||
1489 | free_irq(channel->irq, channel); | ||
1490 | fail1: | ||
1491 | return rc; | ||
1492 | } | ||
1493 | |||
1494 | void efx_nic_fini_interrupt(struct efx_nic *efx) | ||
1495 | { | ||
1496 | struct efx_channel *channel; | ||
1497 | efx_oword_t reg; | ||
1498 | |||
1499 | /* Disable MSI/MSI-X interrupts */ | ||
1500 | efx_for_each_channel(channel, efx) { | ||
1501 | if (channel->irq) | ||
1502 | free_irq(channel->irq, channel); | ||
1503 | } | ||
1504 | |||
1505 | /* ACK legacy interrupt */ | ||
1506 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1507 | efx_reado(efx, ®, FR_BZ_INT_ISR0); | ||
1508 | else | ||
1509 | falcon_irq_ack_a1(efx); | ||
1510 | |||
1511 | /* Disable legacy interrupt */ | ||
1512 | if (efx->legacy_irq) | ||
1513 | free_irq(efx->legacy_irq, efx); | ||
1514 | } | ||
1515 | |||
1516 | u32 efx_nic_fpga_ver(struct efx_nic *efx) | ||
1517 | { | ||
1518 | efx_oword_t altera_build; | ||
1519 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | ||
1520 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | ||
1521 | } | ||
1522 | |||
1523 | void efx_nic_init_common(struct efx_nic *efx) | ||
1524 | { | ||
1525 | efx_oword_t temp; | ||
1526 | |||
1527 | /* Set positions of descriptor caches in SRAM. */ | ||
1528 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, | ||
1529 | efx->type->tx_dc_base / 8); | ||
1530 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | ||
1531 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, | ||
1532 | efx->type->rx_dc_base / 8); | ||
1533 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | ||
1534 | |||
1535 | /* Set TX descriptor cache size. */ | ||
1536 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | ||
1537 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | ||
1538 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | ||
1539 | |||
1540 | /* Set RX descriptor cache size. Set low watermark to size-8, as | ||
1541 | * this allows most efficient prefetching. | ||
1542 | */ | ||
1543 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | ||
1544 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | ||
1545 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | ||
1546 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | ||
1547 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | ||
1548 | |||
1549 | /* Program INT_KER address */ | ||
1550 | EFX_POPULATE_OWORD_2(temp, | ||
1551 | FRF_AZ_NORM_INT_VEC_DIS_KER, | ||
1552 | EFX_INT_MODE_USE_MSI(efx), | ||
1553 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | ||
1554 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | ||
1555 | |||
1556 | /* Enable all the genuinely fatal interrupts. (They are still | ||
1557 | * masked by the overall interrupt mask, controlled by | ||
1558 | * falcon_interrupts()). | ||
1559 | * | ||
1560 | * Note: All other fatal interrupts are enabled | ||
1561 | */ | ||
1562 | EFX_POPULATE_OWORD_3(temp, | ||
1563 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | ||
1564 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | ||
1565 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | ||
1566 | EFX_INVERT_OWORD(temp); | ||
1567 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | ||
1568 | |||
1569 | efx_setup_rss_indir_table(efx); | ||
1570 | |||
1571 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | ||
1572 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | ||
1573 | */ | ||
1574 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | ||
1575 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | ||
1576 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | ||
1577 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | ||
1578 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); | ||
1579 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | ||
1580 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | ||
1581 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | ||
1582 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | ||
1583 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | ||
1584 | /* Disable hardware watchdog which can misfire */ | ||
1585 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | ||
1586 | /* Squash TX of packets of 16 bytes or less */ | ||
1587 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1588 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | ||
1589 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | ||
1590 | } | ||
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h new file mode 100644 index 000000000000..3166bafdfbef --- /dev/null +++ b/drivers/net/sfc/nic.h | |||
@@ -0,0 +1,261 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_NIC_H | ||
12 | #define EFX_NIC_H | ||
13 | |||
14 | #include <linux/i2c-algo-bit.h> | ||
15 | #include "net_driver.h" | ||
16 | #include "efx.h" | ||
17 | #include "mcdi.h" | ||
18 | |||
19 | /* | ||
20 | * Falcon hardware control | ||
21 | */ | ||
22 | |||
23 | enum { | ||
24 | EFX_REV_FALCON_A0 = 0, | ||
25 | EFX_REV_FALCON_A1 = 1, | ||
26 | EFX_REV_FALCON_B0 = 2, | ||
27 | EFX_REV_SIENA_A0 = 3, | ||
28 | }; | ||
29 | |||
30 | static inline int efx_nic_rev(struct efx_nic *efx) | ||
31 | { | ||
32 | return efx->type->revision; | ||
33 | } | ||
34 | |||
35 | extern u32 efx_nic_fpga_ver(struct efx_nic *efx); | ||
36 | |||
37 | static inline bool efx_nic_has_mc(struct efx_nic *efx) | ||
38 | { | ||
39 | return efx_nic_rev(efx) >= EFX_REV_SIENA_A0; | ||
40 | } | ||
41 | /* NIC has two interlinked PCI functions for the same port. */ | ||
42 | static inline bool efx_nic_is_dual_func(struct efx_nic *efx) | ||
43 | { | ||
44 | return efx_nic_rev(efx) < EFX_REV_FALCON_B0; | ||
45 | } | ||
46 | |||
47 | enum { | ||
48 | PHY_TYPE_NONE = 0, | ||
49 | PHY_TYPE_TXC43128 = 1, | ||
50 | PHY_TYPE_88E1111 = 2, | ||
51 | PHY_TYPE_SFX7101 = 3, | ||
52 | PHY_TYPE_QT2022C2 = 4, | ||
53 | PHY_TYPE_PM8358 = 6, | ||
54 | PHY_TYPE_SFT9001A = 8, | ||
55 | PHY_TYPE_QT2025C = 9, | ||
56 | PHY_TYPE_SFT9001B = 10, | ||
57 | }; | ||
58 | |||
59 | #define FALCON_XMAC_LOOPBACKS \ | ||
60 | ((1 << LOOPBACK_XGMII) | \ | ||
61 | (1 << LOOPBACK_XGXS) | \ | ||
62 | (1 << LOOPBACK_XAUI)) | ||
63 | |||
64 | #define FALCON_GMAC_LOOPBACKS \ | ||
65 | (1 << LOOPBACK_GMAC) | ||
66 | |||
67 | /** | ||
68 | * struct falcon_board_type - board operations and type information | ||
69 | * @id: Board type id, as found in NVRAM | ||
70 | * @ref_model: Model number of Solarflare reference design | ||
71 | * @gen_type: Generic board type description | ||
72 | * @init: Allocate resources and initialise peripheral hardware | ||
73 | * @init_phy: Do board-specific PHY initialisation | ||
74 | * @fini: Shut down hardware and free resources | ||
75 | * @set_id_led: Set state of identifying LED or revert to automatic function | ||
76 | * @monitor: Board-specific health check function | ||
77 | */ | ||
78 | struct falcon_board_type { | ||
79 | u8 id; | ||
80 | const char *ref_model; | ||
81 | const char *gen_type; | ||
82 | int (*init) (struct efx_nic *nic); | ||
83 | void (*init_phy) (struct efx_nic *efx); | ||
84 | void (*fini) (struct efx_nic *nic); | ||
85 | void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode); | ||
86 | int (*monitor) (struct efx_nic *nic); | ||
87 | }; | ||
88 | |||
89 | /** | ||
90 | * struct falcon_board - board information | ||
91 | * @type: Type of board | ||
92 | * @major: Major rev. ('A', 'B' ...) | ||
93 | * @minor: Minor rev. (0, 1, ...) | ||
94 | * @i2c_adap: I2C adapter for on-board peripherals | ||
95 | * @i2c_data: Data for bit-banging algorithm | ||
96 | * @hwmon_client: I2C client for hardware monitor | ||
97 | * @ioexp_client: I2C client for power/port control | ||
98 | */ | ||
99 | struct falcon_board { | ||
100 | const struct falcon_board_type *type; | ||
101 | int major; | ||
102 | int minor; | ||
103 | struct i2c_adapter i2c_adap; | ||
104 | struct i2c_algo_bit_data i2c_data; | ||
105 | struct i2c_client *hwmon_client, *ioexp_client; | ||
106 | }; | ||
107 | |||
108 | /** | ||
109 | * struct falcon_nic_data - Falcon NIC state | ||
110 | * @pci_dev2: Secondary function of Falcon A | ||
111 | * @board: Board state and functions | ||
112 | * @stats_disable_count: Nest count for disabling statistics fetches | ||
113 | * @stats_pending: Is there a pending DMA of MAC statistics. | ||
114 | * @stats_timer: A timer for regularly fetching MAC statistics. | ||
115 | * @stats_dma_done: Pointer to the flag which indicates DMA completion. | ||
116 | */ | ||
117 | struct falcon_nic_data { | ||
118 | struct pci_dev *pci_dev2; | ||
119 | struct falcon_board board; | ||
120 | unsigned int stats_disable_count; | ||
121 | bool stats_pending; | ||
122 | struct timer_list stats_timer; | ||
123 | u32 *stats_dma_done; | ||
124 | }; | ||
125 | |||
126 | static inline struct falcon_board *falcon_board(struct efx_nic *efx) | ||
127 | { | ||
128 | struct falcon_nic_data *data = efx->nic_data; | ||
129 | return &data->board; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * struct siena_nic_data - Siena NIC state | ||
134 | * @fw_version: Management controller firmware version | ||
135 | * @fw_build: Firmware build number | ||
136 | * @mcdi: Management-Controller-to-Driver Interface | ||
137 | * @wol_filter_id: Wake-on-LAN packet filter id | ||
138 | */ | ||
139 | struct siena_nic_data { | ||
140 | u64 fw_version; | ||
141 | u32 fw_build; | ||
142 | struct efx_mcdi_iface mcdi; | ||
143 | int wol_filter_id; | ||
144 | }; | ||
145 | |||
146 | extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); | ||
147 | |||
148 | extern struct efx_nic_type falcon_a1_nic_type; | ||
149 | extern struct efx_nic_type falcon_b0_nic_type; | ||
150 | extern struct efx_nic_type siena_a0_nic_type; | ||
151 | |||
152 | /************************************************************************** | ||
153 | * | ||
154 | * Externs | ||
155 | * | ||
156 | ************************************************************************** | ||
157 | */ | ||
158 | |||
159 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); | ||
160 | |||
161 | /* TX data path */ | ||
162 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | ||
163 | extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); | ||
164 | extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); | ||
165 | extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); | ||
166 | extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); | ||
167 | |||
168 | /* RX data path */ | ||
169 | extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); | ||
170 | extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); | ||
171 | extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); | ||
172 | extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); | ||
173 | extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); | ||
174 | |||
175 | /* Event data path */ | ||
176 | extern int efx_nic_probe_eventq(struct efx_channel *channel); | ||
177 | extern void efx_nic_init_eventq(struct efx_channel *channel); | ||
178 | extern void efx_nic_fini_eventq(struct efx_channel *channel); | ||
179 | extern void efx_nic_remove_eventq(struct efx_channel *channel); | ||
180 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); | ||
181 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); | ||
182 | |||
183 | /* MAC/PHY */ | ||
184 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); | ||
185 | extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); | ||
186 | extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh; | ||
187 | |||
188 | /* Interrupts and test events */ | ||
189 | extern int efx_nic_init_interrupt(struct efx_nic *efx); | ||
190 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); | ||
191 | extern void efx_nic_generate_test_event(struct efx_channel *channel, | ||
192 | unsigned int magic); | ||
193 | extern void efx_nic_generate_interrupt(struct efx_nic *efx); | ||
194 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); | ||
195 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); | ||
196 | extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); | ||
197 | extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); | ||
198 | extern void falcon_irq_ack_a1(struct efx_nic *efx); | ||
199 | |||
200 | #define EFX_IRQ_MOD_RESOLUTION 5 | ||
201 | |||
202 | /* Global Resources */ | ||
203 | extern int efx_nic_flush_queues(struct efx_nic *efx); | ||
204 | extern void falcon_start_nic_stats(struct efx_nic *efx); | ||
205 | extern void falcon_stop_nic_stats(struct efx_nic *efx); | ||
206 | extern int falcon_reset_xaui(struct efx_nic *efx); | ||
207 | extern void efx_nic_init_common(struct efx_nic *efx); | ||
208 | |||
209 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | ||
210 | unsigned int len); | ||
211 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); | ||
212 | |||
213 | /* Tests */ | ||
214 | struct efx_nic_register_test { | ||
215 | unsigned address; | ||
216 | efx_oword_t mask; | ||
217 | }; | ||
218 | extern int efx_nic_test_registers(struct efx_nic *efx, | ||
219 | const struct efx_nic_register_test *regs, | ||
220 | size_t n_regs); | ||
221 | |||
222 | /************************************************************************** | ||
223 | * | ||
224 | * Falcon MAC stats | ||
225 | * | ||
226 | ************************************************************************** | ||
227 | */ | ||
228 | |||
229 | #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) | ||
230 | #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) | ||
231 | |||
232 | /* Retrieve statistic from statistics block */ | ||
233 | #define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ | ||
234 | if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ | ||
235 | (efx)->mac_stats.efx_stat += le16_to_cpu( \ | ||
236 | *((__force __le16 *) \ | ||
237 | (efx->stats_buffer.addr + \ | ||
238 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
239 | else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ | ||
240 | (efx)->mac_stats.efx_stat += le32_to_cpu( \ | ||
241 | *((__force __le32 *) \ | ||
242 | (efx->stats_buffer.addr + \ | ||
243 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
244 | else \ | ||
245 | (efx)->mac_stats.efx_stat += le64_to_cpu( \ | ||
246 | *((__force __le64 *) \ | ||
247 | (efx->stats_buffer.addr + \ | ||
248 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
249 | } while (0) | ||
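For illustration, a caller updating the MAC statistics would invoke this macro once per counter. The names below are only illustrative: the first stands in for a hardware counter whose <name>_offset and <name>_WIDTH constants are defined with the stats layout, the second for the matching field of mac_stats:

	/* Illustrative counter and field names, shown only to demonstrate the macro */
	FALCON_STAT(efx, XgRxPkts, rx_packets);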
250 | |||
251 | #define FALCON_MAC_STATS_SIZE 0x100 | ||
252 | |||
253 | #define MAC_DATA_LBN 0 | ||
254 | #define MAC_DATA_WIDTH 32 | ||
255 | |||
256 | extern void efx_nic_generate_event(struct efx_channel *channel, | ||
257 | efx_qword_t *event); | ||
258 | |||
259 | extern void falcon_poll_xmac(struct efx_nic *efx); | ||
260 | |||
261 | #endif /* EFX_NIC_H */ | ||
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h index c1cff9c0c173..5bc26137257b 100644 --- a/drivers/net/sfc/phy.h +++ b/drivers/net/sfc/phy.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2007-2008 Solarflare Communications Inc. | 3 | * Copyright 2007-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -16,16 +16,16 @@ | |||
16 | extern struct efx_phy_operations falcon_sfx7101_phy_ops; | 16 | extern struct efx_phy_operations falcon_sfx7101_phy_ops; |
17 | extern struct efx_phy_operations falcon_sft9001_phy_ops; | 17 | extern struct efx_phy_operations falcon_sft9001_phy_ops; |
18 | 18 | ||
19 | extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); | 19 | extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); |
20 | 20 | ||
21 | /* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed | 21 | /* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed |
22 | * to boot due to corrupt flash, or some other negative error code. */ | 22 | * to boot due to corrupt flash, or some other negative error code. */ |
23 | extern int sft9001_wait_boot(struct efx_nic *efx); | 23 | extern int sft9001_wait_boot(struct efx_nic *efx); |
24 | 24 | ||
25 | /**************************************************************************** | 25 | /**************************************************************************** |
26 | * AMCC/Quake QT20xx PHYs | 26 | * AMCC/Quake QT202x PHYs |
27 | */ | 27 | */ |
28 | extern struct efx_phy_operations falcon_xfp_phy_ops; | 28 | extern struct efx_phy_operations falcon_qt202x_phy_ops; |
29 | 29 | ||
30 | /* These PHYs provide various H/W control states for LEDs */ | 30 | /* These PHYs provide various H/W control states for LEDs */ |
31 | #define QUAKE_LED_LINK_INVAL (0) | 31 | #define QUAKE_LED_LINK_INVAL (0) |
@@ -39,6 +39,23 @@ extern struct efx_phy_operations falcon_xfp_phy_ops; | |||
39 | #define QUAKE_LED_TXLINK (0) | 39 | #define QUAKE_LED_TXLINK (0) |
40 | #define QUAKE_LED_RXLINK (8) | 40 | #define QUAKE_LED_RXLINK (8) |
41 | 41 | ||
42 | extern void xfp_set_led(struct efx_nic *p, int led, int state); | 42 | extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); |
43 | |||
44 | /**************************************************************************** | ||
45 | * Siena managed PHYs | ||
46 | */ | ||
47 | extern struct efx_phy_operations efx_mcdi_phy_ops; | ||
48 | |||
49 | extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, | ||
50 | unsigned int prtad, unsigned int devad, | ||
51 | u16 addr, u16 *value_out, u32 *status_out); | ||
52 | extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, | ||
53 | unsigned int prtad, unsigned int devad, | ||
54 | u16 addr, u16 value, u32 *status_out); | ||
55 | extern void efx_mcdi_phy_decode_link(struct efx_nic *efx, | ||
56 | struct efx_link_state *link_state, | ||
57 | u32 speed, u32 flags, u32 fcntl); | ||
58 | extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx); | ||
59 | extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa); | ||
43 | 60 | ||
44 | #endif | 61 | #endif |
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c new file mode 100644 index 000000000000..e077bef08a50 --- /dev/null +++ b/drivers/net/sfc/qt202x_phy.c | |||
@@ -0,0 +1,450 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | /* | ||
10 | * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/timer.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include "efx.h" | ||
17 | #include "mdio_10g.h" | ||
18 | #include "phy.h" | ||
19 | #include "nic.h" | ||
20 | |||
21 | #define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \ | ||
22 | MDIO_DEVS_PMAPMD | \ | ||
23 | MDIO_DEVS_PHYXS) | ||
24 | |||
25 | #define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \ | ||
26 | (1 << LOOPBACK_PMAPMD) | \ | ||
27 | (1 << LOOPBACK_PHYXS_WS)) | ||
28 | |||
29 | /****************************************************************************/ | ||
30 | /* Quake-specific MDIO registers */ | ||
31 | #define MDIO_QUAKE_LED0_REG (0xD006) | ||
32 | |||
33 | /* QT2025C only */ | ||
34 | #define PCS_FW_HEARTBEAT_REG 0xd7ee | ||
35 | #define PCS_FW_HEARTB_LBN 0 | ||
36 | #define PCS_FW_HEARTB_WIDTH 8 | ||
37 | #define PCS_FW_PRODUCT_CODE_1 0xd7f0 | ||
38 | #define PCS_FW_VERSION_1 0xd7f3 | ||
39 | #define PCS_FW_BUILD_1 0xd7f6 | ||
40 | #define PCS_UC8051_STATUS_REG 0xd7fd | ||
41 | #define PCS_UC_STATUS_LBN 0 | ||
42 | #define PCS_UC_STATUS_WIDTH 8 | ||
43 | #define PCS_UC_STATUS_FW_SAVE 0x20 | ||
44 | #define PMA_PMD_FTX_CTRL2_REG 0xc309 | ||
45 | #define PMA_PMD_FTX_STATIC_LBN 13 | ||
46 | #define PMA_PMD_VEND1_REG 0xc001 | ||
47 | #define PMA_PMD_VEND1_LBTXD_LBN 15 | ||
48 | #define PCS_VEND1_REG 0xc000 | ||
49 | #define PCS_VEND1_LBTXD_LBN 5 | ||
50 | |||
51 | void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode) | ||
52 | { | ||
53 | int addr = MDIO_QUAKE_LED0_REG + led; | ||
54 | efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); | ||
55 | } | ||
56 | |||
57 | struct qt202x_phy_data { | ||
58 | enum efx_phy_mode phy_mode; | ||
59 | bool bug17190_in_bad_state; | ||
60 | unsigned long bug17190_timer; | ||
61 | u32 firmware_ver; | ||
62 | }; | ||
63 | |||
64 | #define QT2022C2_MAX_RESET_TIME 500 | ||
65 | #define QT2022C2_RESET_WAIT 10 | ||
66 | |||
67 | #define QT2025C_MAX_HEARTB_TIME (5 * HZ) | ||
68 | #define QT2025C_HEARTB_WAIT 100 | ||
69 | #define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10) | ||
70 | #define QT2025C_FWSTART_WAIT 100 | ||
71 | |||
72 | #define BUG17190_INTERVAL (2 * HZ) | ||
73 | |||
74 | static int qt2025c_wait_heartbeat(struct efx_nic *efx) | ||
75 | { | ||
76 | unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME; | ||
77 | int reg, old_counter = 0; | ||
78 | |||
79 | /* Wait for firmware heartbeat to start */ | ||
80 | for (;;) { | ||
81 | int counter; | ||
82 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG); | ||
83 | if (reg < 0) | ||
84 | return reg; | ||
85 | counter = ((reg >> PCS_FW_HEARTB_LBN) & | ||
86 | ((1 << PCS_FW_HEARTB_WIDTH) - 1)); | ||
87 | if (old_counter == 0) | ||
88 | old_counter = counter; | ||
89 | else if (counter != old_counter) | ||
90 | break; | ||
91 | if (time_after(jiffies, timeout)) { | ||
92 | /* Some cables have EEPROMs that conflict with the | ||
93 | * PHY's on-board EEPROM so it cannot load firmware */ | ||
94 | EFX_ERR(efx, "If an SFP+ direct attach cable is" | ||
95 | " connected, please check that it complies" | ||
96 | " with the SFP+ specification\n"); | ||
97 | return -ETIMEDOUT; | ||
98 | } | ||
99 | msleep(QT2025C_HEARTB_WAIT); | ||
100 | } | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static int qt2025c_wait_fw_status_good(struct efx_nic *efx) | ||
106 | { | ||
107 | unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME; | ||
108 | int reg; | ||
109 | |||
110 | /* Wait for firmware status to look good */ | ||
111 | for (;;) { | ||
112 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); | ||
113 | if (reg < 0) | ||
114 | return reg; | ||
115 | if ((reg & | ||
116 | ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >= | ||
117 | PCS_UC_STATUS_FW_SAVE) | ||
118 | break; | ||
119 | if (time_after(jiffies, timeout)) | ||
120 | return -ETIMEDOUT; | ||
121 | msleep(QT2025C_FWSTART_WAIT); | ||
122 | } | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static void qt2025c_restart_firmware(struct efx_nic *efx) | ||
128 | { | ||
129 | /* Restart microcontroller execution of firmware from RAM */ | ||
130 | efx_mdio_write(efx, 3, 0xe854, 0x00c0); | ||
131 | efx_mdio_write(efx, 3, 0xe854, 0x0040); | ||
132 | msleep(50); | ||
133 | } | ||
134 | |||
135 | static int qt2025c_wait_reset(struct efx_nic *efx) | ||
136 | { | ||
137 | int rc; | ||
138 | |||
139 | rc = qt2025c_wait_heartbeat(efx); | ||
140 | if (rc != 0) | ||
141 | return rc; | ||
142 | |||
143 | rc = qt2025c_wait_fw_status_good(efx); | ||
144 | if (rc == -ETIMEDOUT) { | ||
145 | /* Bug 17689: occasionally heartbeat starts but firmware status | ||
146 | * code never progresses beyond 0x00. Try again, once, after | ||
147 | * restarting execution of the firmware image. */ | ||
148 | EFX_LOG(efx, "bashing QT2025C microcontroller\n"); | ||
149 | qt2025c_restart_firmware(efx); | ||
150 | rc = qt2025c_wait_heartbeat(efx); | ||
151 | if (rc != 0) | ||
152 | return rc; | ||
153 | rc = qt2025c_wait_fw_status_good(efx); | ||
154 | } | ||
155 | |||
156 | return rc; | ||
157 | } | ||
158 | |||
159 | static void qt2025c_firmware_id(struct efx_nic *efx) | ||
160 | { | ||
161 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
162 | u8 firmware_id[9]; | ||
163 | size_t i; | ||
164 | |||
165 | for (i = 0; i < sizeof(firmware_id); i++) | ||
166 | firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, | ||
167 | PCS_FW_PRODUCT_CODE_1 + i); | ||
168 | EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", | ||
169 | (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], | ||
170 | firmware_id[3] >> 4, firmware_id[3] & 0xf, | ||
171 | firmware_id[4], firmware_id[5], | ||
172 | firmware_id[6], firmware_id[7], firmware_id[8]); | ||
173 | phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | | ||
174 | ((firmware_id[3] & 0x0f) << 16) | | ||
175 | (firmware_id[4] << 8) | firmware_id[5]; | ||
176 | } | ||
177 | |||
178 | static void qt2025c_bug17190_workaround(struct efx_nic *efx) | ||
179 | { | ||
180 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
181 | |||
182 | /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD | ||
183 | * layers up, but PCS down (no block_lock). If we notice this state | ||
184 | * persisting for a couple of seconds, we switch PMA/PMD loopback | ||
185 | * briefly on and then off again, which is normally sufficient to | ||
186 | * recover it. | ||
187 | */ | ||
188 | if (efx->link_state.up || | ||
189 | !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) { | ||
190 | phy_data->bug17190_in_bad_state = false; | ||
191 | return; | ||
192 | } | ||
193 | |||
194 | if (!phy_data->bug17190_in_bad_state) { | ||
195 | phy_data->bug17190_in_bad_state = true; | ||
196 | phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; | ||
197 | return; | ||
198 | } | ||
199 | |||
200 | if (time_after_eq(jiffies, phy_data->bug17190_timer)) { | ||
201 | EFX_LOG(efx, "bashing QT2025C PMA/PMD\n"); | ||
202 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, | ||
203 | MDIO_PMA_CTRL1_LOOPBACK, true); | ||
204 | msleep(100); | ||
205 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, | ||
206 | MDIO_PMA_CTRL1_LOOPBACK, false); | ||
207 | phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | static int qt2025c_select_phy_mode(struct efx_nic *efx) | ||
212 | { | ||
213 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
214 | struct falcon_board *board = falcon_board(efx); | ||
215 | int reg, rc, i; | ||
216 | uint16_t phy_op_mode; | ||
217 | |||
218 | /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+ | ||
219 | * Self-Configure mode. Don't attempt any switching if we encounter | ||
220 | * older firmware. */ | ||
221 | if (phy_data->firmware_ver < 0x02000100) | ||
222 | return 0; | ||
223 | |||
224 | /* In general we will get optimal behaviour in "SFP+ Self-Configure" | ||
225 | * mode; however, that powers down most of the PHY when no module is | ||
226 | * present, so we must use a different mode (any fixed mode will do) | ||
227 | * to be sure that loopbacks will work. */ | ||
228 | phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020; | ||
229 | |||
230 | /* Only change mode if really necessary */ | ||
231 | reg = efx_mdio_read(efx, 1, 0xc319); | ||
232 | if ((reg & 0x0038) == phy_op_mode) | ||
233 | return 0; | ||
234 | EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode); | ||
235 | |||
236 | /* This sequence replicates the register writes configured in the boot | ||
237 | * EEPROM (including the differences between board revisions), except | ||
238 | * that the operating mode is changed, and the PHY is prevented from | ||
239 | * unnecessarily reloading the main firmware image again. */ | ||
240 | efx_mdio_write(efx, 1, 0xc300, 0x0000); | ||
241 | /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9 | ||
242 | * STOPs onto the firmware/module I2C bus to reset it, varies across | ||
243 | * board revisions, as the bus is connected to different GPIO/LED | ||
244 | * outputs on the PHY.) */ | ||
245 | if (board->major == 0 && board->minor < 2) { | ||
246 | efx_mdio_write(efx, 1, 0xc303, 0x4498); | ||
247 | for (i = 0; i < 9; i++) { | ||
248 | efx_mdio_write(efx, 1, 0xc303, 0x4488); | ||
249 | efx_mdio_write(efx, 1, 0xc303, 0x4480); | ||
250 | efx_mdio_write(efx, 1, 0xc303, 0x4490); | ||
251 | efx_mdio_write(efx, 1, 0xc303, 0x4498); | ||
252 | } | ||
253 | } else { | ||
254 | efx_mdio_write(efx, 1, 0xc303, 0x0920); | ||
255 | efx_mdio_write(efx, 1, 0xd008, 0x0004); | ||
256 | for (i = 0; i < 9; i++) { | ||
257 | efx_mdio_write(efx, 1, 0xc303, 0x0900); | ||
258 | efx_mdio_write(efx, 1, 0xd008, 0x0005); | ||
259 | efx_mdio_write(efx, 1, 0xc303, 0x0920); | ||
260 | efx_mdio_write(efx, 1, 0xd008, 0x0004); | ||
261 | } | ||
262 | efx_mdio_write(efx, 1, 0xc303, 0x4900); | ||
263 | } | ||
264 | efx_mdio_write(efx, 1, 0xc303, 0x4900); | ||
265 | efx_mdio_write(efx, 1, 0xc302, 0x0004); | ||
266 | efx_mdio_write(efx, 1, 0xc316, 0x0013); | ||
267 | efx_mdio_write(efx, 1, 0xc318, 0x0054); | ||
268 | efx_mdio_write(efx, 1, 0xc319, phy_op_mode); | ||
269 | efx_mdio_write(efx, 1, 0xc31a, 0x0098); | ||
270 | efx_mdio_write(efx, 3, 0x0026, 0x0e00); | ||
271 | efx_mdio_write(efx, 3, 0x0027, 0x0013); | ||
272 | efx_mdio_write(efx, 3, 0x0028, 0xa528); | ||
273 | efx_mdio_write(efx, 1, 0xd006, 0x000a); | ||
274 | efx_mdio_write(efx, 1, 0xd007, 0x0009); | ||
275 | efx_mdio_write(efx, 1, 0xd008, 0x0004); | ||
276 | /* This additional write is not present in the boot EEPROM. It | ||
277 | * prevents the PHY's internal boot ROM doing another pointless (and | ||
278 | * slow) reload of the firmware image (the microcontroller's code | ||
279 | * memory is not affected by the microcontroller reset). */ | ||
280 | efx_mdio_write(efx, 1, 0xc317, 0x00ff); | ||
281 | efx_mdio_write(efx, 1, 0xc300, 0x0002); | ||
282 | msleep(20); | ||
283 | |||
284 | /* Restart microcontroller execution of firmware from RAM */ | ||
285 | qt2025c_restart_firmware(efx); | ||
286 | |||
287 | /* Wait for the microcontroller to be ready again */ | ||
288 | rc = qt2025c_wait_reset(efx); | ||
289 | if (rc < 0) { | ||
290 | EFX_ERR(efx, "PHY microcontroller reset during mode switch " | ||
291 | "timed out\n"); | ||
292 | return rc; | ||
293 | } | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int qt202x_reset_phy(struct efx_nic *efx) | ||
299 | { | ||
300 | int rc; | ||
301 | |||
302 | if (efx->phy_type == PHY_TYPE_QT2025C) { | ||
303 | /* Wait for the reset triggered by falcon_reset_hw() | ||
304 | * to complete */ | ||
305 | rc = qt2025c_wait_reset(efx); | ||
306 | if (rc < 0) | ||
307 | goto fail; | ||
308 | } else { | ||
309 | /* Reset the PHYXS MMD. This is documented as doing | ||
310 | * a complete soft reset. */ | ||
311 | rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, | ||
312 | QT2022C2_MAX_RESET_TIME / | ||
313 | QT2022C2_RESET_WAIT, | ||
314 | QT2022C2_RESET_WAIT); | ||
315 | if (rc < 0) | ||
316 | goto fail; | ||
317 | } | ||
318 | |||
319 | /* Wait 250ms for the PHY to complete bootup */ | ||
320 | msleep(250); | ||
321 | |||
322 | falcon_board(efx)->type->init_phy(efx); | ||
323 | |||
324 | return 0; | ||
325 | |||
326 | fail: | ||
327 | EFX_ERR(efx, "PHY reset timed out\n"); | ||
328 | return rc; | ||
329 | } | ||
330 | |||
331 | static int qt202x_phy_probe(struct efx_nic *efx) | ||
332 | { | ||
333 | struct qt202x_phy_data *phy_data; | ||
334 | |||
335 | phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); | ||
336 | if (!phy_data) | ||
337 | return -ENOMEM; | ||
338 | efx->phy_data = phy_data; | ||
339 | phy_data->phy_mode = efx->phy_mode; | ||
340 | phy_data->bug17190_in_bad_state = false; | ||
341 | phy_data->bug17190_timer = 0; | ||
342 | |||
343 | efx->mdio.mmds = QT202X_REQUIRED_DEVS; | ||
344 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
345 | efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static int qt202x_phy_init(struct efx_nic *efx) | ||
350 | { | ||
351 | u32 devid; | ||
352 | int rc; | ||
353 | |||
354 | rc = qt202x_reset_phy(efx); | ||
355 | if (rc) { | ||
356 | EFX_ERR(efx, "PHY init failed\n"); | ||
357 | return rc; | ||
358 | } | ||
359 | |||
360 | devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); | ||
361 | EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", | ||
362 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), | ||
363 | efx_mdio_id_rev(devid)); | ||
364 | |||
365 | if (efx->phy_type == PHY_TYPE_QT2025C) | ||
366 | qt2025c_firmware_id(efx); | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int qt202x_link_ok(struct efx_nic *efx) | ||
372 | { | ||
373 | return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS); | ||
374 | } | ||
375 | |||
376 | static bool qt202x_phy_poll(struct efx_nic *efx) | ||
377 | { | ||
378 | bool was_up = efx->link_state.up; | ||
379 | |||
380 | efx->link_state.up = qt202x_link_ok(efx); | ||
381 | efx->link_state.speed = 10000; | ||
382 | efx->link_state.fd = true; | ||
383 | efx->link_state.fc = efx->wanted_fc; | ||
384 | |||
385 | if (efx->phy_type == PHY_TYPE_QT2025C) | ||
386 | qt2025c_bug17190_workaround(efx); | ||
387 | |||
388 | return efx->link_state.up != was_up; | ||
389 | } | ||
390 | |||
391 | static int qt202x_phy_reconfigure(struct efx_nic *efx) | ||
392 | { | ||
393 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
394 | |||
395 | if (efx->phy_type == PHY_TYPE_QT2025C) { | ||
396 | int rc = qt2025c_select_phy_mode(efx); | ||
397 | if (rc) | ||
398 | return rc; | ||
399 | |||
400 | /* There are several different register bits which can | ||
401 | * disable TX (and save power) on direct-attach cables | ||
402 | * or optical transceivers, varying somewhat between | ||
403 | * firmware versions. Only 'static mode' appears to | ||
404 | * cover everything. */ | ||
405 | mdio_set_flag( | ||
406 | &efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD, | ||
407 | PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN, | ||
408 | efx->phy_mode & PHY_MODE_TX_DISABLED || | ||
409 | efx->phy_mode & PHY_MODE_LOW_POWER || | ||
410 | efx->loopback_mode == LOOPBACK_PCS || | ||
411 | efx->loopback_mode == LOOPBACK_PMAPMD); | ||
412 | } else { | ||
413 | /* Reset the PHY when moving from tx off to tx on */ | ||
414 | if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && | ||
415 | (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) | ||
416 | qt202x_reset_phy(efx); | ||
417 | |||
418 | efx_mdio_transmit_disable(efx); | ||
419 | } | ||
420 | |||
421 | efx_mdio_phy_reconfigure(efx); | ||
422 | |||
423 | phy_data->phy_mode = efx->phy_mode; | ||
424 | |||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
429 | { | ||
430 | mdio45_ethtool_gset(&efx->mdio, ecmd); | ||
431 | } | ||
432 | |||
433 | static void qt202x_phy_remove(struct efx_nic *efx) | ||
434 | { | ||
435 | /* Free the context block */ | ||
436 | kfree(efx->phy_data); | ||
437 | efx->phy_data = NULL; | ||
438 | } | ||
439 | |||
440 | struct efx_phy_operations falcon_qt202x_phy_ops = { | ||
441 | .probe = qt202x_phy_probe, | ||
442 | .init = qt202x_phy_init, | ||
443 | .reconfigure = qt202x_phy_reconfigure, | ||
444 | .poll = qt202x_phy_poll, | ||
445 | .fini = efx_port_dummy_op_void, | ||
446 | .remove = qt202x_phy_remove, | ||
447 | .get_settings = qt202x_phy_get_settings, | ||
448 | .set_settings = efx_mdio_set_settings, | ||
449 | .test_alive = efx_mdio_test_alive, | ||
450 | }; | ||
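As a hedged sketch (not part of this hunk): the operations table above would be attached to the NIC by the PHY probe path in falcon.c, roughly as below, assuming the efx_nic field holding the PHY operations is named phy_op and using the PHY_TYPE_QT2025C value referenced elsewhere in this file:

	/* Illustrative only; the real selection logic lives in falcon.c */
	if (efx->phy_type == PHY_TYPE_QT2025C)
		efx->phy_op = &falcon_qt202x_phy_ops;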
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h new file mode 100644 index 000000000000..18a3be428348 --- /dev/null +++ b/drivers/net/sfc/regs.h | |||
@@ -0,0 +1,3168 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_REGS_H | ||
12 | #define EFX_REGS_H | ||
13 | |||
14 | /* | ||
15 | * Falcon hardware architecture definitions have a name prefix following | ||
16 | * the format: | ||
17 | * | ||
18 | * F<type>_<min-rev><max-rev>_ | ||
19 | * | ||
20 | * The following <type> strings are used: | ||
21 | * | ||
22 | * MMIO register MC register Host memory structure | ||
23 | * ------------------------------------------------------------- | ||
24 | * Address R MCR | ||
25 | * Bitfield RF MCRF SF | ||
26 | * Enumerator FE MCFE SE | ||
27 | * | ||
28 | * <min-rev> is the first revision to which the definition applies: | ||
29 | * | ||
30 | * A: Falcon A1 (SFC4000AB) | ||
31 | * B: Falcon B0 (SFC4000BA) | ||
32 | * C: Siena A0 (SFL9021AA) | ||
33 | * | ||
34 | * If the definition has been changed or removed in later revisions | ||
35 | * then <max-rev> is the last revision to which the definition applies; | ||
36 | * otherwise it is "Z". | ||
37 | */ | ||
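As a worked example of this scheme, the first definition below, FRF_AZ_ADR_REGION0_LBN, decodes as: "F" for a Falcon hardware definition, "RF" for an MMIO register bitfield, "A"/"Z" meaning the field exists from Falcon A1 onwards and has not been removed in a later revision; the _LBN and _WIDTH suffixes give the field's lowest bit number and width.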
38 | |||
39 | /************************************************************************** | ||
40 | * | ||
41 | * Falcon/Siena registers and descriptors | ||
42 | * | ||
43 | ************************************************************************** | ||
44 | */ | ||
45 | |||
46 | /* ADR_REGION_REG: Address region register */ | ||
47 | #define FR_AZ_ADR_REGION 0x00000000 | ||
48 | #define FRF_AZ_ADR_REGION3_LBN 96 | ||
49 | #define FRF_AZ_ADR_REGION3_WIDTH 18 | ||
50 | #define FRF_AZ_ADR_REGION2_LBN 64 | ||
51 | #define FRF_AZ_ADR_REGION2_WIDTH 18 | ||
52 | #define FRF_AZ_ADR_REGION1_LBN 32 | ||
53 | #define FRF_AZ_ADR_REGION1_WIDTH 18 | ||
54 | #define FRF_AZ_ADR_REGION0_LBN 0 | ||
55 | #define FRF_AZ_ADR_REGION0_WIDTH 18 | ||
56 | |||
57 | /* INT_EN_REG_KER: Kernel driver Interrupt enable register */ | ||
58 | #define FR_AZ_INT_EN_KER 0x00000010 | ||
59 | #define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 | ||
60 | #define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 | ||
61 | #define FRF_AZ_KER_INT_CHAR_LBN 4 | ||
62 | #define FRF_AZ_KER_INT_CHAR_WIDTH 1 | ||
63 | #define FRF_AZ_KER_INT_KER_LBN 3 | ||
64 | #define FRF_AZ_KER_INT_KER_WIDTH 1 | ||
65 | #define FRF_AZ_DRV_INT_EN_KER_LBN 0 | ||
66 | #define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 | ||
67 | |||
68 | /* INT_EN_REG_CHAR: Char Driver interrupt enable register */ | ||
69 | #define FR_BZ_INT_EN_CHAR 0x00000020 | ||
70 | #define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 | ||
71 | #define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 | ||
72 | #define FRF_BZ_CHAR_INT_CHAR_LBN 4 | ||
73 | #define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 | ||
74 | #define FRF_BZ_CHAR_INT_KER_LBN 3 | ||
75 | #define FRF_BZ_CHAR_INT_KER_WIDTH 1 | ||
76 | #define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 | ||
77 | #define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 | ||
78 | |||
79 | /* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ | ||
80 | #define FR_AZ_INT_ADR_KER 0x00000030 | ||
81 | #define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 | ||
82 | #define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 | ||
83 | #define FRF_AZ_INT_ADR_KER_LBN 0 | ||
84 | #define FRF_AZ_INT_ADR_KER_WIDTH 64 | ||
85 | |||
86 | /* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ | ||
87 | #define FR_BZ_INT_ADR_CHAR 0x00000040 | ||
88 | #define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 | ||
89 | #define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 | ||
90 | #define FRF_BZ_INT_ADR_CHAR_LBN 0 | ||
91 | #define FRF_BZ_INT_ADR_CHAR_WIDTH 64 | ||
92 | |||
93 | /* INT_ACK_KER: Kernel interrupt acknowledge register */ | ||
94 | #define FR_AA_INT_ACK_KER 0x00000050 | ||
95 | #define FRF_AA_INT_ACK_KER_FIELD_LBN 0 | ||
96 | #define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 | ||
97 | |||
98 | /* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ | ||
99 | #define FR_BZ_INT_ISR0 0x00000090 | ||
100 | #define FRF_BZ_INT_ISR_REG_LBN 0 | ||
101 | #define FRF_BZ_INT_ISR_REG_WIDTH 64 | ||
102 | |||
103 | /* HW_INIT_REG: Hardware initialization register */ | ||
104 | #define FR_AZ_HW_INIT 0x000000c0 | ||
105 | #define FRF_BB_BDMRD_CPLF_FULL_LBN 124 | ||
106 | #define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 | ||
107 | #define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 | ||
108 | #define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 | ||
109 | #define FRF_CZ_TX_MRG_TAGS_LBN 120 | ||
110 | #define FRF_CZ_TX_MRG_TAGS_WIDTH 1 | ||
111 | #define FRF_AB_TRGT_MASK_ALL_LBN 100 | ||
112 | #define FRF_AB_TRGT_MASK_ALL_WIDTH 1 | ||
113 | #define FRF_AZ_DOORBELL_DROP_LBN 92 | ||
114 | #define FRF_AZ_DOORBELL_DROP_WIDTH 8 | ||
115 | #define FRF_AB_TX_RREQ_MASK_EN_LBN 76 | ||
116 | #define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 | ||
117 | #define FRF_AB_PE_EIDLE_DIS_LBN 75 | ||
118 | #define FRF_AB_PE_EIDLE_DIS_WIDTH 1 | ||
119 | #define FRF_AA_FC_BLOCKING_EN_LBN 45 | ||
120 | #define FRF_AA_FC_BLOCKING_EN_WIDTH 1 | ||
121 | #define FRF_BZ_B2B_REQ_EN_LBN 45 | ||
122 | #define FRF_BZ_B2B_REQ_EN_WIDTH 1 | ||
123 | #define FRF_AA_B2B_REQ_EN_LBN 44 | ||
124 | #define FRF_AA_B2B_REQ_EN_WIDTH 1 | ||
125 | #define FRF_BB_FC_BLOCKING_EN_LBN 44 | ||
126 | #define FRF_BB_FC_BLOCKING_EN_WIDTH 1 | ||
127 | #define FRF_AZ_POST_WR_MASK_LBN 40 | ||
128 | #define FRF_AZ_POST_WR_MASK_WIDTH 4 | ||
129 | #define FRF_AZ_TLP_TC_LBN 34 | ||
130 | #define FRF_AZ_TLP_TC_WIDTH 3 | ||
131 | #define FRF_AZ_TLP_ATTR_LBN 32 | ||
132 | #define FRF_AZ_TLP_ATTR_WIDTH 2 | ||
133 | #define FRF_AB_INTB_VEC_LBN 24 | ||
134 | #define FRF_AB_INTB_VEC_WIDTH 5 | ||
135 | #define FRF_AB_INTA_VEC_LBN 16 | ||
136 | #define FRF_AB_INTA_VEC_WIDTH 5 | ||
137 | #define FRF_AZ_WD_TIMER_LBN 8 | ||
138 | #define FRF_AZ_WD_TIMER_WIDTH 8 | ||
139 | #define FRF_AZ_US_DISABLE_LBN 5 | ||
140 | #define FRF_AZ_US_DISABLE_WIDTH 1 | ||
141 | #define FRF_AZ_TLP_EP_LBN 4 | ||
142 | #define FRF_AZ_TLP_EP_WIDTH 1 | ||
143 | #define FRF_AZ_ATTR_SEL_LBN 3 | ||
144 | #define FRF_AZ_ATTR_SEL_WIDTH 1 | ||
145 | #define FRF_AZ_TD_SEL_LBN 1 | ||
146 | #define FRF_AZ_TD_SEL_WIDTH 1 | ||
147 | #define FRF_AZ_TLP_TD_LBN 0 | ||
148 | #define FRF_AZ_TLP_TD_WIDTH 1 | ||
149 | |||
150 | /* EE_SPI_HCMD_REG: SPI host command register */ | ||
151 | #define FR_AB_EE_SPI_HCMD 0x00000100 | ||
152 | #define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 | ||
153 | #define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 | ||
154 | #define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 | ||
155 | #define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 | ||
156 | #define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 | ||
157 | #define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 | ||
158 | #define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 | ||
159 | #define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 | ||
160 | #define FRF_AB_EE_SPI_HCMD_READ_LBN 15 | ||
161 | #define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 | ||
162 | #define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 | ||
163 | #define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 | ||
164 | #define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 | ||
165 | #define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 | ||
166 | #define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 | ||
167 | #define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 | ||
168 | |||
169 | /* USR_EV_CFG: User Level Event Configuration register */ | ||
170 | #define FR_CZ_USR_EV_CFG 0x00000100 | ||
171 | #define FRF_CZ_USREV_DIS_LBN 16 | ||
172 | #define FRF_CZ_USREV_DIS_WIDTH 1 | ||
173 | #define FRF_CZ_DFLT_EVQ_LBN 0 | ||
174 | #define FRF_CZ_DFLT_EVQ_WIDTH 10 | ||
175 | |||
176 | /* EE_SPI_HADR_REG: SPI host address register */ | ||
177 | #define FR_AB_EE_SPI_HADR 0x00000110 | ||
178 | #define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 | ||
179 | #define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 | ||
180 | #define FRF_AB_EE_SPI_HADR_ADR_LBN 0 | ||
181 | #define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 | ||
182 | |||
183 | /* EE_SPI_HDATA_REG: SPI host data register */ | ||
184 | #define FR_AB_EE_SPI_HDATA 0x00000120 | ||
185 | #define FRF_AB_EE_SPI_HDATA3_LBN 96 | ||
186 | #define FRF_AB_EE_SPI_HDATA3_WIDTH 32 | ||
187 | #define FRF_AB_EE_SPI_HDATA2_LBN 64 | ||
188 | #define FRF_AB_EE_SPI_HDATA2_WIDTH 32 | ||
189 | #define FRF_AB_EE_SPI_HDATA1_LBN 32 | ||
190 | #define FRF_AB_EE_SPI_HDATA1_WIDTH 32 | ||
191 | #define FRF_AB_EE_SPI_HDATA0_LBN 0 | ||
192 | #define FRF_AB_EE_SPI_HDATA0_WIDTH 32 | ||
193 | |||
194 | /* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ | ||
195 | #define FR_AB_EE_BASE_PAGE 0x00000130 | ||
196 | #define FRF_AB_EE_EXPROM_MASK_LBN 16 | ||
197 | #define FRF_AB_EE_EXPROM_MASK_WIDTH 13 | ||
198 | #define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 | ||
199 | #define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 | ||
200 | |||
201 | /* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ | ||
202 | #define FR_AB_EE_VPD_CFG0 0x00000140 | ||
203 | #define FRF_AB_EE_SF_FASTRD_EN_LBN 127 | ||
204 | #define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 | ||
205 | #define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 | ||
206 | #define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 | ||
207 | #define FRF_AB_EE_VPD_WIP_POLL_LBN 119 | ||
208 | #define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 | ||
209 | #define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 | ||
210 | #define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 | ||
211 | #define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 | ||
212 | #define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 | ||
213 | #define FRF_AB_EE_VPDW_LENGTH_LBN 80 | ||
214 | #define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 | ||
215 | #define FRF_AB_EE_VPDW_BASE_LBN 64 | ||
216 | #define FRF_AB_EE_VPDW_BASE_WIDTH 15 | ||
217 | #define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 | ||
218 | #define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 | ||
219 | #define FRF_AB_EE_VPD_BASE_LBN 32 | ||
220 | #define FRF_AB_EE_VPD_BASE_WIDTH 24 | ||
221 | #define FRF_AB_EE_VPD_LENGTH_LBN 16 | ||
222 | #define FRF_AB_EE_VPD_LENGTH_WIDTH 15 | ||
223 | #define FRF_AB_EE_VPD_AD_SIZE_LBN 8 | ||
224 | #define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 | ||
225 | #define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 | ||
226 | #define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 | ||
227 | #define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 | ||
228 | #define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 | ||
229 | #define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 | ||
230 | #define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 | ||
231 | #define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 | ||
232 | #define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 | ||
233 | #define FRF_AB_EE_VPD_EN_LBN 0 | ||
234 | #define FRF_AB_EE_VPD_EN_WIDTH 1 | ||
235 | |||
236 | /* EE_VPD_SW_CNTL_REG: VPD access SW control register */ | ||
237 | #define FR_AB_EE_VPD_SW_CNTL 0x00000150 | ||
238 | #define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 | ||
239 | #define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 | ||
240 | #define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 | ||
241 | #define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 | ||
242 | #define FRF_AB_EE_VPD_CYC_ADR_LBN 0 | ||
243 | #define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 | ||
244 | |||
245 | /* EE_VPD_SW_DATA_REG: VPD access SW data register */ | ||
246 | #define FR_AB_EE_VPD_SW_DATA 0x00000160 | ||
247 | #define FRF_AB_EE_VPD_CYC_DAT_LBN 0 | ||
248 | #define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 | ||
249 | |||
250 | /* PBMX_DBG_IADDR_REG: Capture Module address register */ | ||
251 | #define FR_CZ_PBMX_DBG_IADDR 0x000001f0 | ||
252 | #define FRF_CZ_PBMX_DBG_IADDR_LBN 0 | ||
253 | #define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 | ||
254 | |||
255 | /* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ | ||
256 | #define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 | ||
257 | #define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 | ||
258 | #define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 | ||
259 | #define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 | ||
260 | #define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 | ||
261 | #define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 | ||
262 | #define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 | ||
263 | |||
264 | /* PBMX_DBG_IDATA_REG: Capture Module data register */ | ||
265 | #define FR_CZ_PBMX_DBG_IDATA 0x000001f8 | ||
266 | #define FRF_CZ_PBMX_DBG_IDATA_LBN 0 | ||
267 | #define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 | ||
268 | |||
269 | /* NIC_STAT_REG: NIC status register */ | ||
270 | #define FR_AB_NIC_STAT 0x00000200 | ||
271 | #define FRF_BB_AER_DIS_LBN 34 | ||
272 | #define FRF_BB_AER_DIS_WIDTH 1 | ||
273 | #define FRF_BB_EE_STRAP_EN_LBN 31 | ||
274 | #define FRF_BB_EE_STRAP_EN_WIDTH 1 | ||
275 | #define FRF_BB_EE_STRAP_LBN 24 | ||
276 | #define FRF_BB_EE_STRAP_WIDTH 4 | ||
277 | #define FRF_BB_REVISION_ID_LBN 17 | ||
278 | #define FRF_BB_REVISION_ID_WIDTH 7 | ||
279 | #define FRF_AB_ONCHIP_SRAM_LBN 16 | ||
280 | #define FRF_AB_ONCHIP_SRAM_WIDTH 1 | ||
281 | #define FRF_AB_SF_PRST_LBN 9 | ||
282 | #define FRF_AB_SF_PRST_WIDTH 1 | ||
283 | #define FRF_AB_EE_PRST_LBN 8 | ||
284 | #define FRF_AB_EE_PRST_WIDTH 1 | ||
285 | #define FRF_AB_ATE_MODE_LBN 3 | ||
286 | #define FRF_AB_ATE_MODE_WIDTH 1 | ||
287 | #define FRF_AB_STRAP_PINS_LBN 0 | ||
288 | #define FRF_AB_STRAP_PINS_WIDTH 3 | ||
289 | |||
290 | /* GPIO_CTL_REG: GPIO control register */ | ||
291 | #define FR_AB_GPIO_CTL 0x00000210 | ||
292 | #define FRF_AB_GPIO_OUT3_LBN 112 | ||
293 | #define FRF_AB_GPIO_OUT3_WIDTH 16 | ||
294 | #define FRF_AB_GPIO_IN3_LBN 104 | ||
295 | #define FRF_AB_GPIO_IN3_WIDTH 8 | ||
296 | #define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 | ||
297 | #define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 | ||
298 | #define FRF_AB_GPIO_OUT2_LBN 80 | ||
299 | #define FRF_AB_GPIO_OUT2_WIDTH 16 | ||
300 | #define FRF_AB_GPIO_IN2_LBN 72 | ||
301 | #define FRF_AB_GPIO_IN2_WIDTH 8 | ||
302 | #define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 | ||
303 | #define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 | ||
304 | #define FRF_AB_GPIO15_OEN_LBN 63 | ||
305 | #define FRF_AB_GPIO15_OEN_WIDTH 1 | ||
306 | #define FRF_AB_GPIO14_OEN_LBN 62 | ||
307 | #define FRF_AB_GPIO14_OEN_WIDTH 1 | ||
308 | #define FRF_AB_GPIO13_OEN_LBN 61 | ||
309 | #define FRF_AB_GPIO13_OEN_WIDTH 1 | ||
310 | #define FRF_AB_GPIO12_OEN_LBN 60 | ||
311 | #define FRF_AB_GPIO12_OEN_WIDTH 1 | ||
312 | #define FRF_AB_GPIO11_OEN_LBN 59 | ||
313 | #define FRF_AB_GPIO11_OEN_WIDTH 1 | ||
314 | #define FRF_AB_GPIO10_OEN_LBN 58 | ||
315 | #define FRF_AB_GPIO10_OEN_WIDTH 1 | ||
316 | #define FRF_AB_GPIO9_OEN_LBN 57 | ||
317 | #define FRF_AB_GPIO9_OEN_WIDTH 1 | ||
318 | #define FRF_AB_GPIO8_OEN_LBN 56 | ||
319 | #define FRF_AB_GPIO8_OEN_WIDTH 1 | ||
320 | #define FRF_AB_GPIO15_OUT_LBN 55 | ||
321 | #define FRF_AB_GPIO15_OUT_WIDTH 1 | ||
322 | #define FRF_AB_GPIO14_OUT_LBN 54 | ||
323 | #define FRF_AB_GPIO14_OUT_WIDTH 1 | ||
324 | #define FRF_AB_GPIO13_OUT_LBN 53 | ||
325 | #define FRF_AB_GPIO13_OUT_WIDTH 1 | ||
326 | #define FRF_AB_GPIO12_OUT_LBN 52 | ||
327 | #define FRF_AB_GPIO12_OUT_WIDTH 1 | ||
328 | #define FRF_AB_GPIO11_OUT_LBN 51 | ||
329 | #define FRF_AB_GPIO11_OUT_WIDTH 1 | ||
330 | #define FRF_AB_GPIO10_OUT_LBN 50 | ||
331 | #define FRF_AB_GPIO10_OUT_WIDTH 1 | ||
332 | #define FRF_AB_GPIO9_OUT_LBN 49 | ||
333 | #define FRF_AB_GPIO9_OUT_WIDTH 1 | ||
334 | #define FRF_AB_GPIO8_OUT_LBN 48 | ||
335 | #define FRF_AB_GPIO8_OUT_WIDTH 1 | ||
336 | #define FRF_AB_GPIO15_IN_LBN 47 | ||
337 | #define FRF_AB_GPIO15_IN_WIDTH 1 | ||
338 | #define FRF_AB_GPIO14_IN_LBN 46 | ||
339 | #define FRF_AB_GPIO14_IN_WIDTH 1 | ||
340 | #define FRF_AB_GPIO13_IN_LBN 45 | ||
341 | #define FRF_AB_GPIO13_IN_WIDTH 1 | ||
342 | #define FRF_AB_GPIO12_IN_LBN 44 | ||
343 | #define FRF_AB_GPIO12_IN_WIDTH 1 | ||
344 | #define FRF_AB_GPIO11_IN_LBN 43 | ||
345 | #define FRF_AB_GPIO11_IN_WIDTH 1 | ||
346 | #define FRF_AB_GPIO10_IN_LBN 42 | ||
347 | #define FRF_AB_GPIO10_IN_WIDTH 1 | ||
348 | #define FRF_AB_GPIO9_IN_LBN 41 | ||
349 | #define FRF_AB_GPIO9_IN_WIDTH 1 | ||
350 | #define FRF_AB_GPIO8_IN_LBN 40 | ||
351 | #define FRF_AB_GPIO8_IN_WIDTH 1 | ||
352 | #define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 | ||
353 | #define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 | ||
354 | #define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 | ||
355 | #define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 | ||
356 | #define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 | ||
357 | #define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 | ||
358 | #define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 | ||
359 | #define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 | ||
360 | #define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 | ||
361 | #define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 | ||
362 | #define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 | ||
363 | #define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 | ||
364 | #define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 | ||
365 | #define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 | ||
366 | #define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 | ||
367 | #define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 | ||
368 | #define FRF_AB_CLK156_OUT_EN_LBN 31 | ||
369 | #define FRF_AB_CLK156_OUT_EN_WIDTH 1 | ||
370 | #define FRF_AB_USE_NIC_CLK_LBN 30 | ||
371 | #define FRF_AB_USE_NIC_CLK_WIDTH 1 | ||
372 | #define FRF_AB_GPIO5_OEN_LBN 29 | ||
373 | #define FRF_AB_GPIO5_OEN_WIDTH 1 | ||
374 | #define FRF_AB_GPIO4_OEN_LBN 28 | ||
375 | #define FRF_AB_GPIO4_OEN_WIDTH 1 | ||
376 | #define FRF_AB_GPIO3_OEN_LBN 27 | ||
377 | #define FRF_AB_GPIO3_OEN_WIDTH 1 | ||
378 | #define FRF_AB_GPIO2_OEN_LBN 26 | ||
379 | #define FRF_AB_GPIO2_OEN_WIDTH 1 | ||
380 | #define FRF_AB_GPIO1_OEN_LBN 25 | ||
381 | #define FRF_AB_GPIO1_OEN_WIDTH 1 | ||
382 | #define FRF_AB_GPIO0_OEN_LBN 24 | ||
383 | #define FRF_AB_GPIO0_OEN_WIDTH 1 | ||
384 | #define FRF_AB_GPIO7_OUT_LBN 23 | ||
385 | #define FRF_AB_GPIO7_OUT_WIDTH 1 | ||
386 | #define FRF_AB_GPIO6_OUT_LBN 22 | ||
387 | #define FRF_AB_GPIO6_OUT_WIDTH 1 | ||
388 | #define FRF_AB_GPIO5_OUT_LBN 21 | ||
389 | #define FRF_AB_GPIO5_OUT_WIDTH 1 | ||
390 | #define FRF_AB_GPIO4_OUT_LBN 20 | ||
391 | #define FRF_AB_GPIO4_OUT_WIDTH 1 | ||
392 | #define FRF_AB_GPIO3_OUT_LBN 19 | ||
393 | #define FRF_AB_GPIO3_OUT_WIDTH 1 | ||
394 | #define FRF_AB_GPIO2_OUT_LBN 18 | ||
395 | #define FRF_AB_GPIO2_OUT_WIDTH 1 | ||
396 | #define FRF_AB_GPIO1_OUT_LBN 17 | ||
397 | #define FRF_AB_GPIO1_OUT_WIDTH 1 | ||
398 | #define FRF_AB_GPIO0_OUT_LBN 16 | ||
399 | #define FRF_AB_GPIO0_OUT_WIDTH 1 | ||
400 | #define FRF_AB_GPIO7_IN_LBN 15 | ||
401 | #define FRF_AB_GPIO7_IN_WIDTH 1 | ||
402 | #define FRF_AB_GPIO6_IN_LBN 14 | ||
403 | #define FRF_AB_GPIO6_IN_WIDTH 1 | ||
404 | #define FRF_AB_GPIO5_IN_LBN 13 | ||
405 | #define FRF_AB_GPIO5_IN_WIDTH 1 | ||
406 | #define FRF_AB_GPIO4_IN_LBN 12 | ||
407 | #define FRF_AB_GPIO4_IN_WIDTH 1 | ||
408 | #define FRF_AB_GPIO3_IN_LBN 11 | ||
409 | #define FRF_AB_GPIO3_IN_WIDTH 1 | ||
410 | #define FRF_AB_GPIO2_IN_LBN 10 | ||
411 | #define FRF_AB_GPIO2_IN_WIDTH 1 | ||
412 | #define FRF_AB_GPIO1_IN_LBN 9 | ||
413 | #define FRF_AB_GPIO1_IN_WIDTH 1 | ||
414 | #define FRF_AB_GPIO0_IN_LBN 8 | ||
415 | #define FRF_AB_GPIO0_IN_WIDTH 1 | ||
416 | #define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 | ||
417 | #define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 | ||
418 | #define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 | ||
419 | #define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 | ||
420 | #define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 | ||
421 | #define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 | ||
422 | #define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 | ||
423 | #define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 | ||
424 | #define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 | ||
425 | #define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 | ||
426 | #define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 | ||
427 | #define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 | ||
428 | #define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 | ||
429 | #define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 | ||
430 | #define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 | ||
431 | #define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 | ||
432 | |||
433 | /* GLB_CTL_REG: Global control register */ | ||
434 | #define FR_AB_GLB_CTL 0x00000220 | ||
435 | #define FRF_AB_EXT_PHY_RST_CTL_LBN 63 | ||
436 | #define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 | ||
437 | #define FRF_AB_XAUI_SD_RST_CTL_LBN 62 | ||
438 | #define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 | ||
439 | #define FRF_AB_PCIE_SD_RST_CTL_LBN 61 | ||
440 | #define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 | ||
441 | #define FRF_AA_PCIX_RST_CTL_LBN 60 | ||
442 | #define FRF_AA_PCIX_RST_CTL_WIDTH 1 | ||
443 | #define FRF_BB_BIU_RST_CTL_LBN 60 | ||
444 | #define FRF_BB_BIU_RST_CTL_WIDTH 1 | ||
445 | #define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 | ||
446 | #define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 | ||
447 | #define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 | ||
448 | #define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 | ||
449 | #define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 | ||
450 | #define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 | ||
451 | #define FRF_AB_XGRX_RST_CTL_LBN 56 | ||
452 | #define FRF_AB_XGRX_RST_CTL_WIDTH 1 | ||
453 | #define FRF_AB_XGTX_RST_CTL_LBN 55 | ||
454 | #define FRF_AB_XGTX_RST_CTL_WIDTH 1 | ||
455 | #define FRF_AB_EM_RST_CTL_LBN 54 | ||
456 | #define FRF_AB_EM_RST_CTL_WIDTH 1 | ||
457 | #define FRF_AB_EV_RST_CTL_LBN 53 | ||
458 | #define FRF_AB_EV_RST_CTL_WIDTH 1 | ||
459 | #define FRF_AB_SR_RST_CTL_LBN 52 | ||
460 | #define FRF_AB_SR_RST_CTL_WIDTH 1 | ||
461 | #define FRF_AB_RX_RST_CTL_LBN 51 | ||
462 | #define FRF_AB_RX_RST_CTL_WIDTH 1 | ||
463 | #define FRF_AB_TX_RST_CTL_LBN 50 | ||
464 | #define FRF_AB_TX_RST_CTL_WIDTH 1 | ||
465 | #define FRF_AB_EE_RST_CTL_LBN 49 | ||
466 | #define FRF_AB_EE_RST_CTL_WIDTH 1 | ||
467 | #define FRF_AB_CS_RST_CTL_LBN 48 | ||
468 | #define FRF_AB_CS_RST_CTL_WIDTH 1 | ||
469 | #define FRF_AB_HOT_RST_CTL_LBN 40 | ||
470 | #define FRF_AB_HOT_RST_CTL_WIDTH 2 | ||
471 | #define FRF_AB_RST_EXT_PHY_LBN 31 | ||
472 | #define FRF_AB_RST_EXT_PHY_WIDTH 1 | ||
473 | #define FRF_AB_RST_XAUI_SD_LBN 30 | ||
474 | #define FRF_AB_RST_XAUI_SD_WIDTH 1 | ||
475 | #define FRF_AB_RST_PCIE_SD_LBN 29 | ||
476 | #define FRF_AB_RST_PCIE_SD_WIDTH 1 | ||
477 | #define FRF_AA_RST_PCIX_LBN 28 | ||
478 | #define FRF_AA_RST_PCIX_WIDTH 1 | ||
479 | #define FRF_BB_RST_BIU_LBN 28 | ||
480 | #define FRF_BB_RST_BIU_WIDTH 1 | ||
481 | #define FRF_AB_RST_PCIE_STKY_LBN 27 | ||
482 | #define FRF_AB_RST_PCIE_STKY_WIDTH 1 | ||
483 | #define FRF_AB_RST_PCIE_NSTKY_LBN 26 | ||
484 | #define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 | ||
485 | #define FRF_AB_RST_PCIE_CORE_LBN 25 | ||
486 | #define FRF_AB_RST_PCIE_CORE_WIDTH 1 | ||
487 | #define FRF_AB_RST_XGRX_LBN 24 | ||
488 | #define FRF_AB_RST_XGRX_WIDTH 1 | ||
489 | #define FRF_AB_RST_XGTX_LBN 23 | ||
490 | #define FRF_AB_RST_XGTX_WIDTH 1 | ||
491 | #define FRF_AB_RST_EM_LBN 22 | ||
492 | #define FRF_AB_RST_EM_WIDTH 1 | ||
493 | #define FRF_AB_RST_EV_LBN 21 | ||
494 | #define FRF_AB_RST_EV_WIDTH 1 | ||
495 | #define FRF_AB_RST_SR_LBN 20 | ||
496 | #define FRF_AB_RST_SR_WIDTH 1 | ||
497 | #define FRF_AB_RST_RX_LBN 19 | ||
498 | #define FRF_AB_RST_RX_WIDTH 1 | ||
499 | #define FRF_AB_RST_TX_LBN 18 | ||
500 | #define FRF_AB_RST_TX_WIDTH 1 | ||
501 | #define FRF_AB_RST_SF_LBN 17 | ||
502 | #define FRF_AB_RST_SF_WIDTH 1 | ||
503 | #define FRF_AB_RST_CS_LBN 16 | ||
504 | #define FRF_AB_RST_CS_WIDTH 1 | ||
505 | #define FRF_AB_INT_RST_DUR_LBN 4 | ||
506 | #define FRF_AB_INT_RST_DUR_WIDTH 3 | ||
507 | #define FRF_AB_EXT_PHY_RST_DUR_LBN 1 | ||
508 | #define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 | ||
509 | #define FFE_AB_EXT_PHY_RST_DUR_10240US 7 | ||
510 | #define FFE_AB_EXT_PHY_RST_DUR_5120US 6 | ||
511 | #define FFE_AB_EXT_PHY_RST_DUR_2560US 5 | ||
512 | #define FFE_AB_EXT_PHY_RST_DUR_1280US 4 | ||
513 | #define FFE_AB_EXT_PHY_RST_DUR_640US 3 | ||
514 | #define FFE_AB_EXT_PHY_RST_DUR_320US 2 | ||
515 | #define FFE_AB_EXT_PHY_RST_DUR_160US 1 | ||
516 | #define FFE_AB_EXT_PHY_RST_DUR_80US 0 | ||
517 | #define FRF_AB_SWRST_LBN 0 | ||
518 | #define FRF_AB_SWRST_WIDTH 1 | ||
519 | |||
520 | /* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ | ||
521 | #define FR_AZ_FATAL_INTR_KER 0x00000230 | ||
522 | #define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 | ||
523 | #define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 | ||
524 | #define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 | ||
525 | #define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 | ||
526 | #define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 | ||
527 | #define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 | ||
528 | #define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 | ||
529 | #define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 | ||
530 | #define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 | ||
531 | #define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 | ||
532 | #define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 | ||
533 | #define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 | ||
534 | #define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 | ||
535 | #define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 | ||
536 | #define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 | ||
537 | #define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 | ||
538 | #define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 | ||
539 | #define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 | ||
540 | #define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 | ||
541 | #define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 | ||
542 | #define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 | ||
543 | #define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 | ||
544 | #define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 | ||
545 | #define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 | ||
546 | #define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 | ||
547 | #define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 | ||
548 | #define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 | ||
549 | #define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 | ||
550 | #define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 | ||
551 | #define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 | ||
552 | #define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 | ||
553 | #define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 | ||
554 | #define FRF_CZ_MBU_PERR_INT_KER_LBN 11 | ||
555 | #define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 | ||
556 | #define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 | ||
557 | #define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 | ||
558 | #define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 | ||
559 | #define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 | ||
560 | #define FRF_AZ_MEM_PERR_INT_KER_LBN 8 | ||
561 | #define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 | ||
562 | #define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 | ||
563 | #define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 | ||
564 | #define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 | ||
565 | #define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 | ||
566 | #define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 | ||
567 | #define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 | ||
568 | #define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 | ||
569 | #define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 | ||
570 | #define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 | ||
571 | #define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 | ||
572 | #define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 | ||
573 | #define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 | ||
574 | #define FRF_AZ_ILL_ADR_INT_KER_LBN 1 | ||
575 | #define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 | ||
576 | #define FRF_AZ_SRM_PERR_INT_KER_LBN 0 | ||
577 | #define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 | ||
578 | |||
579 | /* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ | ||
580 | #define FR_BZ_FATAL_INTR_CHAR 0x00000240 | ||
581 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 | ||
582 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 | ||
583 | #define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 | ||
584 | #define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 | ||
585 | #define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 | ||
586 | #define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 | ||
587 | #define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 | ||
588 | #define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 | ||
589 | #define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 | ||
590 | #define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 | ||
591 | #define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 | ||
592 | #define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 | ||
593 | #define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 | ||
594 | #define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 | ||
595 | #define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 | ||
596 | #define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 | ||
597 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 | ||
598 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 | ||
599 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 | ||
600 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 | ||
601 | #define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 | ||
602 | #define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 | ||
603 | #define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 | ||
604 | #define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 | ||
605 | #define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 | ||
606 | #define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 | ||
607 | #define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 | ||
608 | #define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 | ||
609 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 | ||
610 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 | ||
611 | #define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11 | ||
612 | #define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 | ||
613 | #define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 | ||
614 | #define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 | ||
615 | #define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 | ||
616 | #define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 | ||
617 | #define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 | ||
618 | #define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 | ||
619 | #define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 | ||
620 | #define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 | ||
621 | #define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 | ||
622 | #define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 | ||
623 | #define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 | ||
624 | #define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 | ||
625 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 | ||
626 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 | ||
627 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 | ||
628 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 | ||
629 | #define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 | ||
630 | #define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 | ||
631 | #define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 | ||
632 | #define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 | ||
633 | #define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 | ||
634 | #define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 | ||
635 | #define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 | ||
636 | #define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 | ||
637 | |||
638 | /* DP_CTRL_REG: Datapath control register */ | ||
639 | #define FR_BZ_DP_CTRL 0x00000250 | ||
640 | #define FRF_BZ_FLS_EVQ_ID_LBN 0 | ||
641 | #define FRF_BZ_FLS_EVQ_ID_WIDTH 12 | ||
642 | |||
643 | /* MEM_STAT_REG: Memory status register */ | ||
644 | #define FR_AZ_MEM_STAT 0x00000260 | ||
645 | #define FRF_AB_MEM_PERR_VEC_LBN 53 | ||
646 | #define FRF_AB_MEM_PERR_VEC_WIDTH 38 | ||
647 | #define FRF_AB_MBIST_CORR_LBN 38 | ||
648 | #define FRF_AB_MBIST_CORR_WIDTH 15 | ||
649 | #define FRF_AB_MBIST_ERR_LBN 0 | ||
650 | #define FRF_AB_MBIST_ERR_WIDTH 40 | ||
651 | #define FRF_CZ_MEM_PERR_VEC_LBN 0 | ||
652 | #define FRF_CZ_MEM_PERR_VEC_WIDTH 35 | ||
653 | |||
654 | /* CS_DEBUG_REG: Debug register */ | ||
655 | #define FR_AZ_CS_DEBUG 0x00000270 | ||
656 | #define FRF_AB_GLB_DEBUG2_SEL_LBN 50 | ||
657 | #define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 | ||
658 | #define FRF_AB_DEBUG_BLK_SEL2_LBN 47 | ||
659 | #define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 | ||
660 | #define FRF_AB_DEBUG_BLK_SEL1_LBN 44 | ||
661 | #define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 | ||
662 | #define FRF_AB_DEBUG_BLK_SEL0_LBN 41 | ||
663 | #define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 | ||
664 | #define FRF_CZ_CS_PORT_NUM_LBN 40 | ||
665 | #define FRF_CZ_CS_PORT_NUM_WIDTH 2 | ||
666 | #define FRF_AB_MISC_DEBUG_ADDR_LBN 36 | ||
667 | #define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 | ||
668 | #define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 | ||
669 | #define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 | ||
670 | #define FRF_CZ_CS_PORT_FPE_LBN 1 | ||
671 | #define FRF_CZ_CS_PORT_FPE_WIDTH 35 | ||
672 | #define FRF_AB_EM_DEBUG_ADDR_LBN 26 | ||
673 | #define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 | ||
674 | #define FRF_AB_SR_DEBUG_ADDR_LBN 21 | ||
675 | #define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 | ||
676 | #define FRF_AB_EV_DEBUG_ADDR_LBN 16 | ||
677 | #define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 | ||
678 | #define FRF_AB_RX_DEBUG_ADDR_LBN 11 | ||
679 | #define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 | ||
680 | #define FRF_AB_TX_DEBUG_ADDR_LBN 6 | ||
681 | #define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 | ||
682 | #define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 | ||
683 | #define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 | ||
684 | #define FRF_AZ_CS_DEBUG_EN_LBN 0 | ||
685 | #define FRF_AZ_CS_DEBUG_EN_WIDTH 1 | ||
686 | |||
687 | /* DRIVER_REG: Driver scratch register [0-7] */ | ||
688 | #define FR_AZ_DRIVER 0x00000280 | ||
689 | #define FR_AZ_DRIVER_STEP 16 | ||
690 | #define FR_AZ_DRIVER_ROWS 8 | ||
691 | #define FRF_AZ_DRIVER_DW0_LBN 0 | ||
692 | #define FRF_AZ_DRIVER_DW0_WIDTH 32 | ||
693 | |||
694 | /* ALTERA_BUILD_REG: Altera build register */ | ||
695 | #define FR_AZ_ALTERA_BUILD 0x00000300 | ||
696 | #define FRF_AZ_ALTERA_BUILD_VER_LBN 0 | ||
697 | #define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 | ||
698 | |||
699 | /* CSR_SPARE_REG: Spare register */ | ||
700 | #define FR_AZ_CSR_SPARE 0x00000310 | ||
701 | #define FRF_AB_MEM_PERR_EN_LBN 64 | ||
702 | #define FRF_AB_MEM_PERR_EN_WIDTH 38 | ||
703 | #define FRF_CZ_MEM_PERR_EN_LBN 64 | ||
704 | #define FRF_CZ_MEM_PERR_EN_WIDTH 35 | ||
705 | #define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 | ||
706 | #define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 | ||
707 | #define FRF_AZ_CSR_SPARE_BITS_LBN 0 | ||
708 | #define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 | ||
709 | |||
710 | /* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ | ||
711 | #define FR_AB_PCIE_SD_CTL0123 0x00000320 | ||
712 | #define FRF_AB_PCIE_TESTSIG_H_LBN 96 | ||
713 | #define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 | ||
714 | #define FRF_AB_PCIE_TESTSIG_L_LBN 64 | ||
715 | #define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 | ||
716 | #define FRF_AB_PCIE_OFFSET_LBN 56 | ||
717 | #define FRF_AB_PCIE_OFFSET_WIDTH 8 | ||
718 | #define FRF_AB_PCIE_OFFSETEN_H_LBN 55 | ||
719 | #define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 | ||
720 | #define FRF_AB_PCIE_OFFSETEN_L_LBN 54 | ||
721 | #define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 | ||
722 | #define FRF_AB_PCIE_HIVMODE_H_LBN 53 | ||
723 | #define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 | ||
724 | #define FRF_AB_PCIE_HIVMODE_L_LBN 52 | ||
725 | #define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 | ||
726 | #define FRF_AB_PCIE_PARRESET_H_LBN 51 | ||
727 | #define FRF_AB_PCIE_PARRESET_H_WIDTH 1 | ||
728 | #define FRF_AB_PCIE_PARRESET_L_LBN 50 | ||
729 | #define FRF_AB_PCIE_PARRESET_L_WIDTH 1 | ||
730 | #define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 | ||
731 | #define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 | ||
732 | #define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 | ||
733 | #define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 | ||
734 | #define FRF_AB_PCIE_LPBK_LBN 40 | ||
735 | #define FRF_AB_PCIE_LPBK_WIDTH 8 | ||
736 | #define FRF_AB_PCIE_PARLPBK_LBN 32 | ||
737 | #define FRF_AB_PCIE_PARLPBK_WIDTH 8 | ||
738 | #define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 | ||
739 | #define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 | ||
740 | #define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 | ||
741 | #define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 | ||
742 | #define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 | ||
743 | #define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 | ||
744 | #define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 | ||
745 | #define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 | ||
746 | #define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 | ||
747 | #define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 | ||
748 | #define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 | ||
749 | #define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 | ||
750 | #define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 | ||
751 | #define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 | ||
752 | #define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 | ||
753 | #define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 | ||
754 | #define FRF_AB_PCIE_RXEQCTL_H_LBN 18 | ||
755 | #define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 | ||
756 | #define FRF_AB_PCIE_RXEQCTL_L_LBN 16 | ||
757 | #define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 | ||
758 | #define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 | ||
759 | #define FFE_AB_PCIE_RXEQCTL_OFF 2 | ||
760 | #define FFE_AB_PCIE_RXEQCTL_MIN 1 | ||
761 | #define FFE_AB_PCIE_RXEQCTL_MAX 0 | ||
762 | #define FRF_AB_PCIE_HIDRV_LBN 8 | ||
763 | #define FRF_AB_PCIE_HIDRV_WIDTH 8 | ||
764 | #define FRF_AB_PCIE_LODRV_LBN 0 | ||
765 | #define FRF_AB_PCIE_LODRV_WIDTH 8 | ||
766 | |||
767 | /* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ | ||
768 | #define FR_AB_PCIE_SD_CTL45 0x00000330 | ||
769 | #define FRF_AB_PCIE_DTX7_LBN 60 | ||
770 | #define FRF_AB_PCIE_DTX7_WIDTH 4 | ||
771 | #define FRF_AB_PCIE_DTX6_LBN 56 | ||
772 | #define FRF_AB_PCIE_DTX6_WIDTH 4 | ||
773 | #define FRF_AB_PCIE_DTX5_LBN 52 | ||
774 | #define FRF_AB_PCIE_DTX5_WIDTH 4 | ||
775 | #define FRF_AB_PCIE_DTX4_LBN 48 | ||
776 | #define FRF_AB_PCIE_DTX4_WIDTH 4 | ||
777 | #define FRF_AB_PCIE_DTX3_LBN 44 | ||
778 | #define FRF_AB_PCIE_DTX3_WIDTH 4 | ||
779 | #define FRF_AB_PCIE_DTX2_LBN 40 | ||
780 | #define FRF_AB_PCIE_DTX2_WIDTH 4 | ||
781 | #define FRF_AB_PCIE_DTX1_LBN 36 | ||
782 | #define FRF_AB_PCIE_DTX1_WIDTH 4 | ||
783 | #define FRF_AB_PCIE_DTX0_LBN 32 | ||
784 | #define FRF_AB_PCIE_DTX0_WIDTH 4 | ||
785 | #define FRF_AB_PCIE_DEQ7_LBN 28 | ||
786 | #define FRF_AB_PCIE_DEQ7_WIDTH 4 | ||
787 | #define FRF_AB_PCIE_DEQ6_LBN 24 | ||
788 | #define FRF_AB_PCIE_DEQ6_WIDTH 4 | ||
789 | #define FRF_AB_PCIE_DEQ5_LBN 20 | ||
790 | #define FRF_AB_PCIE_DEQ5_WIDTH 4 | ||
791 | #define FRF_AB_PCIE_DEQ4_LBN 16 | ||
792 | #define FRF_AB_PCIE_DEQ4_WIDTH 4 | ||
793 | #define FRF_AB_PCIE_DEQ3_LBN 12 | ||
794 | #define FRF_AB_PCIE_DEQ3_WIDTH 4 | ||
795 | #define FRF_AB_PCIE_DEQ2_LBN 8 | ||
796 | #define FRF_AB_PCIE_DEQ2_WIDTH 4 | ||
797 | #define FRF_AB_PCIE_DEQ1_LBN 4 | ||
798 | #define FRF_AB_PCIE_DEQ1_WIDTH 4 | ||
799 | #define FRF_AB_PCIE_DEQ0_LBN 0 | ||
800 | #define FRF_AB_PCIE_DEQ0_WIDTH 4 | ||
801 | |||
802 | /* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ | ||
803 | #define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 | ||
804 | #define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 | ||
805 | #define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 | ||
806 | #define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 | ||
807 | #define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 | ||
808 | #define FRF_AB_PCIE_PRBSERR_LBN 40 | ||
809 | #define FRF_AB_PCIE_PRBSERR_WIDTH 8 | ||
810 | #define FRF_AB_PCIE_PRBSERRH0_LBN 32 | ||
811 | #define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 | ||
812 | #define FRF_AB_PCIE_FASTINIT_H_LBN 15 | ||
813 | #define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 | ||
814 | #define FRF_AB_PCIE_FASTINIT_L_LBN 14 | ||
815 | #define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 | ||
816 | #define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 | ||
817 | #define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 | ||
818 | #define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 | ||
819 | #define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 | ||
820 | #define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 | ||
821 | #define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 | ||
822 | #define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 | ||
823 | #define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 | ||
824 | #define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 | ||
825 | #define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 | ||
826 | #define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 | ||
827 | #define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 | ||
828 | #define FRF_AB_PCIE_PRBSSEL_LBN 0 | ||
829 | #define FRF_AB_PCIE_PRBSSEL_WIDTH 8 | ||
830 | |||
831 | /* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ | ||
832 | #define FR_BB_DEBUG_DATA_OUT 0x00000350 | ||
833 | #define FRF_BB_DEBUG2_PORT_LBN 25 | ||
834 | #define FRF_BB_DEBUG2_PORT_WIDTH 15 | ||
835 | #define FRF_BB_DEBUG1_PORT_LBN 0 | ||
836 | #define FRF_BB_DEBUG1_PORT_WIDTH 25 | ||
837 | |||
838 | /* EVQ_RPTR_REGP0: Event queue read pointer register */ | ||
839 | #define FR_BZ_EVQ_RPTR_P0 0x00000400 | ||
840 | #define FR_BZ_EVQ_RPTR_P0_STEP 8192 | ||
841 | #define FR_BZ_EVQ_RPTR_P0_ROWS 1024 | ||
842 | /* EVQ_RPTR_REG_KER: Event queue read pointer register */ | ||
843 | #define FR_AA_EVQ_RPTR_KER 0x00011b00 | ||
844 | #define FR_AA_EVQ_RPTR_KER_STEP 4 | ||
845 | #define FR_AA_EVQ_RPTR_KER_ROWS 4 | ||
846 | /* EVQ_RPTR_REG: Event queue read pointer register */ | ||
847 | #define FR_BZ_EVQ_RPTR 0x00fa0000 | ||
848 | #define FR_BZ_EVQ_RPTR_STEP 16 | ||
849 | #define FR_BB_EVQ_RPTR_ROWS 4096 | ||
850 | #define FR_CZ_EVQ_RPTR_ROWS 1024 | ||
851 | /* EVQ_RPTR_REGP123: Event queue read pointer register */ | ||
852 | #define FR_BB_EVQ_RPTR_P123 0x01000400 | ||
853 | #define FR_BB_EVQ_RPTR_P123_STEP 8192 | ||
854 | #define FR_BB_EVQ_RPTR_P123_ROWS 3072 | ||
855 | #define FRF_AZ_EVQ_RPTR_VLD_LBN 15 | ||
856 | #define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 | ||
857 | #define FRF_AZ_EVQ_RPTR_LBN 0 | ||
858 | #define FRF_AZ_EVQ_RPTR_WIDTH 15 | ||
859 | |||
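The per-queue registers above follow a base/STEP/ROWS pattern: one row per queue, each row STEP bytes after the previous one, with at most ROWS entries. A minimal sketch of that addressing arithmetic, assuming the FR_ constants are byte offsets into the mapped register BAR (the function name is hypothetical):

#include <stddef.h>

/* Hypothetical helper: byte offset of row 'index' in a stepped register
 * block, e.g. base = FR_BZ_EVQ_RPTR_P0, step = FR_BZ_EVQ_RPTR_P0_STEP,
 * rows = FR_BZ_EVQ_RPTR_P0_ROWS.  Returns (size_t)-1 when out of range. */
static inline size_t stepped_reg_offset(size_t base, size_t step,
					size_t rows, size_t index)
{
	if (index >= rows)
		return (size_t)-1;
	return base + index * step;
}

Under this assumption, event queue 3's read pointer register on a B/Z-revision controller would sit at 0x00000400 + 3 * 8192.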
860 | /* TIMER_COMMAND_REGP0: Timer Command Registers */ | ||
861 | #define FR_BZ_TIMER_COMMAND_P0 0x00000420 | ||
862 | #define FR_BZ_TIMER_COMMAND_P0_STEP 8192 | ||
863 | #define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 | ||
864 | /* TIMER_COMMAND_REG_KER: Timer Command Registers */ | ||
865 | #define FR_AA_TIMER_COMMAND_KER 0x00000420 | ||
866 | #define FR_AA_TIMER_COMMAND_KER_STEP 8192 | ||
867 | #define FR_AA_TIMER_COMMAND_KER_ROWS 4 | ||
868 | /* TIMER_COMMAND_REGP123: Timer Command Registers */ | ||
869 | #define FR_BB_TIMER_COMMAND_P123 0x01000420 | ||
870 | #define FR_BB_TIMER_COMMAND_P123_STEP 8192 | ||
871 | #define FR_BB_TIMER_COMMAND_P123_ROWS 3072 | ||
872 | #define FRF_CZ_TC_TIMER_MODE_LBN 14 | ||
873 | #define FRF_CZ_TC_TIMER_MODE_WIDTH 2 | ||
874 | #define FRF_AB_TC_TIMER_MODE_LBN 12 | ||
875 | #define FRF_AB_TC_TIMER_MODE_WIDTH 2 | ||
876 | #define FRF_CZ_TC_TIMER_VAL_LBN 0 | ||
877 | #define FRF_CZ_TC_TIMER_VAL_WIDTH 14 | ||
878 | #define FRF_AB_TC_TIMER_VAL_LBN 0 | ||
879 | #define FRF_AB_TC_TIMER_VAL_WIDTH 12 | ||
880 | |||
881 | /* DRV_EV_REG: Driver generated event register */ | ||
882 | #define FR_AZ_DRV_EV 0x00000440 | ||
883 | #define FRF_AZ_DRV_EV_QID_LBN 64 | ||
884 | #define FRF_AZ_DRV_EV_QID_WIDTH 12 | ||
885 | #define FRF_AZ_DRV_EV_DATA_LBN 0 | ||
886 | #define FRF_AZ_DRV_EV_DATA_WIDTH 64 | ||
887 | |||
888 | /* EVQ_CTL_REG: Event queue control register */ | ||
889 | #define FR_AZ_EVQ_CTL 0x00000450 | ||
890 | #define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 | ||
891 | #define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 | ||
892 | #define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 | ||
893 | #define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 | ||
894 | #define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 | ||
895 | #define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 | ||
896 | #define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 | ||
897 | #define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 | ||
898 | #define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 | ||
899 | #define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 | ||
900 | |||
901 | /* EVQ_CNT1_REG: Event counter 1 register */ | ||
902 | #define FR_AZ_EVQ_CNT1 0x00000460 | ||
903 | #define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 | ||
904 | #define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 | ||
905 | #define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 | ||
906 | #define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 | ||
907 | #define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 | ||
908 | #define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 | ||
909 | #define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 | ||
910 | #define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 | ||
911 | #define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 | ||
912 | #define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 | ||
913 | #define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 | ||
914 | #define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 | ||
915 | #define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 | ||
916 | #define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 | ||
917 | |||
918 | /* EVQ_CNT2_REG: Event counter 2 register */ | ||
919 | #define FR_AZ_EVQ_CNT2 0x00000470 | ||
920 | #define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 | ||
921 | #define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 | ||
922 | #define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 | ||
923 | #define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 | ||
924 | #define FRF_AZ_EVQ_RDY_CNT_LBN 80 | ||
925 | #define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 | ||
926 | #define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 | ||
927 | #define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 | ||
928 | #define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 | ||
929 | #define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 | ||
930 | #define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 | ||
931 | #define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 | ||
932 | #define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 | ||
933 | #define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 | ||
934 | |||
935 | /* USR_EV_REG: Event mailbox register */ | ||
936 | #define FR_CZ_USR_EV 0x00000540 | ||
937 | #define FR_CZ_USR_EV_STEP 8192 | ||
938 | #define FR_CZ_USR_EV_ROWS 1024 | ||
939 | #define FRF_CZ_USR_EV_DATA_LBN 0 | ||
940 | #define FRF_CZ_USR_EV_DATA_WIDTH 32 | ||
941 | |||
942 | /* BUF_TBL_CFG_REG: Buffer table configuration register */ | ||
943 | #define FR_AZ_BUF_TBL_CFG 0x00000600 | ||
944 | #define FRF_AZ_BUF_TBL_MODE_LBN 3 | ||
945 | #define FRF_AZ_BUF_TBL_MODE_WIDTH 1 | ||
946 | |||
947 | /* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ | ||
948 | #define FR_AZ_SRM_RX_DC_CFG 0x00000610 | ||
949 | #define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 | ||
950 | #define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 | ||
951 | #define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 | ||
952 | #define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 | ||
953 | |||
954 | /* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ | ||
955 | #define FR_AZ_SRM_TX_DC_CFG 0x00000620 | ||
956 | #define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 | ||
957 | #define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 | ||
958 | |||
959 | /* SRM_CFG_REG: SRAM configuration register */ | ||
960 | #define FR_AZ_SRM_CFG 0x00000630 | ||
961 | #define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 | ||
962 | #define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 | ||
963 | #define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 | ||
964 | #define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 | ||
965 | #define FRF_AZ_SRM_INIT_EN_LBN 3 | ||
966 | #define FRF_AZ_SRM_INIT_EN_WIDTH 1 | ||
967 | #define FRF_AZ_SRM_NUM_BANK_LBN 2 | ||
968 | #define FRF_AZ_SRM_NUM_BANK_WIDTH 1 | ||
969 | #define FRF_AZ_SRM_BANK_SIZE_LBN 0 | ||
970 | #define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 | ||
971 | |||
972 | /* BUF_TBL_UPD_REG: Buffer table update register */ | ||
973 | #define FR_AZ_BUF_TBL_UPD 0x00000650 | ||
974 | #define FRF_AZ_BUF_UPD_CMD_LBN 63 | ||
975 | #define FRF_AZ_BUF_UPD_CMD_WIDTH 1 | ||
976 | #define FRF_AZ_BUF_CLR_CMD_LBN 62 | ||
977 | #define FRF_AZ_BUF_CLR_CMD_WIDTH 1 | ||
978 | #define FRF_AZ_BUF_CLR_END_ID_LBN 32 | ||
979 | #define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 | ||
980 | #define FRF_AZ_BUF_CLR_START_ID_LBN 0 | ||
981 | #define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 | ||
982 | |||
983 | /* SRM_UPD_EVQ_REG: Buffer table update register */ | ||
984 | #define FR_AZ_SRM_UPD_EVQ 0x00000660 | ||
985 | #define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 | ||
986 | #define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 | ||
987 | |||
988 | /* SRAM_PARITY_REG: SRAM parity register. */ | ||
989 | #define FR_AZ_SRAM_PARITY 0x00000670 | ||
990 | #define FRF_CZ_BYPASS_ECC_LBN 3 | ||
991 | #define FRF_CZ_BYPASS_ECC_WIDTH 1 | ||
992 | #define FRF_CZ_SEC_INT_LBN 2 | ||
993 | #define FRF_CZ_SEC_INT_WIDTH 1 | ||
994 | #define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 | ||
995 | #define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 | ||
996 | #define FRF_AB_FORCE_SRAM_PERR_LBN 0 | ||
997 | #define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 | ||
998 | #define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 | ||
999 | #define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 | ||
1000 | |||
1001 | /* RX_CFG_REG: Receive configuration register */ | ||
1002 | #define FR_AZ_RX_CFG 0x00000800 | ||
1003 | #define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 | ||
1004 | #define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 | ||
1005 | #define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 | ||
1006 | #define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 | ||
1007 | #define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 | ||
1008 | #define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 | ||
1009 | #define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 | ||
1010 | #define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 | ||
1011 | #define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 | ||
1012 | #define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 | ||
1013 | #define FRF_BZ_RX_TCP_SUP_LBN 48 | ||
1014 | #define FRF_BZ_RX_TCP_SUP_WIDTH 1 | ||
1015 | #define FRF_BZ_RX_INGR_EN_LBN 47 | ||
1016 | #define FRF_BZ_RX_INGR_EN_WIDTH 1 | ||
1017 | #define FRF_BZ_RX_IP_HASH_LBN 46 | ||
1018 | #define FRF_BZ_RX_IP_HASH_WIDTH 1 | ||
1019 | #define FRF_BZ_RX_HASH_ALG_LBN 45 | ||
1020 | #define FRF_BZ_RX_HASH_ALG_WIDTH 1 | ||
1021 | #define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 | ||
1022 | #define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 | ||
1023 | #define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 | ||
1024 | #define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 | ||
1025 | #define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 | ||
1026 | #define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 | ||
1027 | #define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 | ||
1028 | #define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 | ||
1029 | #define FRF_BZ_RX_OWNERR_CTL_LBN 38 | ||
1030 | #define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 | ||
1031 | #define FRF_BZ_RX_XON_TX_TH_LBN 33 | ||
1032 | #define FRF_BZ_RX_XON_TX_TH_WIDTH 5 | ||
1033 | #define FRF_AA_RX_DESC_PUSH_EN_LBN 35 | ||
1034 | #define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 | ||
1035 | #define FRF_AA_RX_RDW_PATCH_EN_LBN 34 | ||
1036 | #define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 | ||
1037 | #define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 | ||
1038 | #define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 | ||
1039 | #define FRF_BZ_RX_XOFF_TX_TH_LBN 28 | ||
1040 | #define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 | ||
1041 | #define FRF_AA_RX_OWNERR_CTL_LBN 30 | ||
1042 | #define FRF_AA_RX_OWNERR_CTL_WIDTH 1 | ||
1043 | #define FRF_AA_RX_XON_TX_TH_LBN 25 | ||
1044 | #define FRF_AA_RX_XON_TX_TH_WIDTH 5 | ||
1045 | #define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 | ||
1046 | #define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 | ||
1047 | #define FRF_AA_RX_XOFF_TX_TH_LBN 20 | ||
1048 | #define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 | ||
1049 | #define FRF_AA_RX_USR_BUF_SIZE_LBN 11 | ||
1050 | #define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 | ||
1051 | #define FRF_BZ_RX_XON_MAC_TH_LBN 10 | ||
1052 | #define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 | ||
1053 | #define FRF_AA_RX_XON_MAC_TH_LBN 6 | ||
1054 | #define FRF_AA_RX_XON_MAC_TH_WIDTH 5 | ||
1055 | #define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 | ||
1056 | #define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 | ||
1057 | #define FRF_AA_RX_XOFF_MAC_TH_LBN 1 | ||
1058 | #define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 | ||
1059 | #define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 | ||
1060 | #define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 | ||
1061 | |||
1062 | /* RX_FILTER_CTL_REG: Receive filter control registers */ | ||
1063 | #define FR_BZ_RX_FILTER_CTL 0x00000810 | ||
1064 | #define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 | ||
1065 | #define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 | ||
1066 | #define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 | ||
1067 | #define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 | ||
1068 | #define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 | ||
1069 | #define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 | ||
1070 | #define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 | ||
1071 | #define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 | ||
1072 | #define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 | ||
1073 | #define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 | ||
1074 | #define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 | ||
1075 | #define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 | ||
1076 | #define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 | ||
1077 | #define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 | ||
1078 | #define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 | ||
1079 | #define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 | ||
1080 | #define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 | ||
1081 | #define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 | ||
1082 | #define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 | ||
1083 | #define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 | ||
1084 | #define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 | ||
1085 | #define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 | ||
1086 | #define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 | ||
1087 | #define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 | ||
1088 | #define FRF_BZ_NUM_KER_LBN 24 | ||
1089 | #define FRF_BZ_NUM_KER_WIDTH 2 | ||
1090 | #define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 | ||
1091 | #define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 | ||
1092 | #define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 | ||
1093 | #define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 | ||
1094 | #define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 | ||
1095 | #define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 | ||
1096 | |||
1097 | /* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ | ||
1098 | #define FR_AZ_RX_FLUSH_DESCQ 0x00000820 | ||
1099 | #define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 | ||
1100 | #define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
1101 | #define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 | ||
1102 | #define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 | ||
1103 | |||
1104 | /* RX_DESC_UPD_REGP0: Receive descriptor update register. */ | ||
1105 | #define FR_BZ_RX_DESC_UPD_P0 0x00000830 | ||
1106 | #define FR_BZ_RX_DESC_UPD_P0_STEP 8192 | ||
1107 | #define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 | ||
1108 | /* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ | ||
1109 | #define FR_AA_RX_DESC_UPD_KER 0x00000830 | ||
1110 | #define FR_AA_RX_DESC_UPD_KER_STEP 8192 | ||
1111 | #define FR_AA_RX_DESC_UPD_KER_ROWS 4 | ||
1112 | /* RX_DESC_UPD_REGP123: Receive descriptor update register. */ | ||
1113 | #define FR_BB_RX_DESC_UPD_P123 0x01000830 | ||
1114 | #define FR_BB_RX_DESC_UPD_P123_STEP 8192 | ||
1115 | #define FR_BB_RX_DESC_UPD_P123_ROWS 3072 | ||
1116 | #define FRF_AZ_RX_DESC_WPTR_LBN 96 | ||
1117 | #define FRF_AZ_RX_DESC_WPTR_WIDTH 12 | ||
1118 | #define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 | ||
1119 | #define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 | ||
1120 | #define FRF_AZ_RX_DESC_LBN 0 | ||
1121 | #define FRF_AZ_RX_DESC_WIDTH 64 | ||
1122 | |||
1123 | /* RX_DC_CFG_REG: Receive descriptor cache configuration register */ | ||
1124 | #define FR_AZ_RX_DC_CFG 0x00000840 | ||
1125 | #define FRF_AB_RX_MAX_PF_LBN 2 | ||
1126 | #define FRF_AB_RX_MAX_PF_WIDTH 2 | ||
1127 | #define FRF_AZ_RX_DC_SIZE_LBN 0 | ||
1128 | #define FRF_AZ_RX_DC_SIZE_WIDTH 2 | ||
1129 | #define FFE_AZ_RX_DC_SIZE_64 3 | ||
1130 | #define FFE_AZ_RX_DC_SIZE_32 2 | ||
1131 | #define FFE_AZ_RX_DC_SIZE_16 1 | ||
1132 | #define FFE_AZ_RX_DC_SIZE_8 0 | ||
1133 | |||
1134 | /* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ | ||
1135 | #define FR_AZ_RX_DC_PF_WM 0x00000850 | ||
1136 | #define FRF_AZ_RX_DC_PF_HWM_LBN 6 | ||
1137 | #define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 | ||
1138 | #define FRF_AZ_RX_DC_PF_LWM_LBN 0 | ||
1139 | #define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 | ||
1140 | |||
1141 | /* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ | ||
1142 | #define FR_BZ_RX_RSS_TKEY 0x00000860 | ||
1143 | #define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 | ||
1144 | #define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 | ||
1145 | #define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 | ||
1146 | #define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 | ||
1147 | |||
1148 | /* RX_NODESC_DROP_REG: Receive dropped packet counter register */ | ||
1149 | #define FR_AZ_RX_NODESC_DROP 0x00000880 | ||
1150 | #define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 | ||
1151 | #define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 | ||
1152 | #define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 | ||
1153 | #define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 | ||
1154 | |||
1155 | /* RX_SELF_RST_REG: Receive self reset register */ | ||
1156 | #define FR_AA_RX_SELF_RST 0x00000890 | ||
1157 | #define FRF_AA_RX_ISCSI_DIS_LBN 17 | ||
1158 | #define FRF_AA_RX_ISCSI_DIS_WIDTH 1 | ||
1159 | #define FRF_AA_RX_SW_RST_REG_LBN 16 | ||
1160 | #define FRF_AA_RX_SW_RST_REG_WIDTH 1 | ||
1161 | #define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 | ||
1162 | #define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 | ||
1163 | #define FRF_AA_RX_SELF_RST_EN_LBN 8 | ||
1164 | #define FRF_AA_RX_SELF_RST_EN_WIDTH 1 | ||
1165 | #define FRF_AA_RX_MAX_PF_LAT_LBN 4 | ||
1166 | #define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 | ||
1167 | #define FRF_AA_RX_MAX_LU_LAT_LBN 0 | ||
1168 | #define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 | ||
1169 | |||
1170 | /* RX_DEBUG_REG: undocumented register */ | ||
1171 | #define FR_AZ_RX_DEBUG 0x000008a0 | ||
1172 | #define FRF_AZ_RX_DEBUG_LBN 0 | ||
1173 | #define FRF_AZ_RX_DEBUG_WIDTH 64 | ||
1174 | |||
1175 | /* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ | ||
1176 | #define FR_AZ_RX_PUSH_DROP 0x000008b0 | ||
1177 | #define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 | ||
1178 | #define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 | ||
1179 | |||
1180 | /* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ | ||
1181 | #define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 | ||
1182 | #define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 | ||
1183 | #define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 | ||
1184 | |||
1185 | /* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ | ||
1186 | #define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 | ||
1187 | #define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 | ||
1188 | #define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 | ||
1189 | |||
1190 | /* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ | ||
1191 | #define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 | ||
1192 | #define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 | ||
1193 | #define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 | ||
1194 | #define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 | ||
1195 | #define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 | ||
1196 | #define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 | ||
1197 | #define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 | ||
1198 | #define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 | ||
1199 | #define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 | ||
1200 | |||
1201 | /* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ | ||
1202 | #define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 | ||
1203 | #define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 | ||
1204 | #define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
1205 | #define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 | ||
1206 | #define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 | ||
1207 | |||
1208 | /* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ | ||
1209 | #define FR_BZ_TX_DESC_UPD_P0 0x00000a10 | ||
1210 | #define FR_BZ_TX_DESC_UPD_P0_STEP 8192 | ||
1211 | #define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 | ||
1212 | /* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ | ||
1213 | #define FR_AA_TX_DESC_UPD_KER 0x00000a10 | ||
1214 | #define FR_AA_TX_DESC_UPD_KER_STEP 8192 | ||
1215 | #define FR_AA_TX_DESC_UPD_KER_ROWS 8 | ||
1216 | /* TX_DESC_UPD_REGP123: Transmit descriptor update register. */ | ||
1217 | #define FR_BB_TX_DESC_UPD_P123 0x01000a10 | ||
1218 | #define FR_BB_TX_DESC_UPD_P123_STEP 8192 | ||
1219 | #define FR_BB_TX_DESC_UPD_P123_ROWS 3072 | ||
1220 | #define FRF_AZ_TX_DESC_WPTR_LBN 96 | ||
1221 | #define FRF_AZ_TX_DESC_WPTR_WIDTH 12 | ||
1222 | #define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 | ||
1223 | #define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 | ||
1224 | #define FRF_AZ_TX_DESC_LBN 0 | ||
1225 | #define FRF_AZ_TX_DESC_WIDTH 95 | ||
1226 | |||
1227 | /* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ | ||
1228 | #define FR_AZ_TX_DC_CFG 0x00000a20 | ||
1229 | #define FRF_AZ_TX_DC_SIZE_LBN 0 | ||
1230 | #define FRF_AZ_TX_DC_SIZE_WIDTH 2 | ||
1231 | #define FFE_AZ_TX_DC_SIZE_32 2 | ||
1232 | #define FFE_AZ_TX_DC_SIZE_16 1 | ||
1233 | #define FFE_AZ_TX_DC_SIZE_8 0 | ||
1234 | |||
1235 | /* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ | ||
1236 | #define FR_AA_TX_CHKSM_CFG 0x00000a30 | ||
1237 | #define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 | ||
1238 | #define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 | ||
1239 | #define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 | ||
1240 | #define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 | ||
1241 | #define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 | ||
1242 | #define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 | ||
1243 | #define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 | ||
1244 | #define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 | ||
1245 | |||
1246 | /* TX_CFG_REG: Transmit configuration register */ | ||
1247 | #define FR_AZ_TX_CFG 0x00000a50 | ||
1248 | #define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 | ||
1249 | #define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 | ||
1250 | #define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 | ||
1251 | #define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 | ||
1252 | #define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 | ||
1253 | #define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 | ||
1254 | #define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 | ||
1255 | #define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 | ||
1256 | #define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 | ||
1257 | #define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 | ||
1258 | #define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 | ||
1259 | #define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 | ||
1260 | #define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 | ||
1261 | #define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 | ||
1262 | #define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 | ||
1263 | #define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 | ||
1264 | #define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 | ||
1265 | #define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 | ||
1266 | #define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 | ||
1267 | #define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 | ||
1268 | #define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 | ||
1269 | #define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 | ||
1270 | #define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 | ||
1271 | #define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 | ||
1272 | #define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 | ||
1273 | #define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 | ||
1274 | #define FRF_AZ_TX_P1_PRI_EN_LBN 4 | ||
1275 | #define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 | ||
1276 | #define FRF_AZ_TX_OWNERR_CTL_LBN 2 | ||
1277 | #define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 | ||
1278 | #define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 | ||
1279 | #define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 | ||
1280 | #define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 | ||
1281 | #define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 | ||
1282 | |||
1283 | /* TX_PUSH_DROP_REG: Transmit push dropped register */ | ||
1284 | #define FR_AZ_TX_PUSH_DROP 0x00000a60 | ||
1285 | #define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 | ||
1286 | #define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 | ||
1287 | |||
1288 | /* TX_RESERVED_REG: Transmit configuration register */ | ||
1289 | #define FR_AZ_TX_RESERVED 0x00000a80 | ||
1290 | #define FRF_AZ_TX_EVT_CNT_LBN 121 | ||
1291 | #define FRF_AZ_TX_EVT_CNT_WIDTH 7 | ||
1292 | #define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 | ||
1293 | #define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 | ||
1294 | #define FRF_AZ_TX_RD_COMP_TMR_LBN 96 | ||
1295 | #define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 | ||
1296 | #define FRF_AZ_TX_PUSH_EN_LBN 89 | ||
1297 | #define FRF_AZ_TX_PUSH_EN_WIDTH 1 | ||
1298 | #define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 | ||
1299 | #define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 | ||
1300 | #define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 | ||
1301 | #define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 | ||
1302 | #define FRF_AZ_TX_DMAR_ST_P0_LBN 81 | ||
1303 | #define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 | ||
1304 | #define FRF_AZ_TX_DMAQ_ST_LBN 78 | ||
1305 | #define FRF_AZ_TX_DMAQ_ST_WIDTH 1 | ||
1306 | #define FRF_AZ_TX_RX_SPACER_LBN 64 | ||
1307 | #define FRF_AZ_TX_RX_SPACER_WIDTH 8 | ||
1308 | #define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 | ||
1309 | #define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 | ||
1310 | #define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 | ||
1311 | #define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 | ||
1312 | #define FRF_AZ_TX_PS_EVT_DIS_LBN 58 | ||
1313 | #define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 | ||
1314 | #define FRF_AZ_TX_RX_SPACER_EN_LBN 57 | ||
1315 | #define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 | ||
1316 | #define FRF_AZ_TX_XP_TIMER_LBN 52 | ||
1317 | #define FRF_AZ_TX_XP_TIMER_WIDTH 5 | ||
1318 | #define FRF_AZ_TX_PREF_SPACER_LBN 44 | ||
1319 | #define FRF_AZ_TX_PREF_SPACER_WIDTH 8 | ||
1320 | #define FRF_AZ_TX_PREF_WD_TMR_LBN 22 | ||
1321 | #define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 | ||
1322 | #define FRF_AZ_TX_ONLY1TAG_LBN 21 | ||
1323 | #define FRF_AZ_TX_ONLY1TAG_WIDTH 1 | ||
1324 | #define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 | ||
1325 | #define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 | ||
1326 | #define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 | ||
1327 | #define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 | ||
1328 | #define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 | ||
1329 | #define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 | ||
1330 | #define FRF_AA_TX_DMA_FF_THR_LBN 16 | ||
1331 | #define FRF_AA_TX_DMA_FF_THR_WIDTH 1 | ||
1332 | #define FRF_AZ_TX_DMA_SPACER_LBN 8 | ||
1333 | #define FRF_AZ_TX_DMA_SPACER_WIDTH 8 | ||
1334 | #define FRF_AA_TX_TCP_DIS_LBN 7 | ||
1335 | #define FRF_AA_TX_TCP_DIS_WIDTH 1 | ||
1336 | #define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 | ||
1337 | #define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 | ||
1338 | #define FRF_AA_TX_IP_DIS_LBN 6 | ||
1339 | #define FRF_AA_TX_IP_DIS_WIDTH 1 | ||
1340 | #define FRF_AZ_TX_MAX_CPL_LBN 2 | ||
1341 | #define FRF_AZ_TX_MAX_CPL_WIDTH 2 | ||
1342 | #define FFE_AZ_TX_MAX_CPL_16 3 | ||
1343 | #define FFE_AZ_TX_MAX_CPL_8 2 | ||
1344 | #define FFE_AZ_TX_MAX_CPL_4 1 | ||
1345 | #define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 | ||
1346 | #define FRF_AZ_TX_MAX_PREF_LBN 0 | ||
1347 | #define FRF_AZ_TX_MAX_PREF_WIDTH 2 | ||
1348 | #define FFE_AZ_TX_MAX_PREF_32 3 | ||
1349 | #define FFE_AZ_TX_MAX_PREF_16 2 | ||
1350 | #define FFE_AZ_TX_MAX_PREF_8 1 | ||
1351 | #define FFE_AZ_TX_MAX_PREF_OFF 0 | ||
1352 | |||
1353 | /* TX_PACE_REG: Transmit pace control register */ | ||
1354 | #define FR_BZ_TX_PACE 0x00000a90 | ||
1355 | #define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 | ||
1356 | #define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 | ||
1357 | #define FRF_BZ_TX_PACE_SB_AF_LBN 9 | ||
1358 | #define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 | ||
1359 | #define FRF_BZ_TX_PACE_FB_BASE_LBN 5 | ||
1360 | #define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 | ||
1361 | #define FRF_BZ_TX_PACE_BIN_TH_LBN 0 | ||
1362 | #define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 | ||
1363 | |||
1364 | /* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ | ||
1365 | #define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 | ||
1366 | #define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 | ||
1367 | #define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 | ||
1368 | |||
1369 | /* TX_VLAN_REG: Transmit VLAN tag register */ | ||
1370 | #define FR_BB_TX_VLAN 0x00000ae0 | ||
1371 | #define FRF_BB_TX_VLAN_EN_LBN 127 | ||
1372 | #define FRF_BB_TX_VLAN_EN_WIDTH 1 | ||
1373 | #define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 | ||
1374 | #define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 | ||
1375 | #define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 | ||
1376 | #define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 | ||
1377 | #define FRF_BB_TX_VLAN7_LBN 112 | ||
1378 | #define FRF_BB_TX_VLAN7_WIDTH 12 | ||
1379 | #define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 | ||
1380 | #define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 | ||
1381 | #define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 | ||
1382 | #define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 | ||
1383 | #define FRF_BB_TX_VLAN6_LBN 96 | ||
1384 | #define FRF_BB_TX_VLAN6_WIDTH 12 | ||
1385 | #define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 | ||
1386 | #define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 | ||
1387 | #define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 | ||
1388 | #define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 | ||
1389 | #define FRF_BB_TX_VLAN5_LBN 80 | ||
1390 | #define FRF_BB_TX_VLAN5_WIDTH 12 | ||
1391 | #define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 | ||
1392 | #define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 | ||
1393 | #define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 | ||
1394 | #define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 | ||
1395 | #define FRF_BB_TX_VLAN4_LBN 64 | ||
1396 | #define FRF_BB_TX_VLAN4_WIDTH 12 | ||
1397 | #define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61 | ||
1398 | #define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1 | ||
1399 | #define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60 | ||
1400 | #define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1 | ||
1401 | #define FRF_BB_TX_VLAN3_LBN 48 | ||
1402 | #define FRF_BB_TX_VLAN3_WIDTH 12 | ||
1403 | #define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45 | ||
1404 | #define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1 | ||
1405 | #define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44 | ||
1406 | #define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1 | ||
1407 | #define FRF_BB_TX_VLAN2_LBN 32 | ||
1408 | #define FRF_BB_TX_VLAN2_WIDTH 12 | ||
1409 | #define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29 | ||
1410 | #define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1 | ||
1411 | #define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28 | ||
1412 | #define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1 | ||
1413 | #define FRF_BB_TX_VLAN1_LBN 16 | ||
1414 | #define FRF_BB_TX_VLAN1_WIDTH 12 | ||
1415 | #define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13 | ||
1416 | #define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1 | ||
1417 | #define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12 | ||
1418 | #define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1 | ||
1419 | #define FRF_BB_TX_VLAN0_LBN 0 | ||
1420 | #define FRF_BB_TX_VLAN0_WIDTH 12 | ||
1421 | |||
1422 | /* TX_IPFIL_PORTEN_REG: Transmit filter control register */ | ||
1423 | #define FR_BZ_TX_IPFIL_PORTEN 0x00000af0 | ||
1424 | #define FRF_BZ_TX_MADR0_FIL_EN_LBN 64 | ||
1425 | #define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1 | ||
1426 | #define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62 | ||
1427 | #define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1 | ||
1428 | #define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60 | ||
1429 | #define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1 | ||
1430 | #define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58 | ||
1431 | #define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1 | ||
1432 | #define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56 | ||
1433 | #define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1 | ||
1434 | #define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54 | ||
1435 | #define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1 | ||
1436 | #define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52 | ||
1437 | #define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1 | ||
1438 | #define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50 | ||
1439 | #define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1 | ||
1440 | #define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48 | ||
1441 | #define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1 | ||
1442 | #define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46 | ||
1443 | #define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1 | ||
1444 | #define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44 | ||
1445 | #define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1 | ||
1446 | #define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42 | ||
1447 | #define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1 | ||
1448 | #define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40 | ||
1449 | #define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1 | ||
1450 | #define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38 | ||
1451 | #define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1 | ||
1452 | #define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36 | ||
1453 | #define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1 | ||
1454 | #define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34 | ||
1455 | #define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1 | ||
1456 | #define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32 | ||
1457 | #define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1 | ||
1458 | #define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30 | ||
1459 | #define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1 | ||
1460 | #define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28 | ||
1461 | #define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1 | ||
1462 | #define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26 | ||
1463 | #define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1 | ||
1464 | #define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24 | ||
1465 | #define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1 | ||
1466 | #define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22 | ||
1467 | #define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1 | ||
1468 | #define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20 | ||
1469 | #define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1 | ||
1470 | #define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18 | ||
1471 | #define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1 | ||
1472 | #define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16 | ||
1473 | #define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1 | ||
1474 | #define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14 | ||
1475 | #define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1 | ||
1476 | #define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12 | ||
1477 | #define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1 | ||
1478 | #define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10 | ||
1479 | #define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1 | ||
1480 | #define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8 | ||
1481 | #define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1 | ||
1482 | #define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6 | ||
1483 | #define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1 | ||
1484 | #define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 | ||
1485 | #define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 | ||
1486 | #define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 | ||
1487 | #define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 | ||
1488 | #define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 | ||
1489 | #define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 | ||
1490 | |||
1491 | /* TX_IPFIL_TBL: Transmit IP source address filter table */ | ||
1492 | #define FR_BB_TX_IPFIL_TBL 0x00000b00 | ||
1493 | #define FR_BB_TX_IPFIL_TBL_STEP 16 | ||
1494 | #define FR_BB_TX_IPFIL_TBL_ROWS 16 | ||
1495 | #define FRF_BB_TX_IPFIL_MASK_1_LBN 96 | ||
1496 | #define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 | ||
1497 | #define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 | ||
1498 | #define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 | ||
1499 | #define FRF_BB_TX_IPFIL_MASK_0_LBN 32 | ||
1500 | #define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 | ||
1501 | #define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 | ||
1502 | #define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 | ||
1503 | |||
1504 | /* MD_TXD_REG: PHY management transmit data register */ | ||
1505 | #define FR_AB_MD_TXD 0x00000c00 | ||
1506 | #define FRF_AB_MD_TXD_LBN 0 | ||
1507 | #define FRF_AB_MD_TXD_WIDTH 16 | ||
1508 | |||
1509 | /* MD_RXD_REG: PHY management receive data register */ | ||
1510 | #define FR_AB_MD_RXD 0x00000c10 | ||
1511 | #define FRF_AB_MD_RXD_LBN 0 | ||
1512 | #define FRF_AB_MD_RXD_WIDTH 16 | ||
1513 | |||
1514 | /* MD_CS_REG: PHY management configuration & status register */ | ||
1515 | #define FR_AB_MD_CS 0x00000c20 | ||
1516 | #define FRF_AB_MD_RD_EN_CMD_LBN 15 | ||
1517 | #define FRF_AB_MD_RD_EN_CMD_WIDTH 1 | ||
1518 | #define FRF_AB_MD_WR_EN_CMD_LBN 14 | ||
1519 | #define FRF_AB_MD_WR_EN_CMD_WIDTH 1 | ||
1520 | #define FRF_AB_MD_ADDR_CMD_LBN 13 | ||
1521 | #define FRF_AB_MD_ADDR_CMD_WIDTH 1 | ||
1522 | #define FRF_AB_MD_PT_LBN 7 | ||
1523 | #define FRF_AB_MD_PT_WIDTH 3 | ||
1524 | #define FRF_AB_MD_PL_LBN 6 | ||
1525 | #define FRF_AB_MD_PL_WIDTH 1 | ||
1526 | #define FRF_AB_MD_INT_CLR_LBN 5 | ||
1527 | #define FRF_AB_MD_INT_CLR_WIDTH 1 | ||
1528 | #define FRF_AB_MD_GC_LBN 4 | ||
1529 | #define FRF_AB_MD_GC_WIDTH 1 | ||
1530 | #define FRF_AB_MD_PRSP_LBN 3 | ||
1531 | #define FRF_AB_MD_PRSP_WIDTH 1 | ||
1532 | #define FRF_AB_MD_RIC_LBN 2 | ||
1533 | #define FRF_AB_MD_RIC_WIDTH 1 | ||
1534 | #define FRF_AB_MD_RDC_LBN 1 | ||
1535 | #define FRF_AB_MD_RDC_WIDTH 1 | ||
1536 | #define FRF_AB_MD_WRC_LBN 0 | ||
1537 | #define FRF_AB_MD_WRC_WIDTH 1 | ||
1538 | |||
1539 | /* MD_PHY_ADR_REG: PHY management PHY address register */ | ||
1540 | #define FR_AB_MD_PHY_ADR 0x00000c30 | ||
1541 | #define FRF_AB_MD_PHY_ADR_LBN 0 | ||
1542 | #define FRF_AB_MD_PHY_ADR_WIDTH 16 | ||
1543 | |||
1544 | /* MD_ID_REG: PHY management ID register */ | ||
1545 | #define FR_AB_MD_ID 0x00000c40 | ||
1546 | #define FRF_AB_MD_PRT_ADR_LBN 11 | ||
1547 | #define FRF_AB_MD_PRT_ADR_WIDTH 5 | ||
1548 | #define FRF_AB_MD_DEV_ADR_LBN 6 | ||
1549 | #define FRF_AB_MD_DEV_ADR_WIDTH 5 | ||
1550 | |||
1551 | /* MD_STAT_REG: PHY management status & mask register */ | ||
1552 | #define FR_AB_MD_STAT 0x00000c50 | ||
1553 | #define FRF_AB_MD_PINT_LBN 4 | ||
1554 | #define FRF_AB_MD_PINT_WIDTH 1 | ||
1555 | #define FRF_AB_MD_DONE_LBN 3 | ||
1556 | #define FRF_AB_MD_DONE_WIDTH 1 | ||
1557 | #define FRF_AB_MD_BSERR_LBN 2 | ||
1558 | #define FRF_AB_MD_BSERR_WIDTH 1 | ||
1559 | #define FRF_AB_MD_LNFL_LBN 1 | ||
1560 | #define FRF_AB_MD_LNFL_WIDTH 1 | ||
1561 | #define FRF_AB_MD_BSY_LBN 0 | ||
1562 | #define FRF_AB_MD_BSY_WIDTH 1 | ||
1563 | |||
1564 | /* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ | ||
1565 | #define FR_AB_MAC_STAT_DMA 0x00000c60 | ||
1566 | #define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 | ||
1567 | #define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 | ||
1568 | #define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 | ||
1569 | #define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 | ||
1570 | |||
1571 | /* MAC_CTRL_REG: Port MAC control register */ | ||
1572 | #define FR_AB_MAC_CTRL 0x00000c80 | ||
1573 | #define FRF_AB_MAC_XOFF_VAL_LBN 16 | ||
1574 | #define FRF_AB_MAC_XOFF_VAL_WIDTH 16 | ||
1575 | #define FRF_BB_TXFIFO_DRAIN_EN_LBN 7 | ||
1576 | #define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 | ||
1577 | #define FRF_AB_MAC_XG_DISTXCRC_LBN 5 | ||
1578 | #define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 | ||
1579 | #define FRF_AB_MAC_BCAD_ACPT_LBN 4 | ||
1580 | #define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 | ||
1581 | #define FRF_AB_MAC_UC_PROM_LBN 3 | ||
1582 | #define FRF_AB_MAC_UC_PROM_WIDTH 1 | ||
1583 | #define FRF_AB_MAC_LINK_STATUS_LBN 2 | ||
1584 | #define FRF_AB_MAC_LINK_STATUS_WIDTH 1 | ||
1585 | #define FRF_AB_MAC_SPEED_LBN 0 | ||
1586 | #define FRF_AB_MAC_SPEED_WIDTH 2 | ||
1587 | #define FFE_AB_MAC_SPEED_10G 3 | ||
1588 | #define FFE_AB_MAC_SPEED_1G 2 | ||
1589 | #define FFE_AB_MAC_SPEED_100M 1 | ||
1590 | #define FFE_AB_MAC_SPEED_10M 0 | ||
1591 | |||
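The FFE_-prefixed constants are enumerated values for the multi-bit field named by the matching FRF_ definitions, so programming such a field means writing one of those values into its _LBN/_WIDTH slot. A small self-contained sketch composing a MAC_CTRL_REG image, with the relevant values copied from the definitions above; build_mac_ctrl() is a made-up name, not a driver function.

#include <stdint.h>

/* Values copied from the MAC_CTRL_REG definitions above. */
#define MAC_XOFF_VAL_LBN 16 /* FRF_AB_MAC_XOFF_VAL_LBN */
#define MAC_SPEED_LBN    0  /* FRF_AB_MAC_SPEED_LBN */
#define MAC_SPEED_10G    3  /* FFE_AB_MAC_SPEED_10G */

/* Sketch only: select 10G speed and set the XOFF pause value. */
static uint64_t build_mac_ctrl(uint16_t xoff_val)
{
	uint64_t reg = 0;

	reg |= (uint64_t)xoff_val << MAC_XOFF_VAL_LBN;
	reg |= (uint64_t)MAC_SPEED_10G << MAC_SPEED_LBN;
	return reg;
}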
1592 | /* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ | ||
1593 | #define FR_BB_GEN_MODE 0x00000c90 | ||
1594 | #define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 | ||
1595 | #define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 | ||
1596 | #define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 | ||
1597 | #define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 | ||
1598 | #define FRF_BB_XFP_PHY_INT_MASK_LBN 1 | ||
1599 | #define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 | ||
1600 | #define FRF_BB_XG_PHY_INT_MASK_LBN 0 | ||
1601 | #define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 | ||
1602 | |||
1603 | /* MAC_MC_HASH_REG0: Multicast address hash table */ | ||
1604 | #define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 | ||
1605 | #define FRF_AB_MAC_MCAST_HASH0_LBN 0 | ||
1606 | #define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 | ||
1607 | |||
1608 | /* MAC_MC_HASH_REG1: Multicast address hash table */ | ||
1609 | #define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 | ||
1610 | #define FRF_AB_MAC_MCAST_HASH1_LBN 0 | ||
1611 | #define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 | ||
1612 | |||
1613 | /* GM_CFG1_REG: GMAC configuration register 1 */ | ||
1614 | #define FR_AB_GM_CFG1 0x00000e00 | ||
1615 | #define FRF_AB_GM_SW_RST_LBN 31 | ||
1616 | #define FRF_AB_GM_SW_RST_WIDTH 1 | ||
1617 | #define FRF_AB_GM_SIM_RST_LBN 30 | ||
1618 | #define FRF_AB_GM_SIM_RST_WIDTH 1 | ||
1619 | #define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 | ||
1620 | #define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 | ||
1621 | #define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 | ||
1622 | #define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 | ||
1623 | #define FRF_AB_GM_RST_RX_FUNC_LBN 17 | ||
1624 | #define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 | ||
1625 | #define FRF_AB_GM_RST_TX_FUNC_LBN 16 | ||
1626 | #define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 | ||
1627 | #define FRF_AB_GM_LOOP_LBN 8 | ||
1628 | #define FRF_AB_GM_LOOP_WIDTH 1 | ||
1629 | #define FRF_AB_GM_RX_FC_EN_LBN 5 | ||
1630 | #define FRF_AB_GM_RX_FC_EN_WIDTH 1 | ||
1631 | #define FRF_AB_GM_TX_FC_EN_LBN 4 | ||
1632 | #define FRF_AB_GM_TX_FC_EN_WIDTH 1 | ||
1633 | #define FRF_AB_GM_SYNC_RXEN_LBN 3 | ||
1634 | #define FRF_AB_GM_SYNC_RXEN_WIDTH 1 | ||
1635 | #define FRF_AB_GM_RX_EN_LBN 2 | ||
1636 | #define FRF_AB_GM_RX_EN_WIDTH 1 | ||
1637 | #define FRF_AB_GM_SYNC_TXEN_LBN 1 | ||
1638 | #define FRF_AB_GM_SYNC_TXEN_WIDTH 1 | ||
1639 | #define FRF_AB_GM_TX_EN_LBN 0 | ||
1640 | #define FRF_AB_GM_TX_EN_WIDTH 1 | ||
1641 | |||
1642 | /* GM_CFG2_REG: GMAC configuration register 2 */ | ||
1643 | #define FR_AB_GM_CFG2 0x00000e10 | ||
1644 | #define FRF_AB_GM_PAMBL_LEN_LBN 12 | ||
1645 | #define FRF_AB_GM_PAMBL_LEN_WIDTH 4 | ||
1646 | #define FRF_AB_GM_IF_MODE_LBN 8 | ||
1647 | #define FRF_AB_GM_IF_MODE_WIDTH 2 | ||
1648 | #define FFE_AB_IF_MODE_BYTE_MODE 2 | ||
1649 | #define FFE_AB_IF_MODE_NIBBLE_MODE 1 | ||
1650 | #define FRF_AB_GM_HUGE_FRM_EN_LBN 5 | ||
1651 | #define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 | ||
1652 | #define FRF_AB_GM_LEN_CHK_LBN 4 | ||
1653 | #define FRF_AB_GM_LEN_CHK_WIDTH 1 | ||
1654 | #define FRF_AB_GM_PAD_CRC_EN_LBN 2 | ||
1655 | #define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 | ||
1656 | #define FRF_AB_GM_CRC_EN_LBN 1 | ||
1657 | #define FRF_AB_GM_CRC_EN_WIDTH 1 | ||
1658 | #define FRF_AB_GM_FD_LBN 0 | ||
1659 | #define FRF_AB_GM_FD_WIDTH 1 | ||
1660 | |||
1661 | /* GM_IPG_REG: GMAC IPG register */ | ||
1662 | #define FR_AB_GM_IPG 0x00000e20 | ||
1663 | #define FRF_AB_GM_NONB2B_IPG1_LBN 24 | ||
1664 | #define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 | ||
1665 | #define FRF_AB_GM_NONB2B_IPG2_LBN 16 | ||
1666 | #define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 | ||
1667 | #define FRF_AB_GM_MIN_IPG_ENF_LBN 8 | ||
1668 | #define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 | ||
1669 | #define FRF_AB_GM_B2B_IPG_LBN 0 | ||
1670 | #define FRF_AB_GM_B2B_IPG_WIDTH 7 | ||
1671 | |||
1672 | /* GM_HD_REG: GMAC half duplex register */ | ||
1673 | #define FR_AB_GM_HD 0x00000e30 | ||
1674 | #define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 | ||
1675 | #define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 | ||
1676 | #define FRF_AB_GM_ALT_BOFF_EN_LBN 19 | ||
1677 | #define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 | ||
1678 | #define FRF_AB_GM_BP_NO_BOFF_LBN 18 | ||
1679 | #define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 | ||
1680 | #define FRF_AB_GM_DIS_BOFF_LBN 17 | ||
1681 | #define FRF_AB_GM_DIS_BOFF_WIDTH 1 | ||
1682 | #define FRF_AB_GM_EXDEF_TX_EN_LBN 16 | ||
1683 | #define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 | ||
1684 | #define FRF_AB_GM_RTRY_LIMIT_LBN 12 | ||
1685 | #define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 | ||
1686 | #define FRF_AB_GM_COL_WIN_LBN 0 | ||
1687 | #define FRF_AB_GM_COL_WIN_WIDTH 10 | ||
1688 | |||
1689 | /* GM_MAX_FLEN_REG: GMAC maximum frame length register */ | ||
1690 | #define FR_AB_GM_MAX_FLEN 0x00000e40 | ||
1691 | #define FRF_AB_GM_MAX_FLEN_LBN 0 | ||
1692 | #define FRF_AB_GM_MAX_FLEN_WIDTH 16 | ||
1693 | |||
1694 | /* GM_TEST_REG: GMAC test register */ | ||
1695 | #define FR_AB_GM_TEST 0x00000e70 | ||
1696 | #define FRF_AB_GM_MAX_BOFF_LBN 3 | ||
1697 | #define FRF_AB_GM_MAX_BOFF_WIDTH 1 | ||
1698 | #define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 | ||
1699 | #define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 | ||
1700 | #define FRF_AB_GM_TEST_PAUSE_LBN 1 | ||
1701 | #define FRF_AB_GM_TEST_PAUSE_WIDTH 1 | ||
1702 | #define FRF_AB_GM_SHORT_SLOT_LBN 0 | ||
1703 | #define FRF_AB_GM_SHORT_SLOT_WIDTH 1 | ||
1704 | |||
1705 | /* GM_ADR1_REG: GMAC station address register 1 */ | ||
1706 | #define FR_AB_GM_ADR1 0x00000f00 | ||
1707 | #define FRF_AB_GM_ADR_B0_LBN 24 | ||
1708 | #define FRF_AB_GM_ADR_B0_WIDTH 8 | ||
1709 | #define FRF_AB_GM_ADR_B1_LBN 16 | ||
1710 | #define FRF_AB_GM_ADR_B1_WIDTH 8 | ||
1711 | #define FRF_AB_GM_ADR_B2_LBN 8 | ||
1712 | #define FRF_AB_GM_ADR_B2_WIDTH 8 | ||
1713 | #define FRF_AB_GM_ADR_B3_LBN 0 | ||
1714 | #define FRF_AB_GM_ADR_B3_WIDTH 8 | ||
1715 | |||
1716 | /* GM_ADR2_REG: GMAC station address register 2 */ | ||
1717 | #define FR_AB_GM_ADR2 0x00000f10 | ||
1718 | #define FRF_AB_GM_ADR_B4_LBN 24 | ||
1719 | #define FRF_AB_GM_ADR_B4_WIDTH 8 | ||
1720 | #define FRF_AB_GM_ADR_B5_LBN 16 | ||
1721 | #define FRF_AB_GM_ADR_B5_WIDTH 8 | ||
1722 | |||
1723 | /* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ | ||
1724 | #define FR_AB_GMF_CFG0 0x00000f20 | ||
1725 | #define FRF_AB_GMF_FTFENRPLY_LBN 20 | ||
1726 | #define FRF_AB_GMF_FTFENRPLY_WIDTH 1 | ||
1727 | #define FRF_AB_GMF_STFENRPLY_LBN 19 | ||
1728 | #define FRF_AB_GMF_STFENRPLY_WIDTH 1 | ||
1729 | #define FRF_AB_GMF_FRFENRPLY_LBN 18 | ||
1730 | #define FRF_AB_GMF_FRFENRPLY_WIDTH 1 | ||
1731 | #define FRF_AB_GMF_SRFENRPLY_LBN 17 | ||
1732 | #define FRF_AB_GMF_SRFENRPLY_WIDTH 1 | ||
1733 | #define FRF_AB_GMF_WTMENRPLY_LBN 16 | ||
1734 | #define FRF_AB_GMF_WTMENRPLY_WIDTH 1 | ||
1735 | #define FRF_AB_GMF_FTFENREQ_LBN 12 | ||
1736 | #define FRF_AB_GMF_FTFENREQ_WIDTH 1 | ||
1737 | #define FRF_AB_GMF_STFENREQ_LBN 11 | ||
1738 | #define FRF_AB_GMF_STFENREQ_WIDTH 1 | ||
1739 | #define FRF_AB_GMF_FRFENREQ_LBN 10 | ||
1740 | #define FRF_AB_GMF_FRFENREQ_WIDTH 1 | ||
1741 | #define FRF_AB_GMF_SRFENREQ_LBN 9 | ||
1742 | #define FRF_AB_GMF_SRFENREQ_WIDTH 1 | ||
1743 | #define FRF_AB_GMF_WTMENREQ_LBN 8 | ||
1744 | #define FRF_AB_GMF_WTMENREQ_WIDTH 1 | ||
1745 | #define FRF_AB_GMF_HSTRSTFT_LBN 4 | ||
1746 | #define FRF_AB_GMF_HSTRSTFT_WIDTH 1 | ||
1747 | #define FRF_AB_GMF_HSTRSTST_LBN 3 | ||
1748 | #define FRF_AB_GMF_HSTRSTST_WIDTH 1 | ||
1749 | #define FRF_AB_GMF_HSTRSTFR_LBN 2 | ||
1750 | #define FRF_AB_GMF_HSTRSTFR_WIDTH 1 | ||
1751 | #define FRF_AB_GMF_HSTRSTSR_LBN 1 | ||
1752 | #define FRF_AB_GMF_HSTRSTSR_WIDTH 1 | ||
1753 | #define FRF_AB_GMF_HSTRSTWT_LBN 0 | ||
1754 | #define FRF_AB_GMF_HSTRSTWT_WIDTH 1 | ||
1755 | |||
1756 | /* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ | ||
1757 | #define FR_AB_GMF_CFG1 0x00000f30 | ||
1758 | #define FRF_AB_GMF_CFGFRTH_LBN 16 | ||
1759 | #define FRF_AB_GMF_CFGFRTH_WIDTH 5 | ||
1760 | #define FRF_AB_GMF_CFGXOFFRTX_LBN 0 | ||
1761 | #define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 | ||
1762 | |||
1763 | /* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ | ||
1764 | #define FR_AB_GMF_CFG2 0x00000f40 | ||
1765 | #define FRF_AB_GMF_CFGHWM_LBN 16 | ||
1766 | #define FRF_AB_GMF_CFGHWM_WIDTH 6 | ||
1767 | #define FRF_AB_GMF_CFGLWM_LBN 0 | ||
1768 | #define FRF_AB_GMF_CFGLWM_WIDTH 6 | ||
1769 | |||
1770 | /* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ | ||
1771 | #define FR_AB_GMF_CFG3 0x00000f50 | ||
1772 | #define FRF_AB_GMF_CFGHWMFT_LBN 16 | ||
1773 | #define FRF_AB_GMF_CFGHWMFT_WIDTH 6 | ||
1774 | #define FRF_AB_GMF_CFGFTTH_LBN 0 | ||
1775 | #define FRF_AB_GMF_CFGFTTH_WIDTH 6 | ||
1776 | |||
1777 | /* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ | ||
1778 | #define FR_AB_GMF_CFG4 0x00000f60 | ||
1779 | #define FRF_AB_GMF_HSTFLTRFRM_LBN 0 | ||
1780 | #define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 | ||
1781 | |||
1782 | /* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ | ||
1783 | #define FR_AB_GMF_CFG5 0x00000f70 | ||
1784 | #define FRF_AB_GMF_CFGHDPLX_LBN 22 | ||
1785 | #define FRF_AB_GMF_CFGHDPLX_WIDTH 1 | ||
1786 | #define FRF_AB_GMF_SRFULL_LBN 21 | ||
1787 | #define FRF_AB_GMF_SRFULL_WIDTH 1 | ||
1788 | #define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 | ||
1789 | #define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 | ||
1790 | #define FRF_AB_GMF_CFGBYTMODE_LBN 19 | ||
1791 | #define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 | ||
1792 | #define FRF_AB_GMF_HSTDRPLT64_LBN 18 | ||
1793 | #define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 | ||
1794 | #define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 | ||
1795 | #define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 | ||
1796 | |||
1797 | /* TX_SRC_MAC_TBL: Transmit IP source address filter table */ | ||
1798 | #define FR_BB_TX_SRC_MAC_TBL 0x00001000 | ||
1799 | #define FR_BB_TX_SRC_MAC_TBL_STEP 16 | ||
1800 | #define FR_BB_TX_SRC_MAC_TBL_ROWS 16 | ||
1801 | #define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 | ||
1802 | #define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 | ||
1803 | #define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 | ||
1804 | #define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 | ||
1805 | |||
1806 | /* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ | ||
1807 | #define FR_BB_TX_SRC_MAC_CTL 0x00001100 | ||
1808 | #define FRF_BB_TX_SRC_DROP_CTR_LBN 16 | ||
1809 | #define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 | ||
1810 | #define FRF_BB_TX_SRC_FLTR_EN_LBN 15 | ||
1811 | #define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 | ||
1812 | #define FRF_BB_TX_DROP_CTR_CLR_LBN 12 | ||
1813 | #define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 | ||
1814 | #define FRF_BB_TX_MAC_QID_SEL_LBN 0 | ||
1815 | #define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 | ||
1816 | |||
1817 | /* XM_ADR_LO_REG: XGMAC address register low */ | ||
1818 | #define FR_AB_XM_ADR_LO 0x00001200 | ||
1819 | #define FRF_AB_XM_ADR_LO_LBN 0 | ||
1820 | #define FRF_AB_XM_ADR_LO_WIDTH 32 | ||
1821 | |||
1822 | /* XM_ADR_HI_REG: XGMAC address register high */ | ||
1823 | #define FR_AB_XM_ADR_HI 0x00001210 | ||
1824 | #define FRF_AB_XM_ADR_HI_LBN 0 | ||
1825 | #define FRF_AB_XM_ADR_HI_WIDTH 16 | ||
1826 | |||
1827 | /* XM_GLB_CFG_REG: XGMAC global configuration */ | ||
1828 | #define FR_AB_XM_GLB_CFG 0x00001220 | ||
1829 | #define FRF_AB_XM_RMTFLT_GEN_LBN 17 | ||
1830 | #define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 | ||
1831 | #define FRF_AB_XM_DEBUG_MODE_LBN 16 | ||
1832 | #define FRF_AB_XM_DEBUG_MODE_WIDTH 1 | ||
1833 | #define FRF_AB_XM_RX_STAT_EN_LBN 11 | ||
1834 | #define FRF_AB_XM_RX_STAT_EN_WIDTH 1 | ||
1835 | #define FRF_AB_XM_TX_STAT_EN_LBN 10 | ||
1836 | #define FRF_AB_XM_TX_STAT_EN_WIDTH 1 | ||
1837 | #define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 | ||
1838 | #define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 | ||
1839 | #define FRF_AB_XM_WAN_MODE_LBN 5 | ||
1840 | #define FRF_AB_XM_WAN_MODE_WIDTH 1 | ||
1841 | #define FRF_AB_XM_INTCLR_MODE_LBN 3 | ||
1842 | #define FRF_AB_XM_INTCLR_MODE_WIDTH 1 | ||
1843 | #define FRF_AB_XM_CORE_RST_LBN 0 | ||
1844 | #define FRF_AB_XM_CORE_RST_WIDTH 1 | ||
1845 | |||
1846 | /* XM_TX_CFG_REG: XGMAC transmit configuration */ | ||
1847 | #define FR_AB_XM_TX_CFG 0x00001230 | ||
1848 | #define FRF_AB_XM_TX_PROG_LBN 24 | ||
1849 | #define FRF_AB_XM_TX_PROG_WIDTH 1 | ||
1850 | #define FRF_AB_XM_IPG_LBN 16 | ||
1851 | #define FRF_AB_XM_IPG_WIDTH 4 | ||
1852 | #define FRF_AB_XM_FCNTL_LBN 10 | ||
1853 | #define FRF_AB_XM_FCNTL_WIDTH 1 | ||
1854 | #define FRF_AB_XM_TXCRC_LBN 8 | ||
1855 | #define FRF_AB_XM_TXCRC_WIDTH 1 | ||
1856 | #define FRF_AB_XM_EDRC_LBN 6 | ||
1857 | #define FRF_AB_XM_EDRC_WIDTH 1 | ||
1858 | #define FRF_AB_XM_AUTO_PAD_LBN 5 | ||
1859 | #define FRF_AB_XM_AUTO_PAD_WIDTH 1 | ||
1860 | #define FRF_AB_XM_TX_PRMBL_LBN 2 | ||
1861 | #define FRF_AB_XM_TX_PRMBL_WIDTH 1 | ||
1862 | #define FRF_AB_XM_TXEN_LBN 1 | ||
1863 | #define FRF_AB_XM_TXEN_WIDTH 1 | ||
1864 | #define FRF_AB_XM_TX_RST_LBN 0 | ||
1865 | #define FRF_AB_XM_TX_RST_WIDTH 1 | ||
1866 | |||
1867 | /* XM_RX_CFG_REG: XGMAC receive configuration */ | ||
1868 | #define FR_AB_XM_RX_CFG 0x00001240 | ||
1869 | #define FRF_AB_XM_PASS_LENERR_LBN 26 | ||
1870 | #define FRF_AB_XM_PASS_LENERR_WIDTH 1 | ||
1871 | #define FRF_AB_XM_PASS_CRC_ERR_LBN 25 | ||
1872 | #define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 | ||
1873 | #define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 | ||
1874 | #define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 | ||
1875 | #define FRF_AB_XM_REJ_BCAST_LBN 20 | ||
1876 | #define FRF_AB_XM_REJ_BCAST_WIDTH 1 | ||
1877 | #define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 | ||
1878 | #define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 | ||
1879 | #define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 | ||
1880 | #define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 | ||
1881 | #define FRF_AB_XM_AUTO_DEPAD_LBN 8 | ||
1882 | #define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 | ||
1883 | #define FRF_AB_XM_RXCRC_LBN 3 | ||
1884 | #define FRF_AB_XM_RXCRC_WIDTH 1 | ||
1885 | #define FRF_AB_XM_RX_PRMBL_LBN 2 | ||
1886 | #define FRF_AB_XM_RX_PRMBL_WIDTH 1 | ||
1887 | #define FRF_AB_XM_RXEN_LBN 1 | ||
1888 | #define FRF_AB_XM_RXEN_WIDTH 1 | ||
1889 | #define FRF_AB_XM_RX_RST_LBN 0 | ||
1890 | #define FRF_AB_XM_RX_RST_WIDTH 1 | ||
1891 | |||
1892 | /* XM_MGT_INT_MASK: XGMAC management interrupt mask register */ | ||
1893 | #define FR_AB_XM_MGT_INT_MASK 0x00001250 | ||
1894 | #define FRF_AB_XM_MSK_STA_INTR_LBN 16 | ||
1895 | #define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 | ||
1896 | #define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 | ||
1897 | #define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 | ||
1898 | #define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 | ||
1899 | #define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 | ||
1900 | #define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 | ||
1901 | #define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 | ||
1902 | #define FRF_AB_XM_MSK_RMTFLT_LBN 1 | ||
1903 | #define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 | ||
1904 | #define FRF_AB_XM_MSK_LCLFLT_LBN 0 | ||
1905 | #define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 | ||
1906 | |||
1907 | /* XM_FC_REG: XGMAC flow control register */ | ||
1908 | #define FR_AB_XM_FC 0x00001270 | ||
1909 | #define FRF_AB_XM_PAUSE_TIME_LBN 16 | ||
1910 | #define FRF_AB_XM_PAUSE_TIME_WIDTH 16 | ||
1911 | #define FRF_AB_XM_RX_MAC_STAT_LBN 11 | ||
1912 | #define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 | ||
1913 | #define FRF_AB_XM_TX_MAC_STAT_LBN 10 | ||
1914 | #define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 | ||
1915 | #define FRF_AB_XM_MCNTL_PASS_LBN 8 | ||
1916 | #define FRF_AB_XM_MCNTL_PASS_WIDTH 2 | ||
1917 | #define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 | ||
1918 | #define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 | ||
1919 | #define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 | ||
1920 | #define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 | ||
1921 | #define FRF_AB_XM_ZPAUSE_LBN 2 | ||
1922 | #define FRF_AB_XM_ZPAUSE_WIDTH 1 | ||
1923 | #define FRF_AB_XM_XMIT_PAUSE_LBN 1 | ||
1924 | #define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 | ||
1925 | #define FRF_AB_XM_DIS_FCNTL_LBN 0 | ||
1926 | #define FRF_AB_XM_DIS_FCNTL_WIDTH 1 | ||
1927 | |||
1928 | /* XM_PAUSE_TIME_REG: XGMAC pause time register */ | ||
1929 | #define FR_AB_XM_PAUSE_TIME 0x00001290 | ||
1930 | #define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 | ||
1931 | #define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 | ||
1932 | #define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 | ||
1933 | #define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 | ||
1934 | |||
1935 | /* XM_TX_PARAM_REG: XGMAC transmit parameter register */ | ||
1936 | #define FR_AB_XM_TX_PARAM 0x000012d0 | ||
1937 | #define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 | ||
1938 | #define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 | ||
1939 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 | ||
1940 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 | ||
1941 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 | ||
1942 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 | ||
1943 | #define FRF_AB_XM_PAD_CHAR_LBN 0 | ||
1944 | #define FRF_AB_XM_PAD_CHAR_WIDTH 8 | ||
1945 | |||
1946 | /* XM_RX_PARAM_REG: XGMAC receive parameter register */ | ||
1947 | #define FR_AB_XM_RX_PARAM 0x000012e0 | ||
1948 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 | ||
1949 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 | ||
1950 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 | ||
1951 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 | ||
1952 | |||
1953 | /* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ | ||
1954 | #define FR_AB_XM_MGT_INT_MSK 0x000012f0 | ||
1955 | #define FRF_AB_XM_STAT_CNTR_OF_LBN 9 | ||
1956 | #define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 | ||
1957 | #define FRF_AB_XM_STAT_CNTR_HF_LBN 8 | ||
1958 | #define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 | ||
1959 | #define FRF_AB_XM_PRMBLE_ERR_LBN 2 | ||
1960 | #define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 | ||
1961 | #define FRF_AB_XM_RMTFLT_LBN 1 | ||
1962 | #define FRF_AB_XM_RMTFLT_WIDTH 1 | ||
1963 | #define FRF_AB_XM_LCLFLT_LBN 0 | ||
1964 | #define FRF_AB_XM_LCLFLT_WIDTH 1 | ||
1965 | |||
1966 | /* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ | ||
1967 | #define FR_AB_XX_PWR_RST 0x00001300 | ||
1968 | #define FRF_AB_XX_PWRDND_SIG_LBN 31 | ||
1969 | #define FRF_AB_XX_PWRDND_SIG_WIDTH 1 | ||
1970 | #define FRF_AB_XX_PWRDNC_SIG_LBN 30 | ||
1971 | #define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 | ||
1972 | #define FRF_AB_XX_PWRDNB_SIG_LBN 29 | ||
1973 | #define FRF_AB_XX_PWRDNB_SIG_WIDTH 1 | ||
1974 | #define FRF_AB_XX_PWRDNA_SIG_LBN 28 | ||
1975 | #define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 | ||
1976 | #define FRF_AB_XX_SIM_MODE_LBN 27 | ||
1977 | #define FRF_AB_XX_SIM_MODE_WIDTH 1 | ||
1978 | #define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 | ||
1979 | #define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 | ||
1980 | #define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 | ||
1981 | #define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 | ||
1982 | #define FRF_AB_XX_RESETD_SIG_LBN 23 | ||
1983 | #define FRF_AB_XX_RESETD_SIG_WIDTH 1 | ||
1984 | #define FRF_AB_XX_RESETC_SIG_LBN 22 | ||
1985 | #define FRF_AB_XX_RESETC_SIG_WIDTH 1 | ||
1986 | #define FRF_AB_XX_RESETB_SIG_LBN 21 | ||
1987 | #define FRF_AB_XX_RESETB_SIG_WIDTH 1 | ||
1988 | #define FRF_AB_XX_RESETA_SIG_LBN 20 | ||
1989 | #define FRF_AB_XX_RESETA_SIG_WIDTH 1 | ||
1990 | #define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 | ||
1991 | #define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 | ||
1992 | #define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 | ||
1993 | #define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 | ||
1994 | #define FRF_AB_XX_SD_RST_ACT_LBN 16 | ||
1995 | #define FRF_AB_XX_SD_RST_ACT_WIDTH 1 | ||
1996 | #define FRF_AB_XX_PWRDND_EN_LBN 15 | ||
1997 | #define FRF_AB_XX_PWRDND_EN_WIDTH 1 | ||
1998 | #define FRF_AB_XX_PWRDNC_EN_LBN 14 | ||
1999 | #define FRF_AB_XX_PWRDNC_EN_WIDTH 1 | ||
2000 | #define FRF_AB_XX_PWRDNB_EN_LBN 13 | ||
2001 | #define FRF_AB_XX_PWRDNB_EN_WIDTH 1 | ||
2002 | #define FRF_AB_XX_PWRDNA_EN_LBN 12 | ||
2003 | #define FRF_AB_XX_PWRDNA_EN_WIDTH 1 | ||
2004 | #define FRF_AB_XX_RSTPLLCD_EN_LBN 9 | ||
2005 | #define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 | ||
2006 | #define FRF_AB_XX_RSTPLLAB_EN_LBN 8 | ||
2007 | #define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 | ||
2008 | #define FRF_AB_XX_RESETD_EN_LBN 7 | ||
2009 | #define FRF_AB_XX_RESETD_EN_WIDTH 1 | ||
2010 | #define FRF_AB_XX_RESETC_EN_LBN 6 | ||
2011 | #define FRF_AB_XX_RESETC_EN_WIDTH 1 | ||
2012 | #define FRF_AB_XX_RESETB_EN_LBN 5 | ||
2013 | #define FRF_AB_XX_RESETB_EN_WIDTH 1 | ||
2014 | #define FRF_AB_XX_RESETA_EN_LBN 4 | ||
2015 | #define FRF_AB_XX_RESETA_EN_WIDTH 1 | ||
2016 | #define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 | ||
2017 | #define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 | ||
2018 | #define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 | ||
2019 | #define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 | ||
2020 | #define FRF_AB_XX_RST_XX_EN_LBN 0 | ||
2021 | #define FRF_AB_XX_RST_XX_EN_WIDTH 1 | ||
2022 | |||
2023 | /* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ | ||
2024 | #define FR_AB_XX_SD_CTL 0x00001310 | ||
2025 | #define FRF_AB_XX_TERMADJ1_LBN 17 | ||
2026 | #define FRF_AB_XX_TERMADJ1_WIDTH 1 | ||
2027 | #define FRF_AB_XX_TERMADJ0_LBN 16 | ||
2028 | #define FRF_AB_XX_TERMADJ0_WIDTH 1 | ||
2029 | #define FRF_AB_XX_HIDRVD_LBN 15 | ||
2030 | #define FRF_AB_XX_HIDRVD_WIDTH 1 | ||
2031 | #define FRF_AB_XX_LODRVD_LBN 14 | ||
2032 | #define FRF_AB_XX_LODRVD_WIDTH 1 | ||
2033 | #define FRF_AB_XX_HIDRVC_LBN 13 | ||
2034 | #define FRF_AB_XX_HIDRVC_WIDTH 1 | ||
2035 | #define FRF_AB_XX_LODRVC_LBN 12 | ||
2036 | #define FRF_AB_XX_LODRVC_WIDTH 1 | ||
2037 | #define FRF_AB_XX_HIDRVB_LBN 11 | ||
2038 | #define FRF_AB_XX_HIDRVB_WIDTH 1 | ||
2039 | #define FRF_AB_XX_LODRVB_LBN 10 | ||
2040 | #define FRF_AB_XX_LODRVB_WIDTH 1 | ||
2041 | #define FRF_AB_XX_HIDRVA_LBN 9 | ||
2042 | #define FRF_AB_XX_HIDRVA_WIDTH 1 | ||
2043 | #define FRF_AB_XX_LODRVA_LBN 8 | ||
2044 | #define FRF_AB_XX_LODRVA_WIDTH 1 | ||
2045 | #define FRF_AB_XX_LPBKD_LBN 3 | ||
2046 | #define FRF_AB_XX_LPBKD_WIDTH 1 | ||
2047 | #define FRF_AB_XX_LPBKC_LBN 2 | ||
2048 | #define FRF_AB_XX_LPBKC_WIDTH 1 | ||
2049 | #define FRF_AB_XX_LPBKB_LBN 1 | ||
2050 | #define FRF_AB_XX_LPBKB_WIDTH 1 | ||
2051 | #define FRF_AB_XX_LPBKA_LBN 0 | ||
2052 | #define FRF_AB_XX_LPBKA_WIDTH 1 | ||
2053 | |||
2054 | /* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ | ||
2055 | #define FR_AB_XX_TXDRV_CTL 0x00001320 | ||
2056 | #define FRF_AB_XX_DEQD_LBN 28 | ||
2057 | #define FRF_AB_XX_DEQD_WIDTH 4 | ||
2058 | #define FRF_AB_XX_DEQC_LBN 24 | ||
2059 | #define FRF_AB_XX_DEQC_WIDTH 4 | ||
2060 | #define FRF_AB_XX_DEQB_LBN 20 | ||
2061 | #define FRF_AB_XX_DEQB_WIDTH 4 | ||
2062 | #define FRF_AB_XX_DEQA_LBN 16 | ||
2063 | #define FRF_AB_XX_DEQA_WIDTH 4 | ||
2064 | #define FRF_AB_XX_DTXD_LBN 12 | ||
2065 | #define FRF_AB_XX_DTXD_WIDTH 4 | ||
2066 | #define FRF_AB_XX_DTXC_LBN 8 | ||
2067 | #define FRF_AB_XX_DTXC_WIDTH 4 | ||
2068 | #define FRF_AB_XX_DTXB_LBN 4 | ||
2069 | #define FRF_AB_XX_DTXB_WIDTH 4 | ||
2070 | #define FRF_AB_XX_DTXA_LBN 0 | ||
2071 | #define FRF_AB_XX_DTXA_WIDTH 4 | ||
2072 | |||
2073 | /* XX_PRBS_CTL_REG: XAUI PRBS control register (per-channel TX/RX PRBS select, invert and checker enable) */ | ||
2074 | #define FR_AB_XX_PRBS_CTL 0x00001330 | ||
2075 | #define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 | ||
2076 | #define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 | ||
2077 | #define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 | ||
2078 | #define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 | ||
2079 | #define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 | ||
2080 | #define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 | ||
2081 | #define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 | ||
2082 | #define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 | ||
2083 | #define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 | ||
2084 | #define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 | ||
2085 | #define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 | ||
2086 | #define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 | ||
2087 | #define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 | ||
2088 | #define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 | ||
2089 | #define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 | ||
2090 | #define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 | ||
2091 | #define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 | ||
2092 | #define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 | ||
2093 | #define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 | ||
2094 | #define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 | ||
2095 | #define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 | ||
2096 | #define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 | ||
2097 | #define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 | ||
2098 | #define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 | ||
2099 | #define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 | ||
2100 | #define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 | ||
2101 | #define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 | ||
2102 | #define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 | ||
2103 | #define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 | ||
2104 | #define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 | ||
2105 | #define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 | ||
2106 | #define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 | ||
2107 | #define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 | ||
2108 | #define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 | ||
2109 | #define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 | ||
2110 | #define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 | ||
2111 | #define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 | ||
2112 | #define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 | ||
2113 | #define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 | ||
2114 | #define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 | ||
2115 | #define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 | ||
2116 | #define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 | ||
2117 | #define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 | ||
2118 | #define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 | ||
2119 | #define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 | ||
2120 | #define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 | ||
2121 | #define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 | ||
2122 | #define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 | ||
2123 | |||
2124 | /* XX_PRBS_CHK_REG: XAUI PRBS checker control/status register */ | ||
2125 | #define FR_AB_XX_PRBS_CHK 0x00001340 | ||
2126 | #define FRF_AB_XX_REV_LB_EN_LBN 16 | ||
2127 | #define FRF_AB_XX_REV_LB_EN_WIDTH 1 | ||
2128 | #define FRF_AB_XX_CH3_DEG_DET_LBN 15 | ||
2129 | #define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 | ||
2130 | #define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 | ||
2131 | #define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 | ||
2132 | #define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 | ||
2133 | #define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 | ||
2134 | #define FRF_AB_XX_CH3_ERR_CHK_LBN 12 | ||
2135 | #define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 | ||
2136 | #define FRF_AB_XX_CH2_DEG_DET_LBN 11 | ||
2137 | #define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 | ||
2138 | #define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 | ||
2139 | #define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 | ||
2140 | #define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 | ||
2141 | #define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 | ||
2142 | #define FRF_AB_XX_CH2_ERR_CHK_LBN 8 | ||
2143 | #define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 | ||
2144 | #define FRF_AB_XX_CH1_DEG_DET_LBN 7 | ||
2145 | #define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 | ||
2146 | #define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 | ||
2147 | #define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 | ||
2148 | #define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 | ||
2149 | #define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 | ||
2150 | #define FRF_AB_XX_CH1_ERR_CHK_LBN 4 | ||
2151 | #define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 | ||
2152 | #define FRF_AB_XX_CH0_DEG_DET_LBN 3 | ||
2153 | #define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 | ||
2154 | #define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 | ||
2155 | #define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 | ||
2156 | #define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 | ||
2157 | #define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 | ||
2158 | #define FRF_AB_XX_CH0_ERR_CHK_LBN 0 | ||
2159 | #define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 | ||
2160 | |||
2161 | /* XX_PRBS_ERR_REG: XAUI PRBS per-channel error count register */ | ||
2162 | #define FR_AB_XX_PRBS_ERR 0x00001350 | ||
2163 | #define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 | ||
2164 | #define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 | ||
2165 | #define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 | ||
2166 | #define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 | ||
2167 | #define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 | ||
2168 | #define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 | ||
2169 | #define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 | ||
2170 | #define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 | ||
2171 | |||
2172 | /* XX_CORE_STAT_REG: XAUI XGXS core status register */ | ||
2173 | #define FR_AB_XX_CORE_STAT 0x00001360 | ||
2174 | #define FRF_AB_XX_FORCE_SIG3_LBN 31 | ||
2175 | #define FRF_AB_XX_FORCE_SIG3_WIDTH 1 | ||
2176 | #define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 | ||
2177 | #define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 | ||
2178 | #define FRF_AB_XX_FORCE_SIG2_LBN 29 | ||
2179 | #define FRF_AB_XX_FORCE_SIG2_WIDTH 1 | ||
2180 | #define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 | ||
2181 | #define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 | ||
2182 | #define FRF_AB_XX_FORCE_SIG1_LBN 27 | ||
2183 | #define FRF_AB_XX_FORCE_SIG1_WIDTH 1 | ||
2184 | #define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 | ||
2185 | #define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 | ||
2186 | #define FRF_AB_XX_FORCE_SIG0_LBN 25 | ||
2187 | #define FRF_AB_XX_FORCE_SIG0_WIDTH 1 | ||
2188 | #define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 | ||
2189 | #define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 | ||
2190 | #define FRF_AB_XX_XGXS_LB_EN_LBN 23 | ||
2191 | #define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 | ||
2192 | #define FRF_AB_XX_XGMII_LB_EN_LBN 22 | ||
2193 | #define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 | ||
2194 | #define FRF_AB_XX_MATCH_FAULT_LBN 21 | ||
2195 | #define FRF_AB_XX_MATCH_FAULT_WIDTH 1 | ||
2196 | #define FRF_AB_XX_ALIGN_DONE_LBN 20 | ||
2197 | #define FRF_AB_XX_ALIGN_DONE_WIDTH 1 | ||
2198 | #define FRF_AB_XX_SYNC_STAT3_LBN 19 | ||
2199 | #define FRF_AB_XX_SYNC_STAT3_WIDTH 1 | ||
2200 | #define FRF_AB_XX_SYNC_STAT2_LBN 18 | ||
2201 | #define FRF_AB_XX_SYNC_STAT2_WIDTH 1 | ||
2202 | #define FRF_AB_XX_SYNC_STAT1_LBN 17 | ||
2203 | #define FRF_AB_XX_SYNC_STAT1_WIDTH 1 | ||
2204 | #define FRF_AB_XX_SYNC_STAT0_LBN 16 | ||
2205 | #define FRF_AB_XX_SYNC_STAT0_WIDTH 1 | ||
2206 | #define FRF_AB_XX_COMMA_DET_CH3_LBN 15 | ||
2207 | #define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 | ||
2208 | #define FRF_AB_XX_COMMA_DET_CH2_LBN 14 | ||
2209 | #define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 | ||
2210 | #define FRF_AB_XX_COMMA_DET_CH1_LBN 13 | ||
2211 | #define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 | ||
2212 | #define FRF_AB_XX_COMMA_DET_CH0_LBN 12 | ||
2213 | #define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 | ||
2214 | #define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 | ||
2215 | #define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 | ||
2216 | #define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 | ||
2217 | #define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 | ||
2218 | #define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 | ||
2219 | #define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 | ||
2220 | #define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 | ||
2221 | #define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 | ||
2222 | #define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 | ||
2223 | #define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 | ||
2224 | #define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 | ||
2225 | #define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 | ||
2226 | #define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 | ||
2227 | #define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 | ||
2228 | #define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 | ||
2229 | #define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 | ||
2230 | #define FRF_AB_XX_DISPERR_CH3_LBN 3 | ||
2231 | #define FRF_AB_XX_DISPERR_CH3_WIDTH 1 | ||
2232 | #define FRF_AB_XX_DISPERR_CH2_LBN 2 | ||
2233 | #define FRF_AB_XX_DISPERR_CH2_WIDTH 1 | ||
2234 | #define FRF_AB_XX_DISPERR_CH1_LBN 1 | ||
2235 | #define FRF_AB_XX_DISPERR_CH1_WIDTH 1 | ||
2236 | #define FRF_AB_XX_DISPERR_CH0_LBN 0 | ||
2237 | #define FRF_AB_XX_DISPERR_CH0_WIDTH 1 | ||
2238 | |||
2239 | /* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ | ||
2240 | #define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 | ||
2241 | #define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 | ||
2242 | #define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 | ||
2243 | /* RX_DESC_PTR_TBL: Receive descriptor pointer table */ | ||
2244 | #define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 | ||
2245 | #define FR_BZ_RX_DESC_PTR_TBL_STEP 16 | ||
2246 | #define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 | ||
2247 | #define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 | ||
2248 | #define FRF_CZ_RX_HDR_SPLIT_LBN 90 | ||
2249 | #define FRF_CZ_RX_HDR_SPLIT_WIDTH 1 | ||
2250 | #define FRF_AA_RX_RESET_LBN 89 | ||
2251 | #define FRF_AA_RX_RESET_WIDTH 1 | ||
2252 | #define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 | ||
2253 | #define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 | ||
2254 | #define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 | ||
2255 | #define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 | ||
2256 | #define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 | ||
2257 | #define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 | ||
2258 | #define FRF_AZ_RX_DC_HW_RPTR_LBN 80 | ||
2259 | #define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 | ||
2260 | #define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 | ||
2261 | #define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 | ||
2262 | #define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 | ||
2263 | #define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 | ||
2264 | #define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 | ||
2265 | #define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
2266 | #define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 | ||
2267 | #define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 | ||
2268 | #define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 | ||
2269 | #define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 | ||
2270 | #define FRF_AZ_RX_DESCQ_LABEL_LBN 5 | ||
2271 | #define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 | ||
2272 | #define FRF_AZ_RX_DESCQ_SIZE_LBN 3 | ||
2273 | #define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 | ||
2274 | #define FFE_AZ_RX_DESCQ_SIZE_4K 3 | ||
2275 | #define FFE_AZ_RX_DESCQ_SIZE_2K 2 | ||
2276 | #define FFE_AZ_RX_DESCQ_SIZE_1K 1 | ||
2277 | #define FFE_AZ_RX_DESCQ_SIZE_512 0 | ||
2278 | #define FRF_AZ_RX_DESCQ_TYPE_LBN 2 | ||
2279 | #define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 | ||
2280 | #define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 | ||
2281 | #define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 | ||
2282 | #define FRF_AZ_RX_DESCQ_EN_LBN 0 | ||
2283 | #define FRF_AZ_RX_DESCQ_EN_WIDTH 1 | ||
2284 | |||
2285 | /* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer table */ | ||
2286 | #define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 | ||
2287 | #define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 | ||
2288 | #define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 | ||
2289 | /* TX_DESC_PTR_TBL: Transmit descriptor pointer table */ | ||
2290 | #define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 | ||
2291 | #define FR_BZ_TX_DESC_PTR_TBL_STEP 16 | ||
2292 | #define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 | ||
2293 | #define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 | ||
2294 | #define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 | ||
2295 | #define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 | ||
2296 | #define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 | ||
2297 | #define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 | ||
2298 | #define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 | ||
2299 | #define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 | ||
2300 | #define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 | ||
2301 | #define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 | ||
2302 | #define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 | ||
2303 | #define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 | ||
2304 | #define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 | ||
2305 | #define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 | ||
2306 | #define FRF_AZ_TX_DESCQ_EN_LBN 88 | ||
2307 | #define FRF_AZ_TX_DESCQ_EN_WIDTH 1 | ||
2308 | #define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 | ||
2309 | #define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 | ||
2310 | #define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 | ||
2311 | #define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 | ||
2312 | #define FRF_AZ_TX_DC_HW_RPTR_LBN 80 | ||
2313 | #define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 | ||
2314 | #define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 | ||
2315 | #define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 | ||
2316 | #define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 | ||
2317 | #define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 | ||
2318 | #define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 | ||
2319 | #define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
2320 | #define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 | ||
2321 | #define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 | ||
2322 | #define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 | ||
2323 | #define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 | ||
2324 | #define FRF_AZ_TX_DESCQ_LABEL_LBN 5 | ||
2325 | #define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 | ||
2326 | #define FRF_AZ_TX_DESCQ_SIZE_LBN 3 | ||
2327 | #define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 | ||
2328 | #define FFE_AZ_TX_DESCQ_SIZE_4K 3 | ||
2329 | #define FFE_AZ_TX_DESCQ_SIZE_2K 2 | ||
2330 | #define FFE_AZ_TX_DESCQ_SIZE_1K 1 | ||
2331 | #define FFE_AZ_TX_DESCQ_SIZE_512 0 | ||
2332 | #define FRF_AZ_TX_DESCQ_TYPE_LBN 1 | ||
2333 | #define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 | ||
2334 | #define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 | ||
2335 | #define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 | ||
2336 | |||
2337 | /* EVQ_PTR_TBL_KER: Event queue pointer table */ | ||
2338 | #define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 | ||
2339 | #define FR_AA_EVQ_PTR_TBL_KER_STEP 16 | ||
2340 | #define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 | ||
2341 | /* EVQ_PTR_TBL: Event queue pointer table */ | ||
2342 | #define FR_BZ_EVQ_PTR_TBL 0x00f60000 | ||
2343 | #define FR_BZ_EVQ_PTR_TBL_STEP 16 | ||
2344 | #define FR_CZ_EVQ_PTR_TBL_ROWS 1024 | ||
2345 | #define FR_BB_EVQ_PTR_TBL_ROWS 4096 | ||
2346 | #define FRF_BZ_EVQ_RPTR_IGN_LBN 40 | ||
2347 | #define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 | ||
2348 | #define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 | ||
2349 | #define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 | ||
2350 | #define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 | ||
2351 | #define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 | ||
2352 | #define FRF_AZ_EVQ_NXT_WPTR_LBN 24 | ||
2353 | #define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 | ||
2354 | #define FRF_AZ_EVQ_EN_LBN 23 | ||
2355 | #define FRF_AZ_EVQ_EN_WIDTH 1 | ||
2356 | #define FRF_AZ_EVQ_SIZE_LBN 20 | ||
2357 | #define FRF_AZ_EVQ_SIZE_WIDTH 3 | ||
2358 | #define FFE_AZ_EVQ_SIZE_32K 6 | ||
2359 | #define FFE_AZ_EVQ_SIZE_16K 5 | ||
2360 | #define FFE_AZ_EVQ_SIZE_8K 4 | ||
2361 | #define FFE_AZ_EVQ_SIZE_4K 3 | ||
2362 | #define FFE_AZ_EVQ_SIZE_2K 2 | ||
2363 | #define FFE_AZ_EVQ_SIZE_1K 1 | ||
2364 | #define FFE_AZ_EVQ_SIZE_512 0 | ||
2365 | #define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 | ||
2366 | #define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 | ||
2367 | |||
2368 | /* BUF_HALF_TBL_KER: Buffer table in half buffer table mode, for direct access by the driver */ | ||
2369 | #define FR_AA_BUF_HALF_TBL_KER 0x00018000 | ||
2370 | #define FR_AA_BUF_HALF_TBL_KER_STEP 8 | ||
2371 | #define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 | ||
2372 | /* BUF_HALF_TBL: Buffer table in half buffer table mode, for direct access by the driver */ | ||
2373 | #define FR_BZ_BUF_HALF_TBL 0x00800000 | ||
2374 | #define FR_BZ_BUF_HALF_TBL_STEP 8 | ||
2375 | #define FR_CZ_BUF_HALF_TBL_ROWS 147456 | ||
2376 | #define FR_BB_BUF_HALF_TBL_ROWS 524288 | ||
2377 | #define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 | ||
2378 | #define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 | ||
2379 | #define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 | ||
2380 | #define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 | ||
2381 | #define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 | ||
2382 | #define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 | ||
2383 | #define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 | ||
2384 | #define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 | ||
2385 | |||
2386 | /* BUF_FULL_TBL_KER: Buffer table in full buffer table mode, for direct access by the driver */ | ||
2387 | #define FR_AA_BUF_FULL_TBL_KER 0x00018000 | ||
2388 | #define FR_AA_BUF_FULL_TBL_KER_STEP 8 | ||
2389 | #define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 | ||
2390 | /* BUF_FULL_TBL: Buffer table in full buffer table mode, for direct access by the driver */ | ||
2391 | #define FR_BZ_BUF_FULL_TBL 0x00800000 | ||
2392 | #define FR_BZ_BUF_FULL_TBL_STEP 8 | ||
2393 | #define FR_CZ_BUF_FULL_TBL_ROWS 147456 | ||
2394 | #define FR_BB_BUF_FULL_TBL_ROWS 917504 | ||
2395 | #define FRF_AZ_BUF_FULL_UNUSED_LBN 51 | ||
2396 | #define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 | ||
2397 | #define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 | ||
2398 | #define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 | ||
2399 | #define FRF_AZ_BUF_ADR_REGION_LBN 48 | ||
2400 | #define FRF_AZ_BUF_ADR_REGION_WIDTH 2 | ||
2401 | #define FFE_AZ_BUF_ADR_REGN3 3 | ||
2402 | #define FFE_AZ_BUF_ADR_REGN2 2 | ||
2403 | #define FFE_AZ_BUF_ADR_REGN1 1 | ||
2404 | #define FFE_AZ_BUF_ADR_REGN0 0 | ||
2405 | #define FRF_AZ_BUF_ADR_FBUF_LBN 14 | ||
2406 | #define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 | ||
2407 | #define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 | ||
2408 | #define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 | ||
2409 | |||
2410 | /* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ | ||
2411 | #define FR_BZ_RX_FILTER_TBL0 0x00f00000 | ||
2412 | #define FR_BZ_RX_FILTER_TBL0_STEP 32 | ||
2413 | #define FR_BZ_RX_FILTER_TBL0_ROWS 8192 | ||
2414 | /* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ | ||
2415 | #define FR_BB_RX_FILTER_TBL1 0x00f00010 | ||
2416 | #define FR_BB_RX_FILTER_TBL1_STEP 32 | ||
2417 | #define FR_BB_RX_FILTER_TBL1_ROWS 8192 | ||
2418 | #define FRF_BZ_RSS_EN_LBN 110 | ||
2419 | #define FRF_BZ_RSS_EN_WIDTH 1 | ||
2420 | #define FRF_BZ_SCATTER_EN_LBN 109 | ||
2421 | #define FRF_BZ_SCATTER_EN_WIDTH 1 | ||
2422 | #define FRF_BZ_TCP_UDP_LBN 108 | ||
2423 | #define FRF_BZ_TCP_UDP_WIDTH 1 | ||
2424 | #define FRF_BZ_RXQ_ID_LBN 96 | ||
2425 | #define FRF_BZ_RXQ_ID_WIDTH 12 | ||
2426 | #define FRF_BZ_DEST_IP_LBN 64 | ||
2427 | #define FRF_BZ_DEST_IP_WIDTH 32 | ||
2428 | #define FRF_BZ_DEST_PORT_TCP_LBN 48 | ||
2429 | #define FRF_BZ_DEST_PORT_TCP_WIDTH 16 | ||
2430 | #define FRF_BZ_SRC_IP_LBN 16 | ||
2431 | #define FRF_BZ_SRC_IP_WIDTH 32 | ||
2432 | #define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 | ||
2433 | #define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 | ||
2434 | |||
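Each row of RX_FILTER_TBL0 is one TCP/IPv4 match entry laid out by the FRF_BZ_* fields above. The sketch below assembles a full-match TCP row and writes it at its _STEP-spaced offset; it reuses the includes from the earlier sketch, assumes the EFX_ZERO_OWORD()/EFX_SET_OWORD_FIELD() macros from bitfield.h and efx_writeo() from io.h, omits byte-order handling, and the function name and the reading of FRF_BZ_TCP_UDP as 0 = TCP are assumptions for illustration.

/* Sketch: install one TCP/IPv4 full-match receive filter row. */
static void example_install_rx_filter(struct efx_nic *efx, unsigned int index,
				      u32 dest_ip, u16 dest_port,
				      u32 src_ip, u16 src_port,
				      unsigned int rxq_id)
{
	efx_oword_t filter;

	EFX_ZERO_OWORD(filter);
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_SCATTER_EN, 1);
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_TCP_UDP, 0);	/* assumed: 0 = TCP */
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_RXQ_ID, rxq_id);
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_DEST_IP, dest_ip);
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_DEST_PORT_TCP, dest_port);
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_SRC_IP, src_ip);
	EFX_SET_OWORD_FIELD(filter, FRF_BZ_SRC_TCP_DEST_UDP, src_port);
	efx_writeo(efx, &filter,
		   FR_BZ_RX_FILTER_TBL0 + index * FR_BZ_RX_FILTER_TBL0_STEP);
}
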
2435 | /* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ | ||
2436 | #define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 | ||
2437 | #define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 | ||
2438 | #define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 | ||
2439 | #define FRF_CZ_RMFT_RSS_EN_LBN 75 | ||
2440 | #define FRF_CZ_RMFT_RSS_EN_WIDTH 1 | ||
2441 | #define FRF_CZ_RMFT_SCATTER_EN_LBN 74 | ||
2442 | #define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 | ||
2443 | #define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 | ||
2444 | #define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 | ||
2445 | #define FRF_CZ_RMFT_RXQ_ID_LBN 61 | ||
2446 | #define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 | ||
2447 | #define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 | ||
2448 | #define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 | ||
2449 | #define FRF_CZ_RMFT_DEST_MAC_LBN 16 | ||
2450 | #define FRF_CZ_RMFT_DEST_MAC_WIDTH 44 | ||
2451 | #define FRF_CZ_RMFT_VLAN_ID_LBN 0 | ||
2452 | #define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 | ||
2453 | |||
2454 | /* TIMER_TBL: Timer table */ | ||
2455 | #define FR_BZ_TIMER_TBL 0x00f70000 | ||
2456 | #define FR_BZ_TIMER_TBL_STEP 16 | ||
2457 | #define FR_CZ_TIMER_TBL_ROWS 1024 | ||
2458 | #define FR_BB_TIMER_TBL_ROWS 4096 | ||
2459 | #define FRF_CZ_TIMER_Q_EN_LBN 33 | ||
2460 | #define FRF_CZ_TIMER_Q_EN_WIDTH 1 | ||
2461 | #define FRF_CZ_INT_ARMD_LBN 32 | ||
2462 | #define FRF_CZ_INT_ARMD_WIDTH 1 | ||
2463 | #define FRF_CZ_INT_PEND_LBN 31 | ||
2464 | #define FRF_CZ_INT_PEND_WIDTH 1 | ||
2465 | #define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 | ||
2466 | #define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 | ||
2467 | #define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 | ||
2468 | #define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 | ||
2469 | #define FRF_CZ_TIMER_MODE_LBN 14 | ||
2470 | #define FRF_CZ_TIMER_MODE_WIDTH 2 | ||
2471 | #define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 | ||
2472 | #define FFE_CZ_TIMER_MODE_TRIG_START 2 | ||
2473 | #define FFE_CZ_TIMER_MODE_IMMED_START 1 | ||
2474 | #define FFE_CZ_TIMER_MODE_DIS 0 | ||
2475 | #define FRF_BB_TIMER_MODE_LBN 12 | ||
2476 | #define FRF_BB_TIMER_MODE_WIDTH 2 | ||
2477 | #define FFE_BB_TIMER_MODE_INT_HLDOFF 2 | ||
2478 | #define FFE_BB_TIMER_MODE_TRIG_START 2 | ||
2479 | #define FFE_BB_TIMER_MODE_IMMED_START 1 | ||
2480 | #define FFE_BB_TIMER_MODE_DIS 0 | ||
2481 | #define FRF_CZ_TIMER_VAL_LBN 0 | ||
2482 | #define FRF_CZ_TIMER_VAL_WIDTH 14 | ||
2483 | #define FRF_BB_TIMER_VAL_LBN 0 | ||
2484 | #define FRF_BB_TIMER_VAL_WIDTH 12 | ||
2485 | |||
2486 | /* TX_PACE_TBL: Transmit pacing table */ | ||
2487 | #define FR_BZ_TX_PACE_TBL 0x00f80000 | ||
2488 | #define FR_BZ_TX_PACE_TBL_STEP 16 | ||
2489 | #define FR_CZ_TX_PACE_TBL_ROWS 1024 | ||
2490 | #define FR_BB_TX_PACE_TBL_ROWS 4096 | ||
2491 | #define FRF_BZ_TX_PACE_LBN 0 | ||
2492 | #define FRF_BZ_TX_PACE_WIDTH 5 | ||
2493 | |||
2494 | /* RX_INDIRECTION_TBL: RX Indirection Table */ | ||
2495 | #define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 | ||
2496 | #define FR_BZ_RX_INDIRECTION_TBL_STEP 16 | ||
2497 | #define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 | ||
2498 | #define FRF_BZ_IT_QUEUE_LBN 0 | ||
2499 | #define FRF_BZ_IT_QUEUE_WIDTH 6 | ||
2500 | |||
2501 | /* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ | ||
2502 | #define FR_CZ_TX_FILTER_TBL0 0x00fc0000 | ||
2503 | #define FR_CZ_TX_FILTER_TBL0_STEP 16 | ||
2504 | #define FR_CZ_TX_FILTER_TBL0_ROWS 8192 | ||
2505 | #define FRF_CZ_TIFT_TCP_UDP_LBN 108 | ||
2506 | #define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 | ||
2507 | #define FRF_CZ_TIFT_TXQ_ID_LBN 96 | ||
2508 | #define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 | ||
2509 | #define FRF_CZ_TIFT_DEST_IP_LBN 64 | ||
2510 | #define FRF_CZ_TIFT_DEST_IP_WIDTH 32 | ||
2511 | #define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 | ||
2512 | #define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 | ||
2513 | #define FRF_CZ_TIFT_SRC_IP_LBN 16 | ||
2514 | #define FRF_CZ_TIFT_SRC_IP_WIDTH 32 | ||
2515 | #define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 | ||
2516 | #define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 | ||
2517 | |||
2518 | /* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ | ||
2519 | #define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 | ||
2520 | #define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 | ||
2521 | #define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 | ||
2522 | #define FRF_CZ_TMFT_TXQ_ID_LBN 61 | ||
2523 | #define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 | ||
2524 | #define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 | ||
2525 | #define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 | ||
2526 | #define FRF_CZ_TMFT_SRC_MAC_LBN 16 | ||
2527 | #define FRF_CZ_TMFT_SRC_MAC_WIDTH 44 | ||
2528 | #define FRF_CZ_TMFT_VLAN_ID_LBN 0 | ||
2529 | #define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 | ||
2530 | |||
2531 | /* MC_TREG_SMEM: MC Shared Memory */ | ||
2532 | #define FR_CZ_MC_TREG_SMEM 0x00ff0000 | ||
2533 | #define FR_CZ_MC_TREG_SMEM_STEP 4 | ||
2534 | #define FR_CZ_MC_TREG_SMEM_ROWS 512 | ||
2535 | #define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 | ||
2536 | #define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 | ||
2537 | |||
2538 | /* MSIX_VECTOR_TABLE: MSIX Vector Table */ | ||
2539 | #define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 | ||
2540 | #define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 | ||
2541 | #define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 | ||
2542 | /* MSIX_VECTOR_TABLE: MSIX Vector Table */ | ||
2543 | #define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 | ||
2544 | /* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ | ||
2545 | #define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 | ||
2546 | #define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 | ||
2547 | #define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 | ||
2548 | #define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 | ||
2549 | #define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 | ||
2550 | #define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 | ||
2551 | #define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 | ||
2552 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 | ||
2553 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 | ||
2554 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 | ||
2555 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 | ||
2556 | |||
2557 | /* MSIX_PBA_TABLE: MSIX Pending Bit Array */ | ||
2558 | #define FR_BB_MSIX_PBA_TABLE 0x00ff2000 | ||
2559 | #define FR_BZ_MSIX_PBA_TABLE_STEP 4 | ||
2560 | #define FR_BB_MSIX_PBA_TABLE_ROWS 2 | ||
2561 | /* MSIX_PBA_TABLE: MSIX Pending Bit Array */ | ||
2562 | #define FR_CZ_MSIX_PBA_TABLE 0x00008000 | ||
2563 | /* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ | ||
2564 | #define FR_CZ_MSIX_PBA_TABLE_ROWS 32 | ||
2565 | #define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 | ||
2566 | #define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 | ||
2567 | |||
2568 | /* SRM_DBG_REG: SRAM debug access */ | ||
2569 | #define FR_BZ_SRM_DBG 0x03000000 | ||
2570 | #define FR_BZ_SRM_DBG_STEP 8 | ||
2571 | #define FR_CZ_SRM_DBG_ROWS 262144 | ||
2572 | #define FR_BB_SRM_DBG_ROWS 2097152 | ||
2573 | #define FRF_BZ_SRM_DBG_LBN 0 | ||
2574 | #define FRF_BZ_SRM_DBG_WIDTH 64 | ||
2575 | |||
2576 | /* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ | ||
2577 | #define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 | ||
2578 | #define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 | ||
2579 | #define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 | ||
2580 | #define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 | ||
2581 | #define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 | ||
2582 | |||
2583 | /* DRIVER_EV */ | ||
2584 | #define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 | ||
2585 | #define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 | ||
2586 | #define FSE_BZ_TX_DSC_ERROR_EV 15 | ||
2587 | #define FSE_BZ_RX_DSC_ERROR_EV 14 | ||
2588 | #define FSE_AA_RX_RECOVER_EV 11 | ||
2589 | #define FSE_AZ_TIMER_EV 10 | ||
2590 | #define FSE_AZ_TX_PKT_NON_TCP_UDP 9 | ||
2591 | #define FSE_AZ_WAKE_UP_EV 6 | ||
2592 | #define FSE_AZ_SRM_UPD_DONE_EV 5 | ||
2593 | #define FSE_AB_EVQ_NOT_EN_EV 3 | ||
2594 | #define FSE_AZ_EVQ_INIT_DONE_EV 2 | ||
2595 | #define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 | ||
2596 | #define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 | ||
2597 | #define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 | ||
2598 | #define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 | ||
2599 | |||
2600 | /* EVENT_ENTRY */ | ||
2601 | #define FSF_AZ_EV_CODE_LBN 60 | ||
2602 | #define FSF_AZ_EV_CODE_WIDTH 4 | ||
2603 | #define FSE_CZ_EV_CODE_MCDI_EV 12 | ||
2604 | #define FSE_CZ_EV_CODE_USER_EV 8 | ||
2605 | #define FSE_AZ_EV_CODE_DRV_GEN_EV 7 | ||
2606 | #define FSE_AZ_EV_CODE_GLOBAL_EV 6 | ||
2607 | #define FSE_AZ_EV_CODE_DRIVER_EV 5 | ||
2608 | #define FSE_AZ_EV_CODE_TX_EV 2 | ||
2609 | #define FSE_AZ_EV_CODE_RX_EV 0 | ||
2610 | #define FSF_AZ_EV_DATA_LBN 0 | ||
2611 | #define FSF_AZ_EV_DATA_WIDTH 60 | ||
2612 | |||
2613 | /* GLOBAL_EV */ | ||
2614 | #define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 | ||
2615 | #define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 | ||
2616 | #define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 | ||
2617 | #define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 | ||
2618 | #define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 | ||
2619 | #define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 | ||
2620 | #define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 | ||
2621 | #define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 | ||
2622 | #define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 | ||
2623 | #define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 | ||
2624 | #define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 | ||
2625 | #define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 | ||
2626 | |||
2627 | /* LEGACY_INT_VEC */ | ||
2628 | #define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 | ||
2629 | #define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 | ||
2630 | #define FSF_AZ_NET_IVEC_INT_Q_LBN 40 | ||
2631 | #define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 | ||
2632 | #define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 | ||
2633 | #define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 | ||
2634 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 | ||
2635 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 | ||
2636 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 | ||
2637 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 | ||
2638 | |||
2639 | /* MC_XGMAC_FLTR_RULE_DEF */ | ||
2640 | #define FSF_CZ_MC_XFRC_MODE_LBN 416 | ||
2641 | #define FSF_CZ_MC_XFRC_MODE_WIDTH 1 | ||
2642 | #define FSE_CZ_MC_XFRC_MODE_LAYERED 1 | ||
2643 | #define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 | ||
2644 | #define FSF_CZ_MC_XFRC_HASH_LBN 384 | ||
2645 | #define FSF_CZ_MC_XFRC_HASH_WIDTH 32 | ||
2646 | #define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 | ||
2647 | #define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 | ||
2648 | #define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 | ||
2649 | #define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 | ||
2650 | #define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 | ||
2651 | #define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 | ||
2652 | |||
2653 | /* RX_EV */ | ||
2654 | #define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 | ||
2655 | #define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 | ||
2656 | #define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 | ||
2657 | #define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 | ||
2658 | #define FSF_AZ_RX_EV_PKT_OK_LBN 56 | ||
2659 | #define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 | ||
2660 | #define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 | ||
2661 | #define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 | ||
2662 | #define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 | ||
2663 | #define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 | ||
2664 | #define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 | ||
2665 | #define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 | ||
2666 | #define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 | ||
2667 | #define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 | ||
2668 | #define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 | ||
2669 | #define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 | ||
2670 | #define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 | ||
2671 | #define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 | ||
2672 | #define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 | ||
2673 | #define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 | ||
2674 | #define FSF_AA_RX_EV_DRIB_NIB_LBN 49 | ||
2675 | #define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 | ||
2676 | #define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 | ||
2677 | #define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 | ||
2678 | #define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 | ||
2679 | #define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 | ||
2680 | #define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 | ||
2681 | #define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 | ||
2682 | #define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 | ||
2683 | #define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 | ||
2684 | #define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 | ||
2685 | #define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 | ||
2686 | #define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 | ||
2687 | #define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 | ||
2688 | #define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 | ||
2689 | #define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 | ||
2690 | #define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 | ||
2691 | #define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 | ||
2692 | #define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 | ||
2693 | #define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 | ||
2694 | #define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 | ||
2695 | #define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 | ||
2696 | #define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 | ||
2697 | #define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 | ||
2698 | #define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 | ||
2699 | #define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 | ||
2700 | #define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 | ||
2701 | #define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 | ||
2702 | #define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 | ||
2703 | #define FSF_AZ_RX_EV_Q_LABEL_LBN 32 | ||
2704 | #define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 | ||
2705 | #define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 | ||
2706 | #define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 | ||
2707 | #define FSF_AZ_RX_EV_PORT_LBN 30 | ||
2708 | #define FSF_AZ_RX_EV_PORT_WIDTH 1 | ||
2709 | #define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 | ||
2710 | #define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 | ||
2711 | #define FSF_AZ_RX_EV_SOP_LBN 15 | ||
2712 | #define FSF_AZ_RX_EV_SOP_WIDTH 1 | ||
2713 | #define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 | ||
2714 | #define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 | ||
2715 | #define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 | ||
2716 | #define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 | ||
2717 | #define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 | ||
2718 | #define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 | ||
2719 | #define FSF_AZ_RX_EV_DESC_PTR_LBN 0 | ||
2720 | #define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 | ||
2721 | |||
2722 | /* RX_KER_DESC */ | ||
2723 | #define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 | ||
2724 | #define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 | ||
2725 | #define FSF_AZ_RX_KER_BUF_REGION_LBN 46 | ||
2726 | #define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 | ||
2727 | #define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 | ||
2728 | #define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 | ||
2729 | |||
2730 | /* RX_USER_DESC */ | ||
2731 | #define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 | ||
2732 | #define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 | ||
2733 | #define FSF_AZ_RX_USER_BUF_ID_LBN 0 | ||
2734 | #define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 | ||
2735 | |||
2736 | /* TX_EV */ | ||
2737 | #define FSF_AZ_TX_EV_PKT_ERR_LBN 38 | ||
2738 | #define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 | ||
2739 | #define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 | ||
2740 | #define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 | ||
2741 | #define FSF_AZ_TX_EV_Q_LABEL_LBN 32 | ||
2742 | #define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 | ||
2743 | #define FSF_AZ_TX_EV_PORT_LBN 16 | ||
2744 | #define FSF_AZ_TX_EV_PORT_WIDTH 1 | ||
2745 | #define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 | ||
2746 | #define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 | ||
2747 | #define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 | ||
2748 | #define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 | ||
2749 | #define FSF_AZ_TX_EV_COMP_LBN 12 | ||
2750 | #define FSF_AZ_TX_EV_COMP_WIDTH 1 | ||
2751 | #define FSF_AZ_TX_EV_DESC_PTR_LBN 0 | ||
2752 | #define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 | ||
2753 | |||
2754 | /* TX_KER_DESC */ | ||
2755 | #define FSF_AZ_TX_KER_CONT_LBN 62 | ||
2756 | #define FSF_AZ_TX_KER_CONT_WIDTH 1 | ||
2757 | #define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 | ||
2758 | #define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 | ||
2759 | #define FSF_AZ_TX_KER_BUF_REGION_LBN 46 | ||
2760 | #define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 | ||
2761 | #define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 | ||
2762 | #define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 | ||
2763 | |||
2764 | /* TX_USER_DESC */ | ||
2765 | #define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 | ||
2766 | #define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 | ||
2767 | #define FSF_AZ_TX_USER_CONT_LBN 46 | ||
2768 | #define FSF_AZ_TX_USER_CONT_WIDTH 1 | ||
2769 | #define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 | ||
2770 | #define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 | ||
2771 | #define FSF_AZ_TX_USER_BUF_ID_LBN 13 | ||
2772 | #define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 | ||
2773 | #define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 | ||
2774 | #define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 | ||
2775 | |||
2776 | /* USER_EV */ | ||
2777 | #define FSF_CZ_USER_QID_LBN 32 | ||
2778 | #define FSF_CZ_USER_QID_WIDTH 10 | ||
2779 | #define FSF_CZ_USER_EV_REG_VALUE_LBN 0 | ||
2780 | #define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 | ||
2781 | |||
2782 | /************************************************************************** | ||
2783 | * | ||
2784 | * Falcon B0 PCIe core indirect registers | ||
2785 | * | ||
2786 | ************************************************************************** | ||
2787 | */ | ||
2788 | |||
2789 | #define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 | ||
2790 | |||
2791 | #define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 | ||
2792 | |||
2793 | #define FPCR_BB_ACK_RPL_TIMER 0x700 | ||
2794 | #define FPCRF_BB_ACK_TL_LBN 0 | ||
2795 | #define FPCRF_BB_ACK_TL_WIDTH 16 | ||
2796 | #define FPCRF_BB_RPL_TL_LBN 16 | ||
2797 | #define FPCRF_BB_RPL_TL_WIDTH 16 | ||
2798 | |||
2799 | #define FPCR_BB_ACK_FREQ 0x70C | ||
2800 | #define FPCRF_BB_ACK_FREQ_LBN 0 | ||
2801 | #define FPCRF_BB_ACK_FREQ_WIDTH 7 | ||
2802 | |||
2803 | /************************************************************************** | ||
2804 | * | ||
2805 | * Pseudo-registers and fields | ||
2806 | * | ||
2807 | ************************************************************************** | ||
2808 | */ | ||
2809 | |||
2810 | /* Interrupt acknowledge work-around register (A0/A1 only) */ | ||
2811 | #define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 | ||
2812 | |||
2813 | /* EE_SPI_HCMD_REG: SPI host command register */ | ||
2814 | /* Values for the EE_SPI_HCMD_SF_SEL register field */ | ||
2815 | #define FFE_AB_SPI_DEVICE_EEPROM 0 | ||
2816 | #define FFE_AB_SPI_DEVICE_FLASH 1 | ||
2817 | |||
2818 | /* NIC_STAT_REG: NIC status register */ | ||
2819 | #define FRF_AB_STRAP_10G_LBN 2 | ||
2820 | #define FRF_AB_STRAP_10G_WIDTH 1 | ||
2821 | #define FRF_AA_STRAP_PCIE_LBN 0 | ||
2822 | #define FRF_AA_STRAP_PCIE_WIDTH 1 | ||
2823 | |||
2824 | /* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ | ||
2825 | #define FRF_AZ_FATAL_INTR_LBN 0 | ||
2826 | #define FRF_AZ_FATAL_INTR_WIDTH 12 | ||
2827 | |||
2828 | /* SRM_CFG_REG: SRAM configuration register */ | ||
2829 | /* We treat the number of SRAM banks and bank size as a single field */ | ||
2830 | #define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN | ||
2831 | #define FRF_AZ_SRM_NB_SZ_WIDTH \ | ||
2832 | (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) | ||
2833 | #define FFE_AB_SRM_NB1_SZ2M 0 | ||
2834 | #define FFE_AB_SRM_NB1_SZ4M 1 | ||
2835 | #define FFE_AB_SRM_NB1_SZ8M 2 | ||
2836 | #define FFE_AB_SRM_NB_SZ_DEF 3 | ||
2837 | #define FFE_AB_SRM_NB2_SZ4M 4 | ||
2838 | #define FFE_AB_SRM_NB2_SZ8M 5 | ||
2839 | #define FFE_AB_SRM_NB2_SZ16M 6 | ||
2840 | #define FFE_AB_SRM_NB_SZ_RES 7 | ||
2841 | |||
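FRF_AZ_SRM_NB_SZ above illustrates the pseudo-field convention: when two hardware fields are adjacent (SRM_NUM_BANK sits directly above SRM_BANK_SIZE), they can be addressed as a single field and programmed with the combined FFE_AB_SRM_NB* encodings. A minimal sketch, assuming FR_AZ_SRM_CFG is the SRAM configuration register defined earlier in this file and reusing the helpers and includes from the earlier sketches:

/* Sketch: program the combined bank-count/bank-size encoding, e.g.
 * example_set_sram_geometry(efx, FFE_AB_SRM_NB1_SZ8M). */
static void example_set_sram_geometry(struct efx_nic *efx,
				      unsigned int nb_sz_code)
{
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_SRM_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_SRM_NB_SZ, nb_sz_code);
	efx_writeo(efx, &reg, FR_AZ_SRM_CFG);
}
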
2842 | /* RX_DESC_UPD_REGP0: Receive descriptor update register. */ | ||
2843 | /* We write just the last dword of these registers */ | ||
2844 | #define FR_AZ_RX_DESC_UPD_DWORD_P0 \ | ||
2845 | (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ | ||
2846 | FR_BZ_RX_DESC_UPD_P0 + 3 * 4) | ||
2847 | #define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) | ||
2848 | #define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH | ||
2849 | |||
2850 | /* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ | ||
2851 | #define FR_AZ_TX_DESC_UPD_DWORD_P0 \ | ||
2852 | (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ | ||
2853 | FR_BZ_TX_DESC_UPD_P0 + 3 * 4) | ||
2854 | #define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) | ||
2855 | #define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH | ||
2856 | |||
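These DWORD pseudo-registers exist because the write pointer occupies the top 32 bits of the 128-bit descriptor update registers: adding 3 * 4 to the base address and subtracting 3 * 32 from the field's LBN rebase the pointer into a single dword, so the doorbell can be rung with one 32-bit write instead of a full 128-bit write. A sketch of that doorbell, assuming the EFX_POPULATE_DWORD_1() macro from bitfield.h and the per-queue efx_writed_page() accessor from io.h; the queue and write-pointer bookkeeping are illustrative.

/* Sketch: post a new TX descriptor write pointer with a single dword
 * write to the per-queue page of the descriptor update register. */
static void example_notify_tx_desc(struct efx_nic *efx,
				   unsigned int queue, unsigned int write_ptr)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_TX_DESC_UPD_DWORD_P0, queue);
}
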
2857 | /* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ | ||
2858 | #define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 | ||
2859 | #define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 | ||
2860 | |||
2861 | /* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ | ||
2862 | #define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 | ||
2863 | #define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 | ||
2864 | |||
2865 | /* XM_TX_PARAM_REG: XGMAC transmit parameter register */ | ||
2866 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN | ||
2867 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ | ||
2868 | FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) | ||
2869 | |||
2870 | /* XM_RX_PARAM_REG: XGMAC receive parameter register */ | ||
2871 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN | ||
2872 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ | ||
2873 | FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) | ||
2874 | |||
2875 | /* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ | ||
2876 | /* Default values */ | ||
2877 | #define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ | ||
2878 | #define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ | ||
2879 | #define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ | ||
2880 | |||
2881 | /* XX_CORE_STAT_REG: XAUI XGXS core status register */ | ||
2882 | /* XGXS all-lanes status fields */ | ||
2883 | #define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN | ||
2884 | #define FRF_AB_XX_SYNC_STAT_WIDTH 4 | ||
2885 | #define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN | ||
2886 | #define FRF_AB_XX_COMMA_DET_WIDTH 4 | ||
2887 | #define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN | ||
2888 | #define FRF_AB_XX_CHAR_ERR_WIDTH 4 | ||
2889 | #define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN | ||
2890 | #define FRF_AB_XX_DISPERR_WIDTH 4 | ||
2891 | #define FFE_AB_XX_STAT_ALL_LANES 0xf | ||
2892 | #define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN | ||
2893 | #define FRF_AB_XX_FORCE_SIG_WIDTH 8 | ||
2894 | #define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff | ||
2895 | |||
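The all-lanes pseudo-fields above widen the four per-channel XX_CORE_STAT bits into single 4-bit fields, so lane sync can be tested and the latched error indications cleared in one pass against FFE_AB_XX_STAT_ALL_LANES. A minimal sketch, assuming the EFX_OWORD_FIELD()/EFX_SET_OWORD_FIELD() macros from bitfield.h, efx_reado()/efx_writeo() from io.h, and that the per-lane error bits are cleared by writing them back.

/* Sketch: report whether all four XAUI lanes are aligned and in sync,
 * then clear the latched per-lane error bits in one write. */
static bool example_xaui_link_ok(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool link_ok;

	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	link_ok = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE) &&
		  (EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT) ==
		   FFE_AB_XX_STAT_ALL_LANES);

	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
	return link_ok;
}
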
2896 | /* DRIVER_EV */ | ||
2897 | /* Sub-fields of an RX flush completion event */ | ||
2898 | #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 | ||
2899 | #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 | ||
2900 | #define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 | ||
2901 | #define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 | ||
2902 | |||
2903 | /* EVENT_ENTRY */ | ||
2904 | /* Magic number field for event test */ | ||
2905 | #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 | ||
2906 | #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 | ||
2907 | |||
2908 | /************************************************************************** | ||
2909 | * | ||
2910 | * Falcon MAC stats | ||
2911 | * | ||
2912 | ************************************************************************** | ||
2913 | * | ||
2914 | */ | ||
2915 | |||
2916 | #define GRxGoodOct_offset 0x0 | ||
2917 | #define GRxGoodOct_WIDTH 48 | ||
2918 | #define GRxBadOct_offset 0x8 | ||
2919 | #define GRxBadOct_WIDTH 48 | ||
2920 | #define GRxMissPkt_offset 0x10 | ||
2921 | #define GRxMissPkt_WIDTH 32 | ||
2922 | #define GRxFalseCRS_offset 0x14 | ||
2923 | #define GRxFalseCRS_WIDTH 32 | ||
2924 | #define GRxPausePkt_offset 0x18 | ||
2925 | #define GRxPausePkt_WIDTH 32 | ||
2926 | #define GRxBadPkt_offset 0x1C | ||
2927 | #define GRxBadPkt_WIDTH 32 | ||
2928 | #define GRxUcastPkt_offset 0x20 | ||
2929 | #define GRxUcastPkt_WIDTH 32 | ||
2930 | #define GRxMcastPkt_offset 0x24 | ||
2931 | #define GRxMcastPkt_WIDTH 32 | ||
2932 | #define GRxBcastPkt_offset 0x28 | ||
2933 | #define GRxBcastPkt_WIDTH 32 | ||
2934 | #define GRxGoodLt64Pkt_offset 0x2C | ||
2935 | #define GRxGoodLt64Pkt_WIDTH 32 | ||
2936 | #define GRxBadLt64Pkt_offset 0x30 | ||
2937 | #define GRxBadLt64Pkt_WIDTH 32 | ||
2938 | #define GRx64Pkt_offset 0x34 | ||
2939 | #define GRx64Pkt_WIDTH 32 | ||
2940 | #define GRx65to127Pkt_offset 0x38 | ||
2941 | #define GRx65to127Pkt_WIDTH 32 | ||
2942 | #define GRx128to255Pkt_offset 0x3C | ||
2943 | #define GRx128to255Pkt_WIDTH 32 | ||
2944 | #define GRx256to511Pkt_offset 0x40 | ||
2945 | #define GRx256to511Pkt_WIDTH 32 | ||
2946 | #define GRx512to1023Pkt_offset 0x44 | ||
2947 | #define GRx512to1023Pkt_WIDTH 32 | ||
2948 | #define GRx1024to15xxPkt_offset 0x48 | ||
2949 | #define GRx1024to15xxPkt_WIDTH 32 | ||
2950 | #define GRx15xxtoJumboPkt_offset 0x4C | ||
2951 | #define GRx15xxtoJumboPkt_WIDTH 32 | ||
2952 | #define GRxGtJumboPkt_offset 0x50 | ||
2953 | #define GRxGtJumboPkt_WIDTH 32 | ||
2954 | #define GRxFcsErr64to15xxPkt_offset 0x54 | ||
2955 | #define GRxFcsErr64to15xxPkt_WIDTH 32 | ||
2956 | #define GRxFcsErr15xxtoJumboPkt_offset 0x58 | ||
2957 | #define GRxFcsErr15xxtoJumboPkt_WIDTH 32 | ||
2958 | #define GRxFcsErrGtJumboPkt_offset 0x5C | ||
2959 | #define GRxFcsErrGtJumboPkt_WIDTH 32 | ||
2960 | #define GTxGoodBadOct_offset 0x80 | ||
2961 | #define GTxGoodBadOct_WIDTH 48 | ||
2962 | #define GTxGoodOct_offset 0x88 | ||
2963 | #define GTxGoodOct_WIDTH 48 | ||
2964 | #define GTxSglColPkt_offset 0x90 | ||
2965 | #define GTxSglColPkt_WIDTH 32 | ||
2966 | #define GTxMultColPkt_offset 0x94 | ||
2967 | #define GTxMultColPkt_WIDTH 32 | ||
2968 | #define GTxExColPkt_offset 0x98 | ||
2969 | #define GTxExColPkt_WIDTH 32 | ||
2970 | #define GTxDefPkt_offset 0x9C | ||
2971 | #define GTxDefPkt_WIDTH 32 | ||
2972 | #define GTxLateCol_offset 0xA0 | ||
2973 | #define GTxLateCol_WIDTH 32 | ||
2974 | #define GTxExDefPkt_offset 0xA4 | ||
2975 | #define GTxExDefPkt_WIDTH 32 | ||
2976 | #define GTxPausePkt_offset 0xA8 | ||
2977 | #define GTxPausePkt_WIDTH 32 | ||
2978 | #define GTxBadPkt_offset 0xAC | ||
2979 | #define GTxBadPkt_WIDTH 32 | ||
2980 | #define GTxUcastPkt_offset 0xB0 | ||
2981 | #define GTxUcastPkt_WIDTH 32 | ||
2982 | #define GTxMcastPkt_offset 0xB4 | ||
2983 | #define GTxMcastPkt_WIDTH 32 | ||
2984 | #define GTxBcastPkt_offset 0xB8 | ||
2985 | #define GTxBcastPkt_WIDTH 32 | ||
2986 | #define GTxLt64Pkt_offset 0xBC | ||
2987 | #define GTxLt64Pkt_WIDTH 32 | ||
2988 | #define GTx64Pkt_offset 0xC0 | ||
2989 | #define GTx64Pkt_WIDTH 32 | ||
2990 | #define GTx65to127Pkt_offset 0xC4 | ||
2991 | #define GTx65to127Pkt_WIDTH 32 | ||
2992 | #define GTx128to255Pkt_offset 0xC8 | ||
2993 | #define GTx128to255Pkt_WIDTH 32 | ||
2994 | #define GTx256to511Pkt_offset 0xCC | ||
2995 | #define GTx256to511Pkt_WIDTH 32 | ||
2996 | #define GTx512to1023Pkt_offset 0xD0 | ||
2997 | #define GTx512to1023Pkt_WIDTH 32 | ||
2998 | #define GTx1024to15xxPkt_offset 0xD4 | ||
2999 | #define GTx1024to15xxPkt_WIDTH 32 | ||
3000 | #define GTx15xxtoJumboPkt_offset 0xD8 | ||
3001 | #define GTx15xxtoJumboPkt_WIDTH 32 | ||
3002 | #define GTxGtJumboPkt_offset 0xDC | ||
3003 | #define GTxGtJumboPkt_WIDTH 32 | ||
3004 | #define GTxNonTcpUdpPkt_offset 0xE0 | ||
3005 | #define GTxNonTcpUdpPkt_WIDTH 16 | ||
3006 | #define GTxMacSrcErrPkt_offset 0xE4 | ||
3007 | #define GTxMacSrcErrPkt_WIDTH 16 | ||
3008 | #define GTxIpSrcErrPkt_offset 0xE8 | ||
3009 | #define GTxIpSrcErrPkt_WIDTH 16 | ||
3010 | #define GDmaDone_offset 0xEC | ||
3011 | #define GDmaDone_WIDTH 32 | ||
3012 | |||
3013 | #define XgRxOctets_offset 0x0 | ||
3014 | #define XgRxOctets_WIDTH 48 | ||
3015 | #define XgRxOctetsOK_offset 0x8 | ||
3016 | #define XgRxOctetsOK_WIDTH 48 | ||
3017 | #define XgRxPkts_offset 0x10 | ||
3018 | #define XgRxPkts_WIDTH 32 | ||
3019 | #define XgRxPktsOK_offset 0x14 | ||
3020 | #define XgRxPktsOK_WIDTH 32 | ||
3021 | #define XgRxBroadcastPkts_offset 0x18 | ||
3022 | #define XgRxBroadcastPkts_WIDTH 32 | ||
3023 | #define XgRxMulticastPkts_offset 0x1C | ||
3024 | #define XgRxMulticastPkts_WIDTH 32 | ||
3025 | #define XgRxUnicastPkts_offset 0x20 | ||
3026 | #define XgRxUnicastPkts_WIDTH 32 | ||
3027 | #define XgRxUndersizePkts_offset 0x24 | ||
3028 | #define XgRxUndersizePkts_WIDTH 32 | ||
3029 | #define XgRxOversizePkts_offset 0x28 | ||
3030 | #define XgRxOversizePkts_WIDTH 32 | ||
3031 | #define XgRxJabberPkts_offset 0x2C | ||
3032 | #define XgRxJabberPkts_WIDTH 32 | ||
3033 | #define XgRxUndersizeFCSerrorPkts_offset 0x30 | ||
3034 | #define XgRxUndersizeFCSerrorPkts_WIDTH 32 | ||
3035 | #define XgRxDropEvents_offset 0x34 | ||
3036 | #define XgRxDropEvents_WIDTH 32 | ||
3037 | #define XgRxFCSerrorPkts_offset 0x38 | ||
3038 | #define XgRxFCSerrorPkts_WIDTH 32 | ||
3039 | #define XgRxAlignError_offset 0x3C | ||
3040 | #define XgRxAlignError_WIDTH 32 | ||
3041 | #define XgRxSymbolError_offset 0x40 | ||
3042 | #define XgRxSymbolError_WIDTH 32 | ||
3043 | #define XgRxInternalMACError_offset 0x44 | ||
3044 | #define XgRxInternalMACError_WIDTH 32 | ||
3045 | #define XgRxControlPkts_offset 0x48 | ||
3046 | #define XgRxControlPkts_WIDTH 32 | ||
3047 | #define XgRxPausePkts_offset 0x4C | ||
3048 | #define XgRxPausePkts_WIDTH 32 | ||
3049 | #define XgRxPkts64Octets_offset 0x50 | ||
3050 | #define XgRxPkts64Octets_WIDTH 32 | ||
3051 | #define XgRxPkts65to127Octets_offset 0x54 | ||
3052 | #define XgRxPkts65to127Octets_WIDTH 32 | ||
3053 | #define XgRxPkts128to255Octets_offset 0x58 | ||
3054 | #define XgRxPkts128to255Octets_WIDTH 32 | ||
3055 | #define XgRxPkts256to511Octets_offset 0x5C | ||
3056 | #define XgRxPkts256to511Octets_WIDTH 32 | ||
3057 | #define XgRxPkts512to1023Octets_offset 0x60 | ||
3058 | #define XgRxPkts512to1023Octets_WIDTH 32 | ||
3059 | #define XgRxPkts1024to15xxOctets_offset 0x64 | ||
3060 | #define XgRxPkts1024to15xxOctets_WIDTH 32 | ||
3061 | #define XgRxPkts15xxtoMaxOctets_offset 0x68 | ||
3062 | #define XgRxPkts15xxtoMaxOctets_WIDTH 32 | ||
3063 | #define XgRxLengthError_offset 0x6C | ||
3064 | #define XgRxLengthError_WIDTH 32 | ||
3065 | #define XgTxPkts_offset 0x80 | ||
3066 | #define XgTxPkts_WIDTH 32 | ||
3067 | #define XgTxOctets_offset 0x88 | ||
3068 | #define XgTxOctets_WIDTH 48 | ||
3069 | #define XgTxMulticastPkts_offset 0x90 | ||
3070 | #define XgTxMulticastPkts_WIDTH 32 | ||
3071 | #define XgTxBroadcastPkts_offset 0x94 | ||
3072 | #define XgTxBroadcastPkts_WIDTH 32 | ||
3073 | #define XgTxUnicastPkts_offset 0x98 | ||
3074 | #define XgTxUnicastPkts_WIDTH 32 | ||
3075 | #define XgTxControlPkts_offset 0x9C | ||
3076 | #define XgTxControlPkts_WIDTH 32 | ||
3077 | #define XgTxPausePkts_offset 0xA0 | ||
3078 | #define XgTxPausePkts_WIDTH 32 | ||
3079 | #define XgTxPkts64Octets_offset 0xA4 | ||
3080 | #define XgTxPkts64Octets_WIDTH 32 | ||
3081 | #define XgTxPkts65to127Octets_offset 0xA8 | ||
3082 | #define XgTxPkts65to127Octets_WIDTH 32 | ||
3083 | #define XgTxPkts128to255Octets_offset 0xAC | ||
3084 | #define XgTxPkts128to255Octets_WIDTH 32 | ||
3085 | #define XgTxPkts256to511Octets_offset 0xB0 | ||
3086 | #define XgTxPkts256to511Octets_WIDTH 32 | ||
3087 | #define XgTxPkts512to1023Octets_offset 0xB4 | ||
3088 | #define XgTxPkts512to1023Octets_WIDTH 32 | ||
3089 | #define XgTxPkts1024to15xxOctets_offset 0xB8 | ||
3090 | #define XgTxPkts1024to15xxOctets_WIDTH 32 | ||
3091 | #define XgTxPkts1519toMaxOctets_offset 0xBC | ||
3092 | #define XgTxPkts1519toMaxOctets_WIDTH 32 | ||
3093 | #define XgTxUndersizePkts_offset 0xC0 | ||
3094 | #define XgTxUndersizePkts_WIDTH 32 | ||
3095 | #define XgTxOversizePkts_offset 0xC4 | ||
3096 | #define XgTxOversizePkts_WIDTH 32 | ||
3097 | #define XgTxNonTcpUdpPkt_offset 0xC8 | ||
3098 | #define XgTxNonTcpUdpPkt_WIDTH 16 | ||
3099 | #define XgTxMacSrcErrPkt_offset 0xCC | ||
3100 | #define XgTxMacSrcErrPkt_WIDTH 16 | ||
3101 | #define XgTxIpSrcErrPkt_offset 0xD0 | ||
3102 | #define XgTxIpSrcErrPkt_WIDTH 16 | ||
3103 | #define XgDmaDone_offset 0xD4 | ||
3104 | #define XgDmaDone_WIDTH 32 | ||
3105 | |||
3106 | #define FALCON_STATS_NOT_DONE 0x00000000 | ||
3107 | #define FALCON_STATS_DONE 0xffffffff | ||
3108 | |||
3109 | /************************************************************************** | ||
3110 | * | ||
3111 | * Falcon non-volatile configuration | ||
3112 | * | ||
3113 | ************************************************************************** | ||
3114 | */ | ||
3115 | |||
3116 | /* Board configuration v2 (v1 is obsolete; later versions are compatible) */ | ||
3117 | struct falcon_nvconfig_board_v2 { | ||
3118 | __le16 nports; | ||
3119 | u8 port0_phy_addr; | ||
3120 | u8 port0_phy_type; | ||
3121 | u8 port1_phy_addr; | ||
3122 | u8 port1_phy_type; | ||
3123 | __le16 asic_sub_revision; | ||
3124 | __le16 board_revision; | ||
3125 | } __packed; | ||
3126 | |||
3127 | /* Board configuration v3 extra information */ | ||
3128 | struct falcon_nvconfig_board_v3 { | ||
3129 | __le32 spi_device_type[2]; | ||
3130 | } __packed; | ||
3131 | |||
3132 | /* Bit numbers for spi_device_type */ | ||
3133 | #define SPI_DEV_TYPE_SIZE_LBN 0 | ||
3134 | #define SPI_DEV_TYPE_SIZE_WIDTH 5 | ||
3135 | #define SPI_DEV_TYPE_ADDR_LEN_LBN 6 | ||
3136 | #define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2 | ||
3137 | #define SPI_DEV_TYPE_ERASE_CMD_LBN 8 | ||
3138 | #define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8 | ||
3139 | #define SPI_DEV_TYPE_ERASE_SIZE_LBN 16 | ||
3140 | #define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5 | ||
3141 | #define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24 | ||
3142 | #define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5 | ||
3143 | #define SPI_DEV_TYPE_FIELD(type, field) \ | ||
3144 | (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) | ||
3145 | |||
3146 | #define FALCON_NVCONFIG_OFFSET 0x300 | ||
3147 | |||
3148 | #define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | ||
3149 | struct falcon_nvconfig { | ||
3150 | efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ | ||
3151 | u8 mac_address[2][8]; /* 0x310 */ | ||
3152 | efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */ | ||
3153 | efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */ | ||
3154 | efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */ | ||
3155 | efx_oword_t hw_init_reg; /* 0x350 */ | ||
3156 | efx_oword_t nic_stat_reg; /* 0x360 */ | ||
3157 | efx_oword_t glb_ctl_reg; /* 0x370 */ | ||
3158 | efx_oword_t srm_cfg_reg; /* 0x380 */ | ||
3159 | efx_oword_t spare_reg; /* 0x390 */ | ||
3160 | __le16 board_magic_num; /* 0x3A0 */ | ||
3161 | __le16 board_struct_ver; | ||
3162 | __le16 board_checksum; | ||
3163 | struct falcon_nvconfig_board_v2 board_v2; | ||
3164 | efx_oword_t ee_base_page_reg; /* 0x3B0 */ | ||
3165 | struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */ | ||
3166 | } __packed; | ||
3167 | |||
3168 | #endif /* EFX_REGS_H */ | ||
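
The v3 board configuration packs each SPI device descriptor into a single 32-bit word, and SPI_DEV_TYPE_FIELD extracts a field by pasting the _LBN/_WIDTH suffixes onto its name (EFX_LOW_BIT and EFX_WIDTH come from bitfield.h). As a rough sketch of how a caller could decode one descriptor — the helper name and the log2-of-bytes reading of the size fields are illustrative assumptions, not lifted from the driver:

/* Hypothetical decoder for one packed spi_device_type word; assumes
 * <linux/kernel.h> plus the definitions above.  Each use of
 * SPI_DEV_TYPE_FIELD() expands to
 *   ((type >> <FIELD>_LBN) & EFX_MASK32(<FIELD>_WIDTH))
 * so the shifts and masks come straight from the *_LBN/*_WIDTH pairs.
 * The *_log2 values are assumed to hold log2(bytes). */
static void example_decode_spi_type(u32 type)
{
	unsigned int size_log2  = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_SIZE);
	unsigned int addr_len   = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ADDR_LEN);
	unsigned int erase_cmd  = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ERASE_CMD);
	unsigned int erase_log2 = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ERASE_SIZE);
	unsigned int block_log2 = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_BLOCK_SIZE);

	pr_debug("SPI flash: %u bytes, %u addr bytes, erase cmd 0x%02x, "
		 "erase block %u, write block %u\n",
		 1U << size_log2, addr_len, erase_cmd,
		 1U << erase_log2, 1U << block_log2);
}
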
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 98bff5ada09a..e308818b9f55 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2008 Solarflare Communications Inc. | 4 | * Copyright 2005-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -10,15 +10,15 @@ | |||
10 | 10 | ||
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/in.h> | 12 | #include <linux/in.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/ip.h> | 14 | #include <linux/ip.h> |
14 | #include <linux/tcp.h> | 15 | #include <linux/tcp.h> |
15 | #include <linux/udp.h> | 16 | #include <linux/udp.h> |
16 | #include <net/ip.h> | 17 | #include <net/ip.h> |
17 | #include <net/checksum.h> | 18 | #include <net/checksum.h> |
18 | #include "net_driver.h" | 19 | #include "net_driver.h" |
19 | #include "rx.h" | ||
20 | #include "efx.h" | 20 | #include "efx.h" |
21 | #include "falcon.h" | 21 | #include "nic.h" |
22 | #include "selftest.h" | 22 | #include "selftest.h" |
23 | #include "workarounds.h" | 23 | #include "workarounds.h" |
24 | 24 | ||
@@ -61,7 +61,7 @@ | |||
61 | * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? | 61 | * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? |
62 | * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) | 62 | * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) |
63 | */ | 63 | */ |
64 | static int rx_alloc_method = RX_ALLOC_METHOD_PAGE; | 64 | static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; |
65 | 65 | ||
66 | #define RX_ALLOC_LEVEL_LRO 0x2000 | 66 | #define RX_ALLOC_LEVEL_LRO 0x2000 |
67 | #define RX_ALLOC_LEVEL_MAX 0x3000 | 67 | #define RX_ALLOC_LEVEL_MAX 0x3000 |
@@ -293,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
293 | * fill anyway. | 293 | * fill anyway. |
294 | */ | 294 | */ |
295 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | 295 | fill_level = (rx_queue->added_count - rx_queue->removed_count); |
296 | EFX_BUG_ON_PARANOID(fill_level > | 296 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); |
297 | rx_queue->efx->type->rxd_ring_mask + 1); | ||
298 | 297 | ||
299 | /* Don't fill if we don't need to */ | 298 | /* Don't fill if we don't need to */ |
300 | if (fill_level >= rx_queue->fast_fill_trigger) | 299 | if (fill_level >= rx_queue->fast_fill_trigger) |
@@ -316,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
316 | retry: | 315 | retry: |
317 | /* Recalculate current fill level now that we have the lock */ | 316 | /* Recalculate current fill level now that we have the lock */ |
318 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | 317 | fill_level = (rx_queue->added_count - rx_queue->removed_count); |
319 | EFX_BUG_ON_PARANOID(fill_level > | 318 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); |
320 | rx_queue->efx->type->rxd_ring_mask + 1); | ||
321 | space = rx_queue->fast_fill_limit - fill_level; | 319 | space = rx_queue->fast_fill_limit - fill_level; |
322 | if (space < EFX_RX_BATCH) | 320 | if (space < EFX_RX_BATCH) |
323 | goto out_unlock; | 321 | goto out_unlock; |
@@ -329,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
329 | 327 | ||
330 | do { | 328 | do { |
331 | for (i = 0; i < EFX_RX_BATCH; ++i) { | 329 | for (i = 0; i < EFX_RX_BATCH; ++i) { |
332 | index = (rx_queue->added_count & | 330 | index = rx_queue->added_count & EFX_RXQ_MASK; |
333 | rx_queue->efx->type->rxd_ring_mask); | ||
334 | rx_buf = efx_rx_buffer(rx_queue, index); | 331 | rx_buf = efx_rx_buffer(rx_queue, index); |
335 | rc = efx_init_rx_buffer(rx_queue, rx_buf); | 332 | rc = efx_init_rx_buffer(rx_queue, rx_buf); |
336 | if (unlikely(rc)) | 333 | if (unlikely(rc)) |
@@ -345,7 +342,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
345 | 342 | ||
346 | out: | 343 | out: |
347 | /* Send write pointer to card. */ | 344 | /* Send write pointer to card. */ |
348 | falcon_notify_rx_desc(rx_queue); | 345 | efx_nic_notify_rx_desc(rx_queue); |
349 | 346 | ||
350 | /* If the fast fill is running inside from the refill tasklet, then | 347 | /* If the fast fill is running inside from the refill tasklet, then |
351 | * for SMP systems it may be running on a different CPU to | 348 | * for SMP systems it may be running on a different CPU to |
@@ -448,17 +445,23 @@ static void efx_rx_packet_lro(struct efx_channel *channel, | |||
448 | bool checksummed) | 445 | bool checksummed) |
449 | { | 446 | { |
450 | struct napi_struct *napi = &channel->napi_str; | 447 | struct napi_struct *napi = &channel->napi_str; |
448 | gro_result_t gro_result; | ||
451 | 449 | ||
452 | /* Pass the skb/page into the LRO engine */ | 450 | /* Pass the skb/page into the LRO engine */ |
453 | if (rx_buf->page) { | 451 | if (rx_buf->page) { |
454 | struct sk_buff *skb = napi_get_frags(napi); | 452 | struct page *page = rx_buf->page; |
453 | struct sk_buff *skb; | ||
455 | 454 | ||
455 | EFX_BUG_ON_PARANOID(rx_buf->skb); | ||
456 | rx_buf->page = NULL; | ||
457 | |||
458 | skb = napi_get_frags(napi); | ||
456 | if (!skb) { | 459 | if (!skb) { |
457 | put_page(rx_buf->page); | 460 | put_page(page); |
458 | goto out; | 461 | return; |
459 | } | 462 | } |
460 | 463 | ||
461 | skb_shinfo(skb)->frags[0].page = rx_buf->page; | 464 | skb_shinfo(skb)->frags[0].page = page; |
462 | skb_shinfo(skb)->frags[0].page_offset = | 465 | skb_shinfo(skb)->frags[0].page_offset = |
463 | efx_rx_buf_offset(rx_buf); | 466 | efx_rx_buf_offset(rx_buf); |
464 | skb_shinfo(skb)->frags[0].size = rx_buf->len; | 467 | skb_shinfo(skb)->frags[0].size = rx_buf->len; |
@@ -470,17 +473,24 @@ static void efx_rx_packet_lro(struct efx_channel *channel, | |||
470 | skb->ip_summed = | 473 | skb->ip_summed = |
471 | checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; | 474 | checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; |
472 | 475 | ||
473 | napi_gro_frags(napi); | 476 | skb_record_rx_queue(skb, channel->channel); |
474 | 477 | ||
475 | out: | 478 | gro_result = napi_gro_frags(napi); |
476 | EFX_BUG_ON_PARANOID(rx_buf->skb); | ||
477 | rx_buf->page = NULL; | ||
478 | } else { | 479 | } else { |
479 | EFX_BUG_ON_PARANOID(!rx_buf->skb); | 480 | struct sk_buff *skb = rx_buf->skb; |
480 | EFX_BUG_ON_PARANOID(!checksummed); | ||
481 | 481 | ||
482 | napi_gro_receive(napi, rx_buf->skb); | 482 | EFX_BUG_ON_PARANOID(!skb); |
483 | EFX_BUG_ON_PARANOID(!checksummed); | ||
483 | rx_buf->skb = NULL; | 484 | rx_buf->skb = NULL; |
485 | |||
486 | gro_result = napi_gro_receive(napi, skb); | ||
487 | } | ||
488 | |||
489 | if (gro_result == GRO_NORMAL) { | ||
490 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | ||
491 | } else if (gro_result != GRO_DROP) { | ||
492 | channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; | ||
493 | channel->irq_mod_score += 2; | ||
484 | } | 494 | } |
485 | } | 495 | } |
486 | 496 | ||
@@ -558,7 +568,7 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
558 | if (unlikely(efx->loopback_selftest)) { | 568 | if (unlikely(efx->loopback_selftest)) { |
559 | efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); | 569 | efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); |
560 | efx_free_rx_buffer(efx, rx_buf); | 570 | efx_free_rx_buffer(efx, rx_buf); |
561 | goto done; | 571 | return; |
562 | } | 572 | } |
563 | 573 | ||
564 | if (rx_buf->skb) { | 574 | if (rx_buf->skb) { |
@@ -570,34 +580,28 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
570 | * at the ethernet header */ | 580 | * at the ethernet header */ |
571 | rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, | 581 | rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, |
572 | efx->net_dev); | 582 | efx->net_dev); |
583 | |||
584 | skb_record_rx_queue(rx_buf->skb, channel->channel); | ||
573 | } | 585 | } |
574 | 586 | ||
575 | if (likely(checksummed || rx_buf->page)) { | 587 | if (likely(checksummed || rx_buf->page)) { |
576 | efx_rx_packet_lro(channel, rx_buf, checksummed); | 588 | efx_rx_packet_lro(channel, rx_buf, checksummed); |
577 | goto done; | 589 | return; |
578 | } | 590 | } |
579 | 591 | ||
580 | /* We now own the SKB */ | 592 | /* We now own the SKB */ |
581 | skb = rx_buf->skb; | 593 | skb = rx_buf->skb; |
582 | rx_buf->skb = NULL; | 594 | rx_buf->skb = NULL; |
583 | |||
584 | EFX_BUG_ON_PARANOID(rx_buf->page); | ||
585 | EFX_BUG_ON_PARANOID(rx_buf->skb); | ||
586 | EFX_BUG_ON_PARANOID(!skb); | 595 | EFX_BUG_ON_PARANOID(!skb); |
587 | 596 | ||
588 | /* Set the SKB flags */ | 597 | /* Set the SKB flags */ |
589 | skb->ip_summed = CHECKSUM_NONE; | 598 | skb->ip_summed = CHECKSUM_NONE; |
590 | 599 | ||
591 | skb_record_rx_queue(skb, channel->channel); | ||
592 | |||
593 | /* Pass the packet up */ | 600 | /* Pass the packet up */ |
594 | netif_receive_skb(skb); | 601 | netif_receive_skb(skb); |
595 | 602 | ||
596 | /* Update allocation strategy method */ | 603 | /* Update allocation strategy method */ |
597 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | 604 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; |
598 | |||
599 | done: | ||
600 | ; | ||
601 | } | 605 | } |
602 | 606 | ||
603 | void efx_rx_strategy(struct efx_channel *channel) | 607 | void efx_rx_strategy(struct efx_channel *channel) |
@@ -632,12 +636,12 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | |||
632 | EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); | 636 | EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); |
633 | 637 | ||
634 | /* Allocate RX buffers */ | 638 | /* Allocate RX buffers */ |
635 | rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); | 639 | rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); |
636 | rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); | 640 | rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); |
637 | if (!rx_queue->buffer) | 641 | if (!rx_queue->buffer) |
638 | return -ENOMEM; | 642 | return -ENOMEM; |
639 | 643 | ||
640 | rc = falcon_probe_rx(rx_queue); | 644 | rc = efx_nic_probe_rx(rx_queue); |
641 | if (rc) { | 645 | if (rc) { |
642 | kfree(rx_queue->buffer); | 646 | kfree(rx_queue->buffer); |
643 | rx_queue->buffer = NULL; | 647 | rx_queue->buffer = NULL; |
@@ -647,7 +651,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | |||
647 | 651 | ||
648 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | 652 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue) |
649 | { | 653 | { |
650 | struct efx_nic *efx = rx_queue->efx; | ||
651 | unsigned int max_fill, trigger, limit; | 654 | unsigned int max_fill, trigger, limit; |
652 | 655 | ||
653 | EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); | 656 | EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); |
@@ -660,7 +663,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | |||
660 | rx_queue->min_overfill = -1U; | 663 | rx_queue->min_overfill = -1U; |
661 | 664 | ||
662 | /* Initialise limit fields */ | 665 | /* Initialise limit fields */ |
663 | max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; | 666 | max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM; |
664 | trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; | 667 | trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; |
665 | limit = max_fill * min(rx_refill_limit, 100U) / 100U; | 668 | limit = max_fill * min(rx_refill_limit, 100U) / 100U; |
666 | 669 | ||
@@ -669,7 +672,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | |||
669 | rx_queue->fast_fill_limit = limit; | 672 | rx_queue->fast_fill_limit = limit; |
670 | 673 | ||
671 | /* Set up RX descriptor ring */ | 674 | /* Set up RX descriptor ring */ |
672 | falcon_init_rx(rx_queue); | 675 | efx_nic_init_rx(rx_queue); |
673 | } | 676 | } |
674 | 677 | ||
675 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | 678 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) |
@@ -679,11 +682,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
679 | 682 | ||
680 | EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); | 683 | EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); |
681 | 684 | ||
682 | falcon_fini_rx(rx_queue); | 685 | efx_nic_fini_rx(rx_queue); |
683 | 686 | ||
684 | /* Release RX buffers NB start at index 0 not current HW ptr */ | 687 | /* Release RX buffers NB start at index 0 not current HW ptr */ |
685 | if (rx_queue->buffer) { | 688 | if (rx_queue->buffer) { |
686 | for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { | 689 | for (i = 0; i <= EFX_RXQ_MASK; i++) { |
687 | rx_buf = efx_rx_buffer(rx_queue, i); | 690 | rx_buf = efx_rx_buffer(rx_queue, i); |
688 | efx_fini_rx_buffer(rx_queue, rx_buf); | 691 | efx_fini_rx_buffer(rx_queue, rx_buf); |
689 | } | 692 | } |
@@ -704,7 +707,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | |||
704 | { | 707 | { |
705 | EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); | 708 | EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); |
706 | 709 | ||
707 | falcon_remove_rx(rx_queue); | 710 | efx_nic_remove_rx(rx_queue); |
708 | 711 | ||
709 | kfree(rx_queue->buffer); | 712 | kfree(rx_queue->buffer); |
710 | rx_queue->buffer = NULL; | 713 | rx_queue->buffer = NULL; |
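
The rx.c hunks above replace the per-NIC rxd_ring_mask with the compile-time EFX_RXQ_SIZE/EFX_RXQ_MASK pair and feed the GRO result back into the channel's allocation score. Because the ring size is a power of two and added_count/removed_count are free-running counters, the fill level is simply their difference and the next slot index is a single mask operation. A minimal sketch of that arithmetic, under those assumptions (the helper names are illustrative):

/* Minimal sketch of the ring arithmetic the code above relies on:
 * EFX_RXQ_SIZE is a power of two, so free-running counters can be
 * masked directly and their difference is the current fill level. */
static inline unsigned int
example_rxq_fill_level(const struct efx_rx_queue *rx_queue)
{
	/* Never exceeds EFX_RXQ_SIZE, hence the EFX_BUG_ON_PARANOID checks */
	return rx_queue->added_count - rx_queue->removed_count;
}

static inline struct efx_rx_buffer *
example_rxq_next_free(struct efx_rx_queue *rx_queue)
{
	/* Same indexing as the refill loop: no modulo, just the mask */
	return efx_rx_buffer(rx_queue, rx_queue->added_count & EFX_RXQ_MASK);
}
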
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h deleted file mode 100644 index 42ee7555a80b..000000000000 --- a/drivers/net/sfc/rx.h +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_RX_H | ||
11 | #define EFX_RX_H | ||
12 | |||
13 | #include "net_driver.h" | ||
14 | |||
15 | int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); | ||
16 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); | ||
17 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue); | ||
18 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | ||
19 | |||
20 | void efx_rx_strategy(struct efx_channel *channel); | ||
21 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | ||
22 | void efx_rx_work(struct work_struct *data); | ||
23 | void __efx_rx_packet(struct efx_channel *channel, | ||
24 | struct efx_rx_buffer *rx_buf, bool checksummed); | ||
25 | |||
26 | #endif /* EFX_RX_H */ | ||
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index 817c7efc11e0..0106b1d9aae2 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2008 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -18,17 +18,13 @@ | |||
18 | #include <linux/in.h> | 18 | #include <linux/in.h> |
19 | #include <linux/udp.h> | 19 | #include <linux/udp.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/slab.h> | ||
21 | #include <asm/io.h> | 22 | #include <asm/io.h> |
22 | #include "net_driver.h" | 23 | #include "net_driver.h" |
23 | #include "ethtool.h" | ||
24 | #include "efx.h" | 24 | #include "efx.h" |
25 | #include "falcon.h" | 25 | #include "nic.h" |
26 | #include "selftest.h" | 26 | #include "selftest.h" |
27 | #include "boards.h" | ||
28 | #include "workarounds.h" | 27 | #include "workarounds.h" |
29 | #include "spi.h" | ||
30 | #include "falcon_io.h" | ||
31 | #include "mdio_10g.h" | ||
32 | 28 | ||
33 | /* | 29 | /* |
34 | * Loopback test packet structure | 30 | * Loopback test packet structure |
@@ -49,7 +45,7 @@ static const unsigned char payload_source[ETH_ALEN] = { | |||
49 | 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, | 45 | 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, |
50 | }; | 46 | }; |
51 | 47 | ||
52 | static const char *payload_msg = | 48 | static const char payload_msg[] = |
53 | "Hello world! This is an Efx loopback test in progress!"; | 49 | "Hello world! This is an Efx loopback test in progress!"; |
54 | 50 | ||
55 | /** | 51 | /** |
@@ -57,6 +53,7 @@ static const char *payload_msg = | |||
57 | * @flush: Drop all packets in efx_loopback_rx_packet | 53 | * @flush: Drop all packets in efx_loopback_rx_packet |
58 | * @packet_count: Number of packets being used in this test | 54 | * @packet_count: Number of packets being used in this test |
59 | * @skbs: An array of skbs transmitted | 55 | * @skbs: An array of skbs transmitted |
56 | * @offload_csum: Checksums are being offloaded | ||
60 | * @rx_good: RX good packet count | 57 | * @rx_good: RX good packet count |
61 | * @rx_bad: RX bad packet count | 58 | * @rx_bad: RX bad packet count |
62 | * @payload: Payload used in tests | 59 | * @payload: Payload used in tests |
@@ -65,10 +62,7 @@ struct efx_loopback_state { | |||
65 | bool flush; | 62 | bool flush; |
66 | int packet_count; | 63 | int packet_count; |
67 | struct sk_buff **skbs; | 64 | struct sk_buff **skbs; |
68 | |||
69 | /* Checksums are being offloaded */ | ||
70 | bool offload_csum; | 65 | bool offload_csum; |
71 | |||
72 | atomic_t rx_good; | 66 | atomic_t rx_good; |
73 | atomic_t rx_bad; | 67 | atomic_t rx_bad; |
74 | struct efx_loopback_payload payload; | 68 | struct efx_loopback_payload payload; |
@@ -80,60 +74,40 @@ struct efx_loopback_state { | |||
80 | * | 74 | * |
81 | **************************************************************************/ | 75 | **************************************************************************/ |
82 | 76 | ||
83 | static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests) | 77 | static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) |
84 | { | 78 | { |
85 | int rc = 0; | 79 | int rc = 0; |
86 | int devad = __ffs(efx->mdio.mmds); | ||
87 | u16 physid1, physid2; | ||
88 | |||
89 | if (efx->phy_type == PHY_TYPE_NONE) | ||
90 | return 0; | ||
91 | |||
92 | mutex_lock(&efx->mac_lock); | ||
93 | tests->mdio = -1; | ||
94 | |||
95 | physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1); | ||
96 | physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2); | ||
97 | 80 | ||
98 | if ((physid1 == 0x0000) || (physid1 == 0xffff) || | 81 | if (efx->phy_op->test_alive) { |
99 | (physid2 == 0x0000) || (physid2 == 0xffff)) { | 82 | rc = efx->phy_op->test_alive(efx); |
100 | EFX_ERR(efx, "no MDIO PHY present with ID %d\n", | 83 | tests->phy_alive = rc ? -1 : 1; |
101 | efx->mdio.prtad); | ||
102 | rc = -EINVAL; | ||
103 | goto out; | ||
104 | } | 84 | } |
105 | 85 | ||
106 | if (EFX_IS10G(efx)) { | ||
107 | rc = efx_mdio_check_mmds(efx, efx->phy_op->mmds, 0); | ||
108 | if (rc) | ||
109 | goto out; | ||
110 | } | ||
111 | |||
112 | out: | ||
113 | mutex_unlock(&efx->mac_lock); | ||
114 | tests->mdio = rc ? -1 : 1; | ||
115 | return rc; | 86 | return rc; |
116 | } | 87 | } |
117 | 88 | ||
118 | static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) | 89 | static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) |
119 | { | 90 | { |
120 | int rc; | 91 | int rc = 0; |
92 | |||
93 | if (efx->type->test_nvram) { | ||
94 | rc = efx->type->test_nvram(efx); | ||
95 | tests->nvram = rc ? -1 : 1; | ||
96 | } | ||
121 | 97 | ||
122 | rc = falcon_read_nvram(efx, NULL); | ||
123 | tests->nvram = rc ? -1 : 1; | ||
124 | return rc; | 98 | return rc; |
125 | } | 99 | } |
126 | 100 | ||
127 | static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | 101 | static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) |
128 | { | 102 | { |
129 | int rc; | 103 | int rc = 0; |
130 | 104 | ||
131 | /* Not supported on A-series silicon */ | 105 | /* Test register access */ |
132 | if (falcon_rev(efx) < FALCON_REV_B0) | 106 | if (efx->type->test_registers) { |
133 | return 0; | 107 | rc = efx->type->test_registers(efx); |
108 | tests->registers = rc ? -1 : 1; | ||
109 | } | ||
134 | 110 | ||
135 | rc = falcon_test_registers(efx); | ||
136 | tests->registers = rc ? -1 : 1; | ||
137 | return rc; | 111 | return rc; |
138 | } | 112 | } |
139 | 113 | ||
@@ -165,7 +139,7 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
165 | goto success; | 139 | goto success; |
166 | } | 140 | } |
167 | 141 | ||
168 | falcon_generate_interrupt(efx); | 142 | efx_nic_generate_interrupt(efx); |
169 | 143 | ||
170 | /* Wait for arrival of test interrupt. */ | 144 | /* Wait for arrival of test interrupt. */ |
171 | EFX_LOG(efx, "waiting for test interrupt\n"); | 145 | EFX_LOG(efx, "waiting for test interrupt\n"); |
@@ -177,8 +151,8 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
177 | return -ETIMEDOUT; | 151 | return -ETIMEDOUT; |
178 | 152 | ||
179 | success: | 153 | success: |
180 | EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", | 154 | EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx), |
181 | efx->interrupt_mode, efx->last_irq_cpu); | 155 | efx->last_irq_cpu); |
182 | tests->interrupt = 1; | 156 | tests->interrupt = 1; |
183 | return 0; | 157 | return 0; |
184 | } | 158 | } |
@@ -203,7 +177,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
203 | channel->eventq_magic = 0; | 177 | channel->eventq_magic = 0; |
204 | smp_wmb(); | 178 | smp_wmb(); |
205 | 179 | ||
206 | falcon_generate_test_event(channel, magic); | 180 | efx_nic_generate_test_event(channel, magic); |
207 | 181 | ||
208 | /* Wait for arrival of interrupt */ | 182 | /* Wait for arrival of interrupt */ |
209 | count = 0; | 183 | count = 0; |
@@ -254,11 +228,8 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, | |||
254 | if (!efx->phy_op->run_tests) | 228 | if (!efx->phy_op->run_tests) |
255 | return 0; | 229 | return 0; |
256 | 230 | ||
257 | EFX_BUG_ON_PARANOID(efx->phy_op->num_tests == 0 || | ||
258 | efx->phy_op->num_tests > EFX_MAX_PHY_TESTS); | ||
259 | |||
260 | mutex_lock(&efx->mac_lock); | 231 | mutex_lock(&efx->mac_lock); |
261 | rc = efx->phy_op->run_tests(efx, tests->phy, flags); | 232 | rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); |
262 | mutex_unlock(&efx->mac_lock); | 233 | mutex_unlock(&efx->mac_lock); |
263 | return rc; | 234 | return rc; |
264 | } | 235 | } |
@@ -426,7 +397,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) | |||
426 | 397 | ||
427 | if (efx_dev_registered(efx)) | 398 | if (efx_dev_registered(efx)) |
428 | netif_tx_lock_bh(efx->net_dev); | 399 | netif_tx_lock_bh(efx->net_dev); |
429 | rc = efx_xmit(efx, tx_queue, skb); | 400 | rc = efx_enqueue_skb(tx_queue, skb); |
430 | if (efx_dev_registered(efx)) | 401 | if (efx_dev_registered(efx)) |
431 | netif_tx_unlock_bh(efx->net_dev); | 402 | netif_tx_unlock_bh(efx->net_dev); |
432 | 403 | ||
@@ -439,7 +410,6 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) | |||
439 | kfree_skb(skb); | 410 | kfree_skb(skb); |
440 | return -EPIPE; | 411 | return -EPIPE; |
441 | } | 412 | } |
442 | efx->net_dev->trans_start = jiffies; | ||
443 | } | 413 | } |
444 | 414 | ||
445 | return 0; | 415 | return 0; |
@@ -527,7 +497,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
527 | 497 | ||
528 | for (i = 0; i < 3; i++) { | 498 | for (i = 0; i < 3; i++) { |
529 | /* Determine how many packets to send */ | 499 | /* Determine how many packets to send */ |
530 | state->packet_count = (efx->type->txd_ring_mask + 1) / 3; | 500 | state->packet_count = EFX_TXQ_SIZE / 3; |
531 | state->packet_count = min(1 << (i << 2), state->packet_count); | 501 | state->packet_count = min(1 << (i << 2), state->packet_count); |
532 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | 502 | state->skbs = kzalloc(sizeof(state->skbs[0]) * |
533 | state->packet_count, GFP_KERNEL); | 503 | state->packet_count, GFP_KERNEL); |
@@ -568,14 +538,49 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
568 | return 0; | 538 | return 0; |
569 | } | 539 | } |
570 | 540 | ||
541 | /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but | ||
542 | * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it | ||
543 | * to delay and retry. Therefore, it's safer to just poll directly. Wait | ||
544 | * for link up and any faults to dissipate. */ | ||
545 | static int efx_wait_for_link(struct efx_nic *efx) | ||
546 | { | ||
547 | struct efx_link_state *link_state = &efx->link_state; | ||
548 | int count; | ||
549 | bool link_up; | ||
550 | |||
551 | for (count = 0; count < 40; count++) { | ||
552 | schedule_timeout_uninterruptible(HZ / 10); | ||
553 | |||
554 | if (efx->type->monitor != NULL) { | ||
555 | mutex_lock(&efx->mac_lock); | ||
556 | efx->type->monitor(efx); | ||
557 | mutex_unlock(&efx->mac_lock); | ||
558 | } else { | ||
559 | struct efx_channel *channel = &efx->channel[0]; | ||
560 | if (channel->work_pending) | ||
561 | efx_process_channel_now(channel); | ||
562 | } | ||
563 | |||
564 | mutex_lock(&efx->mac_lock); | ||
565 | link_up = link_state->up; | ||
566 | if (link_up) | ||
567 | link_up = !efx->mac_op->check_fault(efx); | ||
568 | mutex_unlock(&efx->mac_lock); | ||
569 | |||
570 | if (link_up) | ||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | return -ETIMEDOUT; | ||
575 | } | ||
576 | |||
571 | static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, | 577 | static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, |
572 | unsigned int loopback_modes) | 578 | unsigned int loopback_modes) |
573 | { | 579 | { |
574 | enum efx_loopback_mode mode; | 580 | enum efx_loopback_mode mode; |
575 | struct efx_loopback_state *state; | 581 | struct efx_loopback_state *state; |
576 | struct efx_tx_queue *tx_queue; | 582 | struct efx_tx_queue *tx_queue; |
577 | bool link_up; | 583 | int rc = 0; |
578 | int count, rc = 0; | ||
579 | 584 | ||
580 | /* Set the port loopback_selftest member. From this point on | 585 | /* Set the port loopback_selftest member. From this point on |
581 | * all received packets will be dropped. Mark the state as | 586 | * all received packets will be dropped. Mark the state as |
@@ -594,46 +599,23 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, | |||
594 | 599 | ||
595 | /* Move the port into the specified loopback mode. */ | 600 | /* Move the port into the specified loopback mode. */ |
596 | state->flush = true; | 601 | state->flush = true; |
602 | mutex_lock(&efx->mac_lock); | ||
597 | efx->loopback_mode = mode; | 603 | efx->loopback_mode = mode; |
598 | efx_reconfigure_port(efx); | 604 | rc = __efx_reconfigure_port(efx); |
599 | 605 | mutex_unlock(&efx->mac_lock); | |
600 | /* Wait for the PHY to signal the link is up. Interrupts | 606 | if (rc) { |
601 | * are enabled for PHY's using LASI, otherwise we poll() | 607 | EFX_ERR(efx, "unable to move into %s loopback\n", |
602 | * quickly */ | 608 | LOOPBACK_MODE(efx)); |
603 | count = 0; | 609 | goto out; |
604 | do { | 610 | } |
605 | struct efx_channel *channel = &efx->channel[0]; | ||
606 | 611 | ||
607 | efx->phy_op->poll(efx); | 612 | rc = efx_wait_for_link(efx); |
608 | schedule_timeout_uninterruptible(HZ / 10); | 613 | if (rc) { |
609 | if (channel->work_pending) | ||
610 | efx_process_channel_now(channel); | ||
611 | /* Wait for PHY events to be processed */ | ||
612 | flush_workqueue(efx->workqueue); | ||
613 | rmb(); | ||
614 | |||
615 | /* We need both the phy and xaui links to be ok. | ||
616 | * rather than relying on the falcon_xmac irq/poll | ||
617 | * regime, just poll xaui directly */ | ||
618 | link_up = efx->link_up; | ||
619 | if (link_up && EFX_IS10G(efx) && | ||
620 | !falcon_xaui_link_ok(efx)) | ||
621 | link_up = false; | ||
622 | |||
623 | } while ((++count < 20) && !link_up); | ||
624 | |||
625 | /* The link should now be up. If it isn't, there is no point | ||
626 | * in attempting a loopback test */ | ||
627 | if (!link_up) { | ||
628 | EFX_ERR(efx, "loopback %s never came up\n", | 614 | EFX_ERR(efx, "loopback %s never came up\n", |
629 | LOOPBACK_MODE(efx)); | 615 | LOOPBACK_MODE(efx)); |
630 | rc = -EIO; | ||
631 | goto out; | 616 | goto out; |
632 | } | 617 | } |
633 | 618 | ||
634 | EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", | ||
635 | LOOPBACK_MODE(efx), count); | ||
636 | |||
637 | /* Test every TX queue */ | 619 | /* Test every TX queue */ |
638 | efx_for_each_tx_queue(tx_queue, efx) { | 620 | efx_for_each_tx_queue(tx_queue, efx) { |
639 | state->offload_csum = (tx_queue->queue == | 621 | state->offload_csum = (tx_queue->queue == |
@@ -667,14 +649,13 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
667 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; | 649 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; |
668 | int phy_mode = efx->phy_mode; | 650 | int phy_mode = efx->phy_mode; |
669 | enum reset_type reset_method = RESET_TYPE_INVISIBLE; | 651 | enum reset_type reset_method = RESET_TYPE_INVISIBLE; |
670 | struct ethtool_cmd ecmd; | ||
671 | struct efx_channel *channel; | 652 | struct efx_channel *channel; |
672 | int rc_test = 0, rc_reset = 0, rc; | 653 | int rc_test = 0, rc_reset = 0, rc; |
673 | 654 | ||
674 | /* Online (i.e. non-disruptive) testing | 655 | /* Online (i.e. non-disruptive) testing |
675 | * This checks interrupt generation, event delivery and PHY presence. */ | 656 | * This checks interrupt generation, event delivery and PHY presence. */ |
676 | 657 | ||
677 | rc = efx_test_mdio(efx, tests); | 658 | rc = efx_test_phy_alive(efx, tests); |
678 | if (rc && !rc_test) | 659 | if (rc && !rc_test) |
679 | rc_test = rc; | 660 | rc_test = rc; |
680 | 661 | ||
@@ -720,21 +701,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
720 | mutex_unlock(&efx->mac_lock); | 701 | mutex_unlock(&efx->mac_lock); |
721 | 702 | ||
722 | /* free up all consumers of SRAM (including all the queues) */ | 703 | /* free up all consumers of SRAM (including all the queues) */ |
723 | efx_reset_down(efx, reset_method, &ecmd); | 704 | efx_reset_down(efx, reset_method); |
724 | 705 | ||
725 | rc = efx_test_chip(efx, tests); | 706 | rc = efx_test_chip(efx, tests); |
726 | if (rc && !rc_test) | 707 | if (rc && !rc_test) |
727 | rc_test = rc; | 708 | rc_test = rc; |
728 | 709 | ||
729 | /* reset the chip to recover from the register test */ | 710 | /* reset the chip to recover from the register test */ |
730 | rc_reset = falcon_reset_hw(efx, reset_method); | 711 | rc_reset = efx->type->reset(efx, reset_method); |
731 | 712 | ||
732 | /* Ensure that the phy is powered and out of loopback | 713 | /* Ensure that the phy is powered and out of loopback |
733 | * for the bist and loopback tests */ | 714 | * for the bist and loopback tests */ |
734 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; | 715 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; |
735 | efx->loopback_mode = LOOPBACK_NONE; | 716 | efx->loopback_mode = LOOPBACK_NONE; |
736 | 717 | ||
737 | rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0); | 718 | rc = efx_reset_up(efx, reset_method, rc_reset == 0); |
738 | if (rc && !rc_reset) | 719 | if (rc && !rc_reset) |
739 | rc_reset = rc; | 720 | rc_reset = rc; |
740 | 721 | ||
@@ -753,10 +734,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
753 | rc_test = rc; | 734 | rc_test = rc; |
754 | 735 | ||
755 | /* restore the PHY to the previous state */ | 736 | /* restore the PHY to the previous state */ |
756 | efx->loopback_mode = loopback_mode; | 737 | mutex_lock(&efx->mac_lock); |
757 | efx->phy_mode = phy_mode; | 738 | efx->phy_mode = phy_mode; |
758 | efx->port_inhibited = false; | 739 | efx->port_inhibited = false; |
759 | efx_ethtool_set_settings(efx->net_dev, &ecmd); | 740 | efx->loopback_mode = loopback_mode; |
741 | __efx_reconfigure_port(efx); | ||
742 | mutex_unlock(&efx->mac_lock); | ||
760 | 743 | ||
761 | return rc_test; | 744 | return rc_test; |
762 | } | 745 | } |
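
In the loopback test above, the burst size grows as 1 << (i << 2) across the three iterations — 1, 16 and then 256 packets — and is capped at a third of the TX ring so the self-test can never fill the queue on its own. A standalone restatement of that sizing (the helper is illustrative; EFX_TXQ_SIZE is the compile-time ring size used throughout this change):

/* Burst sizes used by efx_test_loopback(): 1, 16, 256 packets for
 * iterations 0..2, each capped at EFX_TXQ_SIZE / 3. */
static unsigned int example_loopback_burst(unsigned int iteration)
{
	unsigned int count = 1U << (iteration << 2);	/* 1, 16, 256 */

	return min_t(unsigned int, count, EFX_TXQ_SIZE / 3);
}
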
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h index f6feee04c96b..643bef72b99d 100644 --- a/drivers/net/sfc/selftest.h +++ b/drivers/net/sfc/selftest.h | |||
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests { | |||
32 | */ | 32 | */ |
33 | struct efx_self_tests { | 33 | struct efx_self_tests { |
34 | /* online tests */ | 34 | /* online tests */ |
35 | int mdio; | 35 | int phy_alive; |
36 | int nvram; | 36 | int nvram; |
37 | int interrupt; | 37 | int interrupt; |
38 | int eventq_dma[EFX_MAX_CHANNELS]; | 38 | int eventq_dma[EFX_MAX_CHANNELS]; |
@@ -40,7 +40,7 @@ struct efx_self_tests { | |||
40 | int eventq_poll[EFX_MAX_CHANNELS]; | 40 | int eventq_poll[EFX_MAX_CHANNELS]; |
41 | /* offline tests */ | 41 | /* offline tests */ |
42 | int registers; | 42 | int registers; |
43 | int phy[EFX_MAX_PHY_TESTS]; | 43 | int phy_ext[EFX_MAX_PHY_TESTS]; |
44 | struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; | 44 | struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; |
45 | }; | 45 | }; |
46 | 46 | ||
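
The renamed fields keep the result convention used throughout selftest.c: 0 means the test was not run, 1 means it passed and -1 means it failed (hence the rc ? -1 : 1 assignments above). A hedged sketch of turning that tri-state into a report string (the helper name is invented for illustration):

/* Illustrative only: map one struct efx_self_tests result field. */
static const char *example_result_str(int result)
{
	switch (result) {
	case 1:
		return "passed";
	case -1:
		return "FAILED";
	default:
		return "not run";	/* 0: test skipped or unsupported */
	}
}
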
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c deleted file mode 100644 index 49eb91b5f50c..000000000000 --- a/drivers/net/sfc/sfe4001.c +++ /dev/null | |||
@@ -1,435 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2008 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | /***************************************************************************** | ||
11 | * Support for the SFE4001 and SFN4111T NICs. | ||
12 | * | ||
13 | * The SFE4001 does not power-up fully at reset due to its high power | ||
14 | * consumption. We control its power via a PCA9539 I/O expander. | ||
15 | * Both boards have a MAX6647 temperature monitor which we expose to | ||
16 | * the lm90 driver. | ||
17 | * | ||
18 | * This also provides minimal support for reflashing the PHY, which is | ||
19 | * initiated by resetting it with the FLASH_CFG_1 pin pulled down. | ||
20 | * On SFE4001 rev A2 and later this is connected to the 3V3X output of | ||
21 | * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3. | ||
22 | * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually | ||
23 | * exclusive with the network device being open. | ||
24 | */ | ||
25 | |||
26 | #include <linux/delay.h> | ||
27 | #include <linux/rtnetlink.h> | ||
28 | #include "net_driver.h" | ||
29 | #include "efx.h" | ||
30 | #include "phy.h" | ||
31 | #include "boards.h" | ||
32 | #include "falcon.h" | ||
33 | #include "falcon_hwdefs.h" | ||
34 | #include "falcon_io.h" | ||
35 | #include "mac.h" | ||
36 | #include "workarounds.h" | ||
37 | |||
38 | /************************************************************************** | ||
39 | * | ||
40 | * I2C IO Expander device | ||
41 | * | ||
42 | **************************************************************************/ | ||
43 | #define PCA9539 0x74 | ||
44 | |||
45 | #define P0_IN 0x00 | ||
46 | #define P0_OUT 0x02 | ||
47 | #define P0_INVERT 0x04 | ||
48 | #define P0_CONFIG 0x06 | ||
49 | |||
50 | #define P0_EN_1V0X_LBN 0 | ||
51 | #define P0_EN_1V0X_WIDTH 1 | ||
52 | #define P0_EN_1V2_LBN 1 | ||
53 | #define P0_EN_1V2_WIDTH 1 | ||
54 | #define P0_EN_2V5_LBN 2 | ||
55 | #define P0_EN_2V5_WIDTH 1 | ||
56 | #define P0_EN_3V3X_LBN 3 | ||
57 | #define P0_EN_3V3X_WIDTH 1 | ||
58 | #define P0_EN_5V_LBN 4 | ||
59 | #define P0_EN_5V_WIDTH 1 | ||
60 | #define P0_SHORTEN_JTAG_LBN 5 | ||
61 | #define P0_SHORTEN_JTAG_WIDTH 1 | ||
62 | #define P0_X_TRST_LBN 6 | ||
63 | #define P0_X_TRST_WIDTH 1 | ||
64 | #define P0_DSP_RESET_LBN 7 | ||
65 | #define P0_DSP_RESET_WIDTH 1 | ||
66 | |||
67 | #define P1_IN 0x01 | ||
68 | #define P1_OUT 0x03 | ||
69 | #define P1_INVERT 0x05 | ||
70 | #define P1_CONFIG 0x07 | ||
71 | |||
72 | #define P1_AFE_PWD_LBN 0 | ||
73 | #define P1_AFE_PWD_WIDTH 1 | ||
74 | #define P1_DSP_PWD25_LBN 1 | ||
75 | #define P1_DSP_PWD25_WIDTH 1 | ||
76 | #define P1_RESERVED_LBN 2 | ||
77 | #define P1_RESERVED_WIDTH 2 | ||
78 | #define P1_SPARE_LBN 4 | ||
79 | #define P1_SPARE_WIDTH 4 | ||
80 | |||
81 | /* Temperature Sensor */ | ||
82 | #define MAX664X_REG_RSL 0x02 | ||
83 | #define MAX664X_REG_WLHO 0x0B | ||
84 | |||
85 | static void sfe4001_poweroff(struct efx_nic *efx) | ||
86 | { | ||
87 | struct i2c_client *ioexp_client = efx->board_info.ioexp_client; | ||
88 | struct i2c_client *hwmon_client = efx->board_info.hwmon_client; | ||
89 | |||
90 | /* Turn off all power rails and disable outputs */ | ||
91 | i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff); | ||
92 | i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff); | ||
93 | i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff); | ||
94 | |||
95 | /* Clear any over-temperature alert */ | ||
96 | i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); | ||
97 | } | ||
98 | |||
99 | static int sfe4001_poweron(struct efx_nic *efx) | ||
100 | { | ||
101 | struct i2c_client *hwmon_client = efx->board_info.hwmon_client; | ||
102 | struct i2c_client *ioexp_client = efx->board_info.ioexp_client; | ||
103 | unsigned int i, j; | ||
104 | int rc; | ||
105 | u8 out; | ||
106 | |||
107 | /* Clear any previous over-temperature alert */ | ||
108 | rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); | ||
109 | if (rc < 0) | ||
110 | return rc; | ||
111 | |||
112 | /* Enable port 0 and port 1 outputs on IO expander */ | ||
113 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00); | ||
114 | if (rc) | ||
115 | return rc; | ||
116 | rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, | ||
117 | 0xff & ~(1 << P1_SPARE_LBN)); | ||
118 | if (rc) | ||
119 | goto fail_on; | ||
120 | |||
121 | /* If PHY power is on, turn it all off and wait 1 second to | ||
122 | * ensure a full reset. | ||
123 | */ | ||
124 | rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT); | ||
125 | if (rc < 0) | ||
126 | goto fail_on; | ||
127 | out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | | ||
128 | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | | ||
129 | (0 << P0_EN_1V0X_LBN)); | ||
130 | if (rc != out) { | ||
131 | EFX_INFO(efx, "power-cycling PHY\n"); | ||
132 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | ||
133 | if (rc) | ||
134 | goto fail_on; | ||
135 | schedule_timeout_uninterruptible(HZ); | ||
136 | } | ||
137 | |||
138 | for (i = 0; i < 20; ++i) { | ||
139 | /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */ | ||
140 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | | ||
141 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | | ||
142 | (1 << P0_X_TRST_LBN)); | ||
143 | if (efx->phy_mode & PHY_MODE_SPECIAL) | ||
144 | out |= 1 << P0_EN_3V3X_LBN; | ||
145 | |||
146 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | ||
147 | if (rc) | ||
148 | goto fail_on; | ||
149 | msleep(10); | ||
150 | |||
151 | /* Turn on 1V power rail */ | ||
152 | out &= ~(1 << P0_EN_1V0X_LBN); | ||
153 | rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); | ||
154 | if (rc) | ||
155 | goto fail_on; | ||
156 | |||
157 | EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i); | ||
158 | |||
159 | /* In flash config mode, DSP does not turn on AFE, so | ||
160 | * just wait 1 second. | ||
161 | */ | ||
162 | if (efx->phy_mode & PHY_MODE_SPECIAL) { | ||
163 | schedule_timeout_uninterruptible(HZ); | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | for (j = 0; j < 10; ++j) { | ||
168 | msleep(100); | ||
169 | |||
170 | /* Check DSP has asserted AFE power line */ | ||
171 | rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN); | ||
172 | if (rc < 0) | ||
173 | goto fail_on; | ||
174 | if (rc & (1 << P1_AFE_PWD_LBN)) | ||
175 | return 0; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | EFX_INFO(efx, "timed out waiting for DSP boot\n"); | ||
180 | rc = -ETIMEDOUT; | ||
181 | fail_on: | ||
182 | sfe4001_poweroff(efx); | ||
183 | return rc; | ||
184 | } | ||
185 | |||
186 | static int sfn4111t_reset(struct efx_nic *efx) | ||
187 | { | ||
188 | efx_oword_t reg; | ||
189 | |||
190 | /* GPIO 3 and the GPIO register are shared with I2C, so block that */ | ||
191 | i2c_lock_adapter(&efx->i2c_adap); | ||
192 | |||
193 | /* Pull RST_N (GPIO 2) low then let it up again, setting the | ||
194 | * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the | ||
195 | * output enables; the output levels should always be 0 (low) | ||
196 | * and we rely on external pull-ups. */ | ||
197 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | ||
198 | EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); | ||
199 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | ||
200 | msleep(1000); | ||
201 | EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false); | ||
202 | EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, | ||
203 | !!(efx->phy_mode & PHY_MODE_SPECIAL)); | ||
204 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | ||
205 | msleep(1); | ||
206 | |||
207 | i2c_unlock_adapter(&efx->i2c_adap); | ||
208 | |||
209 | ssleep(1); | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | static ssize_t show_phy_flash_cfg(struct device *dev, | ||
214 | struct device_attribute *attr, char *buf) | ||
215 | { | ||
216 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
217 | return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); | ||
218 | } | ||
219 | |||
220 | static ssize_t set_phy_flash_cfg(struct device *dev, | ||
221 | struct device_attribute *attr, | ||
222 | const char *buf, size_t count) | ||
223 | { | ||
224 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
225 | enum efx_phy_mode old_mode, new_mode; | ||
226 | int err; | ||
227 | |||
228 | rtnl_lock(); | ||
229 | old_mode = efx->phy_mode; | ||
230 | if (count == 0 || *buf == '0') | ||
231 | new_mode = old_mode & ~PHY_MODE_SPECIAL; | ||
232 | else | ||
233 | new_mode = PHY_MODE_SPECIAL; | ||
234 | if (old_mode == new_mode) { | ||
235 | err = 0; | ||
236 | } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { | ||
237 | err = -EBUSY; | ||
238 | } else { | ||
239 | /* Reset the PHY, reconfigure the MAC and enable/disable | ||
240 | * MAC stats accordingly. */ | ||
241 | efx->phy_mode = new_mode; | ||
242 | if (new_mode & PHY_MODE_SPECIAL) | ||
243 | efx_stats_disable(efx); | ||
244 | if (efx->board_info.type == EFX_BOARD_SFE4001) | ||
245 | err = sfe4001_poweron(efx); | ||
246 | else | ||
247 | err = sfn4111t_reset(efx); | ||
248 | efx_reconfigure_port(efx); | ||
249 | if (!(new_mode & PHY_MODE_SPECIAL)) | ||
250 | efx_stats_enable(efx); | ||
251 | } | ||
252 | rtnl_unlock(); | ||
253 | |||
254 | return err ? err : count; | ||
255 | } | ||
256 | |||
257 | static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg); | ||
258 | |||
259 | static void sfe4001_fini(struct efx_nic *efx) | ||
260 | { | ||
261 | EFX_INFO(efx, "%s\n", __func__); | ||
262 | |||
263 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
264 | sfe4001_poweroff(efx); | ||
265 | i2c_unregister_device(efx->board_info.ioexp_client); | ||
266 | i2c_unregister_device(efx->board_info.hwmon_client); | ||
267 | } | ||
268 | |||
269 | static int sfe4001_check_hw(struct efx_nic *efx) | ||
270 | { | ||
271 | s32 status; | ||
272 | |||
273 | /* If XAUI link is up then do not monitor */ | ||
274 | if (EFX_WORKAROUND_7884(efx) && efx->mac_up) | ||
275 | return 0; | ||
276 | |||
277 | /* Check the powered status of the PHY. Lack of power implies that | ||
278 | * the MAX6647 has shut down power to it, probably due to a temp. | ||
279 | * alarm. Reading the power status rather than the MAX6647 status | ||
280 | * directly because the latter is read-to-clear and would thus | ||
281 | * start to power up the PHY again when polled, causing us to blip | ||
282 | * the power undesirably. | ||
283 | * We know we can read from the IO expander because we did | ||
284 | * it during power-on. Assume failure now is bad news. */ | ||
285 | status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN); | ||
286 | if (status >= 0 && | ||
287 | (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0) | ||
288 | return 0; | ||
289 | |||
290 | /* Use board power control, not PHY power control */ | ||
291 | sfe4001_poweroff(efx); | ||
292 | efx->phy_mode = PHY_MODE_OFF; | ||
293 | |||
294 | return (status < 0) ? -EIO : -ERANGE; | ||
295 | } | ||
296 | |||
297 | static struct i2c_board_info sfe4001_hwmon_info = { | ||
298 | I2C_BOARD_INFO("max6647", 0x4e), | ||
299 | }; | ||
300 | |||
301 | /* This board uses an I2C expander to provide power to the PHY, which needs to | ||
302 | * be turned on before the PHY can be used. | ||
303 | * Context: Process context, rtnl lock held | ||
304 | */ | ||
305 | int sfe4001_init(struct efx_nic *efx) | ||
306 | { | ||
307 | int rc; | ||
308 | |||
309 | #if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE) | ||
310 | efx->board_info.hwmon_client = | ||
311 | i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info); | ||
312 | #else | ||
313 | efx->board_info.hwmon_client = | ||
314 | i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr); | ||
315 | #endif | ||
316 | if (!efx->board_info.hwmon_client) | ||
317 | return -EIO; | ||
318 | |||
319 | /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ | ||
320 | rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client, | ||
321 | MAX664X_REG_WLHO, 90); | ||
322 | if (rc) | ||
323 | goto fail_hwmon; | ||
324 | |||
325 | efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539); | ||
326 | if (!efx->board_info.ioexp_client) { | ||
327 | rc = -EIO; | ||
328 | goto fail_hwmon; | ||
329 | } | ||
330 | |||
331 | /* 10Xpress has fixed-function LED pins, so there is no board-specific | ||
332 | * blink code. */ | ||
333 | efx->board_info.blink = tenxpress_phy_blink; | ||
334 | |||
335 | efx->board_info.monitor = sfe4001_check_hw; | ||
336 | efx->board_info.fini = sfe4001_fini; | ||
337 | |||
338 | if (efx->phy_mode & PHY_MODE_SPECIAL) { | ||
339 | /* PHY won't generate a 156.25 MHz clock and MAC stats fetch | ||
340 | * will fail. */ | ||
341 | efx_stats_disable(efx); | ||
342 | } | ||
343 | rc = sfe4001_poweron(efx); | ||
344 | if (rc) | ||
345 | goto fail_ioexp; | ||
346 | |||
347 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
348 | if (rc) | ||
349 | goto fail_on; | ||
350 | |||
351 | EFX_INFO(efx, "PHY is powered on\n"); | ||
352 | return 0; | ||
353 | |||
354 | fail_on: | ||
355 | sfe4001_poweroff(efx); | ||
356 | fail_ioexp: | ||
357 | i2c_unregister_device(efx->board_info.ioexp_client); | ||
358 | fail_hwmon: | ||
359 | i2c_unregister_device(efx->board_info.hwmon_client); | ||
360 | return rc; | ||
361 | } | ||
362 | |||
363 | static int sfn4111t_check_hw(struct efx_nic *efx) | ||
364 | { | ||
365 | s32 status; | ||
366 | |||
367 | /* If XAUI link is up then do not monitor */ | ||
368 | if (EFX_WORKAROUND_7884(efx) && efx->mac_up) | ||
369 | return 0; | ||
370 | |||
371 | /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */ | ||
372 | status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client, | ||
373 | MAX664X_REG_RSL); | ||
374 | if (status < 0) | ||
375 | return -EIO; | ||
376 | if (status & 0x57) | ||
377 | return -ERANGE; | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | static void sfn4111t_fini(struct efx_nic *efx) | ||
382 | { | ||
383 | EFX_INFO(efx, "%s\n", __func__); | ||
384 | |||
385 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
386 | i2c_unregister_device(efx->board_info.hwmon_client); | ||
387 | } | ||
388 | |||
389 | static struct i2c_board_info sfn4111t_a0_hwmon_info = { | ||
390 | I2C_BOARD_INFO("max6647", 0x4e), | ||
391 | }; | ||
392 | |||
393 | static struct i2c_board_info sfn4111t_r5_hwmon_info = { | ||
394 | I2C_BOARD_INFO("max6646", 0x4d), | ||
395 | }; | ||
396 | |||
397 | int sfn4111t_init(struct efx_nic *efx) | ||
398 | { | ||
399 | int i = 0; | ||
400 | int rc; | ||
401 | |||
402 | efx->board_info.hwmon_client = | ||
403 | i2c_new_device(&efx->i2c_adap, | ||
404 | (efx->board_info.minor < 5) ? | ||
405 | &sfn4111t_a0_hwmon_info : | ||
406 | &sfn4111t_r5_hwmon_info); | ||
407 | if (!efx->board_info.hwmon_client) | ||
408 | return -EIO; | ||
409 | |||
410 | efx->board_info.blink = tenxpress_phy_blink; | ||
411 | efx->board_info.monitor = sfn4111t_check_hw; | ||
412 | efx->board_info.fini = sfn4111t_fini; | ||
413 | |||
414 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
415 | if (rc) | ||
416 | goto fail_hwmon; | ||
417 | |||
418 | do { | ||
419 | if (efx->phy_mode & PHY_MODE_SPECIAL) { | ||
420 | /* PHY may not generate a 156.25 MHz clock and MAC | ||
421 | * stats fetch will fail. */ | ||
422 | efx_stats_disable(efx); | ||
423 | sfn4111t_reset(efx); | ||
424 | } | ||
425 | rc = sft9001_wait_boot(efx); | ||
426 | if (rc == 0) | ||
427 | return 0; | ||
428 | efx->phy_mode = PHY_MODE_SPECIAL; | ||
429 | } while (rc == -EINVAL && ++i < 2); | ||
430 | |||
431 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); | ||
432 | fail_hwmon: | ||
433 | i2c_unregister_device(efx->board_info.hwmon_client); | ||
434 | return rc; | ||
435 | } | ||
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c new file mode 100644 index 000000000000..e0c46f59d1f8 --- /dev/null +++ b/drivers/net/sfc/siena.c | |||
@@ -0,0 +1,617 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include "net_driver.h" | ||
17 | #include "bitfield.h" | ||
18 | #include "efx.h" | ||
19 | #include "nic.h" | ||
20 | #include "mac.h" | ||
21 | #include "spi.h" | ||
22 | #include "regs.h" | ||
23 | #include "io.h" | ||
24 | #include "phy.h" | ||
25 | #include "workarounds.h" | ||
26 | #include "mcdi.h" | ||
27 | #include "mcdi_pcol.h" | ||
28 | |||
29 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ | ||
30 | |||
31 | static void siena_init_wol(struct efx_nic *efx); | ||
32 | |||
33 | |||
34 | static void siena_push_irq_moderation(struct efx_channel *channel) | ||
35 | { | ||
36 | efx_dword_t timer_cmd; | ||
37 | |||
38 | if (channel->irq_moderation) | ||
39 | EFX_POPULATE_DWORD_2(timer_cmd, | ||
40 | FRF_CZ_TC_TIMER_MODE, | ||
41 | FFE_CZ_TIMER_MODE_INT_HLDOFF, | ||
42 | FRF_CZ_TC_TIMER_VAL, | ||
43 | channel->irq_moderation - 1); | ||
44 | else | ||
45 | EFX_POPULATE_DWORD_2(timer_cmd, | ||
46 | FRF_CZ_TC_TIMER_MODE, | ||
47 | FFE_CZ_TIMER_MODE_DIS, | ||
48 | FRF_CZ_TC_TIMER_VAL, 0); | ||
49 | efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, | ||
50 | channel->channel); | ||
51 | } | ||
52 | |||
53 | static void siena_push_multicast_hash(struct efx_nic *efx) | ||
54 | { | ||
55 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
56 | |||
57 | efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, | ||
58 | efx->multicast_hash.byte, sizeof(efx->multicast_hash), | ||
59 | NULL, 0, NULL); | ||
60 | } | ||
61 | |||
62 | static int siena_mdio_write(struct net_device *net_dev, | ||
63 | int prtad, int devad, u16 addr, u16 value) | ||
64 | { | ||
65 | struct efx_nic *efx = netdev_priv(net_dev); | ||
66 | uint32_t status; | ||
67 | int rc; | ||
68 | |||
69 | rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, | ||
70 | addr, value, &status); | ||
71 | if (rc) | ||
72 | return rc; | ||
73 | if (status != MC_CMD_MDIO_STATUS_GOOD) | ||
74 | return -EIO; | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static int siena_mdio_read(struct net_device *net_dev, | ||
80 | int prtad, int devad, u16 addr) | ||
81 | { | ||
82 | struct efx_nic *efx = netdev_priv(net_dev); | ||
83 | uint16_t value; | ||
84 | uint32_t status; | ||
85 | int rc; | ||
86 | |||
87 | rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, | ||
88 | addr, &value, &status); | ||
89 | if (rc) | ||
90 | return rc; | ||
91 | if (status != MC_CMD_MDIO_STATUS_GOOD) | ||
92 | return -EIO; | ||
93 | |||
94 | return (int)value; | ||
95 | } | ||
96 | |||
97 | /* This call is responsible for hooking in the MAC and PHY operations */ | ||
98 | static int siena_probe_port(struct efx_nic *efx) | ||
99 | { | ||
100 | int rc; | ||
101 | |||
102 | /* Hook in PHY operations table */ | ||
103 | efx->phy_op = &efx_mcdi_phy_ops; | ||
104 | |||
105 | /* Set up MDIO structure for PHY */ | ||
106 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
107 | efx->mdio.mdio_read = siena_mdio_read; | ||
108 | efx->mdio.mdio_write = siena_mdio_write; | ||
109 | |||
110 | /* Fill out MDIO structure, loopback modes, and initial link state */ | ||
111 | rc = efx->phy_op->probe(efx); | ||
112 | if (rc != 0) | ||
113 | return rc; | ||
114 | |||
115 | /* Allocate buffer for stats */ | ||
116 | rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, | ||
117 | MC_CMD_MAC_NSTATS * sizeof(u64)); | ||
118 | if (rc) | ||
119 | return rc; | ||
120 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", | ||
121 | (u64)efx->stats_buffer.dma_addr, | ||
122 | efx->stats_buffer.addr, | ||
123 | (u64)virt_to_phys(efx->stats_buffer.addr)); | ||
124 | |||
125 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); | ||
126 | |||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | void siena_remove_port(struct efx_nic *efx) | ||
131 | { | ||
132 | efx->phy_op->remove(efx); | ||
133 | efx_nic_free_buffer(efx, &efx->stats_buffer); | ||
134 | } | ||
135 | |||
136 | static const struct efx_nic_register_test siena_register_tests[] = { | ||
137 | { FR_AZ_ADR_REGION, | ||
138 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, | ||
139 | { FR_CZ_USR_EV_CFG, | ||
140 | EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, | ||
141 | { FR_AZ_RX_CFG, | ||
142 | EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, | ||
143 | { FR_AZ_TX_CFG, | ||
144 | EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, | ||
145 | { FR_AZ_TX_RESERVED, | ||
146 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, | ||
147 | { FR_AZ_SRM_TX_DC_CFG, | ||
148 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, | ||
149 | { FR_AZ_RX_DC_CFG, | ||
150 | EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, | ||
151 | { FR_AZ_RX_DC_PF_WM, | ||
152 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, | ||
153 | { FR_BZ_DP_CTRL, | ||
154 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, | ||
155 | { FR_BZ_RX_RSS_TKEY, | ||
156 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | ||
157 | { FR_CZ_RX_RSS_IPV6_REG1, | ||
158 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | ||
159 | { FR_CZ_RX_RSS_IPV6_REG2, | ||
160 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | ||
161 | { FR_CZ_RX_RSS_IPV6_REG3, | ||
162 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, | ||
163 | }; | ||
164 | |||
165 | static int siena_test_registers(struct efx_nic *efx) | ||
166 | { | ||
167 | return efx_nic_test_registers(efx, siena_register_tests, | ||
168 | ARRAY_SIZE(siena_register_tests)); | ||
169 | } | ||
170 | |||
171 | /************************************************************************** | ||
172 | * | ||
173 | * Device reset | ||
174 | * | ||
175 | ************************************************************************** | ||
176 | */ | ||
177 | |||
178 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) | ||
179 | { | ||
180 | int rc; | ||
181 | |||
182 | /* Recover from a failed assertion pre-reset */ | ||
183 | rc = efx_mcdi_handle_assertion(efx); | ||
184 | if (rc) | ||
185 | return rc; | ||
186 | |||
187 | if (method == RESET_TYPE_WORLD) | ||
188 | return efx_mcdi_reset_mc(efx); | ||
189 | else | ||
190 | return efx_mcdi_reset_port(efx); | ||
191 | } | ||
192 | |||
193 | static int siena_probe_nvconfig(struct efx_nic *efx) | ||
194 | { | ||
195 | int rc; | ||
196 | |||
197 | rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL); | ||
198 | if (rc) | ||
199 | return rc; | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int siena_probe_nic(struct efx_nic *efx) | ||
205 | { | ||
206 | struct siena_nic_data *nic_data; | ||
207 | bool already_attached = 0; | ||
208 | int rc; | ||
209 | |||
210 | /* Allocate storage for hardware specific data */ | ||
211 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); | ||
212 | if (!nic_data) | ||
213 | return -ENOMEM; | ||
214 | efx->nic_data = nic_data; | ||
215 | |||
216 | if (efx_nic_fpga_ver(efx) != 0) { | ||
217 | EFX_ERR(efx, "Siena FPGA not supported\n"); | ||
218 | rc = -ENODEV; | ||
219 | goto fail1; | ||
220 | } | ||
221 | |||
222 | efx_mcdi_init(efx); | ||
223 | |||
224 | /* Recover from a failed assertion before probing */ | ||
225 | rc = efx_mcdi_handle_assertion(efx); | ||
226 | if (rc) | ||
227 | goto fail1; | ||
228 | |||
229 | rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); | ||
230 | if (rc) { | ||
231 | EFX_ERR(efx, "Failed to read MCPU firmware version - " | ||
232 | "rc %d\n", rc); | ||
233 | goto fail1; /* MCPU absent? */ | ||
234 | } | ||
235 | |||
236 | /* Let the BMC know that the driver is now in charge of link and | ||
237 | * filter settings. We must do this before we reset the NIC */ | ||
238 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); | ||
239 | if (rc) { | ||
240 | EFX_ERR(efx, "Unable to register driver with MCPU\n"); | ||
241 | goto fail2; | ||
242 | } | ||
243 | if (already_attached) | ||
244 | /* Not a fatal error */ | ||
245 | EFX_ERR(efx, "Host already registered with MCPU\n"); | ||
246 | |||
247 | /* Now we can reset the NIC */ | ||
248 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); | ||
249 | if (rc) { | ||
250 | EFX_ERR(efx, "failed to reset NIC\n"); | ||
251 | goto fail3; | ||
252 | } | ||
253 | |||
254 | siena_init_wol(efx); | ||
255 | |||
256 | /* Allocate memory for INT_KER */ | ||
257 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); | ||
258 | if (rc) | ||
259 | goto fail4; | ||
260 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | ||
261 | |||
262 | EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", | ||
263 | (unsigned long long)efx->irq_status.dma_addr, | ||
264 | efx->irq_status.addr, | ||
265 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); | ||
266 | |||
267 | /* Read in the non-volatile configuration */ | ||
268 | rc = siena_probe_nvconfig(efx); | ||
269 | if (rc == -EINVAL) { | ||
270 | EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); | ||
271 | efx->phy_type = PHY_TYPE_NONE; | ||
272 | efx->mdio.prtad = MDIO_PRTAD_NONE; | ||
273 | } else if (rc) { | ||
274 | goto fail5; | ||
275 | } | ||
276 | |||
277 | return 0; | ||
278 | |||
279 | fail5: | ||
280 | efx_nic_free_buffer(efx, &efx->irq_status); | ||
281 | fail4: | ||
282 | fail3: | ||
283 | efx_mcdi_drv_attach(efx, false, NULL); | ||
284 | fail2: | ||
285 | fail1: | ||
286 | kfree(efx->nic_data); | ||
287 | return rc; | ||
288 | } | ||
289 | |||
290 | /* This call performs hardware-specific global initialisation, such as | ||
291 | * defining the descriptor cache sizes and number of RSS channels. | ||
292 | * It does not set up any buffers, descriptor rings or event queues. | ||
293 | */ | ||
294 | static int siena_init_nic(struct efx_nic *efx) | ||
295 | { | ||
296 | efx_oword_t temp; | ||
297 | int rc; | ||
298 | |||
299 | /* Recover from a failed assertion post-reset */ | ||
300 | rc = efx_mcdi_handle_assertion(efx); | ||
301 | if (rc) | ||
302 | return rc; | ||
303 | |||
304 | /* Squash TX of packets of 16 bytes or less */ | ||
305 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | ||
306 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | ||
307 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | ||
308 | |||
309 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 | ||
310 | * descriptors (which is bad). | ||
311 | */ | ||
312 | efx_reado(efx, &temp, FR_AZ_TX_CFG); | ||
313 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); | ||
314 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); | ||
315 | efx_writeo(efx, &temp, FR_AZ_TX_CFG); | ||
316 | |||
317 | efx_reado(efx, &temp, FR_AZ_RX_CFG); | ||
318 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); | ||
319 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); | ||
320 | efx_writeo(efx, &temp, FR_AZ_RX_CFG); | ||
321 | |||
322 | if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) | ||
323 | /* No MCDI operation has been defined to set thresholds */ | ||
324 | EFX_ERR(efx, "ignoring RX flow control thresholds\n"); | ||
325 | |||
326 | /* Enable event logging */ | ||
327 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | ||
328 | if (rc) | ||
329 | return rc; | ||
330 | |||
331 | /* Set destination of both TX and RX Flush events */ | ||
332 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); | ||
333 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); | ||
334 | |||
335 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); | ||
336 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); | ||
337 | |||
338 | efx_nic_init_common(efx); | ||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | static void siena_remove_nic(struct efx_nic *efx) | ||
343 | { | ||
344 | efx_nic_free_buffer(efx, &efx->irq_status); | ||
345 | |||
346 | siena_reset_hw(efx, RESET_TYPE_ALL); | ||
347 | |||
348 | /* Relinquish the device back to the BMC */ | ||
349 | if (efx_nic_has_mc(efx)) | ||
350 | efx_mcdi_drv_attach(efx, false, NULL); | ||
351 | |||
352 | /* Tear down the private nic state */ | ||
353 | kfree(efx->nic_data); | ||
354 | efx->nic_data = NULL; | ||
355 | } | ||
356 | |||
357 | #define STATS_GENERATION_INVALID ((u64)(-1)) | ||
358 | |||
359 | static int siena_try_update_nic_stats(struct efx_nic *efx) | ||
360 | { | ||
361 | u64 *dma_stats; | ||
362 | struct efx_mac_stats *mac_stats; | ||
363 | u64 generation_start; | ||
364 | u64 generation_end; | ||
365 | |||
366 | mac_stats = &efx->mac_stats; | ||
367 | dma_stats = (u64 *)efx->stats_buffer.addr; | ||
368 | |||
369 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | ||
370 | if (generation_end == STATS_GENERATION_INVALID) | ||
371 | return 0; | ||
372 | rmb(); | ||
373 | |||
374 | #define MAC_STAT(M, D) \ | ||
375 | mac_stats->M = dma_stats[MC_CMD_MAC_ ## D] | ||
376 | |||
377 | MAC_STAT(tx_bytes, TX_BYTES); | ||
378 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); | ||
379 | mac_stats->tx_good_bytes = (mac_stats->tx_bytes - | ||
380 | mac_stats->tx_bad_bytes); | ||
381 | MAC_STAT(tx_packets, TX_PKTS); | ||
382 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); | ||
383 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); | ||
384 | MAC_STAT(tx_control, TX_CONTROL_PKTS); | ||
385 | MAC_STAT(tx_unicast, TX_UNICAST_PKTS); | ||
386 | MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); | ||
387 | MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); | ||
388 | MAC_STAT(tx_lt64, TX_LT64_PKTS); | ||
389 | MAC_STAT(tx_64, TX_64_PKTS); | ||
390 | MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); | ||
391 | MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); | ||
392 | MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); | ||
393 | MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); | ||
394 | MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); | ||
395 | MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); | ||
396 | MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); | ||
397 | mac_stats->tx_collision = 0; | ||
398 | MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); | ||
399 | MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); | ||
400 | MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS); | ||
401 | MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); | ||
402 | MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); | ||
403 | mac_stats->tx_collision = (mac_stats->tx_single_collision + | ||
404 | mac_stats->tx_multiple_collision + | ||
405 | mac_stats->tx_excessive_collision + | ||
406 | mac_stats->tx_late_collision); | ||
407 | MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); | ||
408 | MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); | ||
409 | MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); | ||
410 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); | ||
411 | MAC_STAT(rx_bytes, RX_BYTES); | ||
412 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); | ||
413 | mac_stats->rx_good_bytes = (mac_stats->rx_bytes - | ||
414 | mac_stats->rx_bad_bytes); | ||
415 | MAC_STAT(rx_packets, RX_PKTS); | ||
416 | MAC_STAT(rx_good, RX_GOOD_PKTS); | ||
417 | mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good; | ||
418 | MAC_STAT(rx_pause, RX_PAUSE_PKTS); | ||
419 | MAC_STAT(rx_control, RX_CONTROL_PKTS); | ||
420 | MAC_STAT(rx_unicast, RX_UNICAST_PKTS); | ||
421 | MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); | ||
422 | MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); | ||
423 | MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); | ||
424 | MAC_STAT(rx_64, RX_64_PKTS); | ||
425 | MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); | ||
426 | MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); | ||
427 | MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); | ||
428 | MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); | ||
429 | MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); | ||
430 | MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); | ||
431 | MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); | ||
432 | mac_stats->rx_bad_lt64 = 0; | ||
433 | mac_stats->rx_bad_64_to_15xx = 0; | ||
434 | mac_stats->rx_bad_15xx_to_jumbo = 0; | ||
435 | MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); | ||
436 | MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); | ||
437 | mac_stats->rx_missed = 0; | ||
438 | MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); | ||
439 | MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); | ||
440 | MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); | ||
441 | MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); | ||
442 | MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); | ||
443 | mac_stats->rx_good_lt64 = 0; | ||
444 | |||
445 | efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]; | ||
446 | |||
447 | #undef MAC_STAT | ||
448 | |||
449 | rmb(); | ||
450 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; | ||
451 | if (generation_end != generation_start) | ||
452 | return -EAGAIN; | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
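The MC_CMD_MAC_GENERATION_START/END words bracket the DMA'd statistics so the driver can detect a concurrent firmware update without taking a lock: the firmware is assumed to rewrite one generation word before the counters and the other after, so reading the end word first and the start word last, with read barriers in between, exposes a torn snapshot as a mismatch. A minimal user-space sketch of the same pattern (hypothetical layout and names, not the real MCDI buffer; the kernel's rmb() stood in by a compiler fence) is:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical DMA block: the producer (firmware) writes gen_start,
 * then the counters, then gen_end; the consumer reads in the reverse
 * order, so a mismatch between the two generation words means the
 * counters were being rewritten while we copied them. */
struct stats_dma {
	volatile uint64_t gen_end;
	volatile uint64_t counters[16];
	volatile uint64_t gen_start;
};

#define GEN_INVALID ((uint64_t)-1)

/* User-space stand-in for the kernel's rmb(). */
#define read_barrier() __atomic_thread_fence(__ATOMIC_SEQ_CST)

/* Copy a snapshot into out[]; returns false if no data yet or torn. */
static bool read_stats_snapshot(const struct stats_dma *dma, uint64_t *out)
{
	uint64_t end = dma->gen_end;
	size_t i;

	if (end == GEN_INVALID)		/* producer has never filled the block */
		return false;
	read_barrier();			/* gen_end must be read before counters */

	for (i = 0; i < 16; i++)
		out[i] = dma->counters[i];

	read_barrier();			/* counters must be read before gen_start */
	return dma->gen_start == end;	/* mismatch: retry, as the driver does */
}

A caller would simply loop on read_stats_snapshot() with a short delay, which is exactly what siena_update_nic_stats() above does for up to 10ms.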
457 | static void siena_update_nic_stats(struct efx_nic *efx) | ||
458 | { | ||
459 | int retry; | ||
460 | |||
461 | /* If we're unlucky enough to read statistics during the DMA, wait | ||
462 | * up to 10ms for it to finish (typically takes <500us) */ | ||
463 | for (retry = 0; retry < 100; ++retry) { | ||
464 | if (siena_try_update_nic_stats(efx) == 0) | ||
465 | return; | ||
466 | udelay(100); | ||
467 | } | ||
468 | |||
469 | /* Use the old values instead */ | ||
470 | } | ||
471 | |||
472 | static void siena_start_nic_stats(struct efx_nic *efx) | ||
473 | { | ||
474 | u64 *dma_stats = (u64 *)efx->stats_buffer.addr; | ||
475 | |||
476 | dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; | ||
477 | |||
478 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, | ||
479 | MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); | ||
480 | } | ||
481 | |||
482 | static void siena_stop_nic_stats(struct efx_nic *efx) | ||
483 | { | ||
484 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); | ||
485 | } | ||
486 | |||
487 | void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len) | ||
488 | { | ||
489 | struct siena_nic_data *nic_data = efx->nic_data; | ||
490 | snprintf(buf, len, "%u.%u.%u.%u", | ||
491 | (unsigned int)(nic_data->fw_version >> 48), | ||
492 | (unsigned int)(nic_data->fw_version >> 32 & 0xffff), | ||
493 | (unsigned int)(nic_data->fw_version >> 16 & 0xffff), | ||
494 | (unsigned int)(nic_data->fw_version & 0xffff)); | ||
495 | } | ||
496 | |||
497 | /************************************************************************** | ||
498 | * | ||
499 | * Wake on LAN | ||
500 | * | ||
501 | ************************************************************************** | ||
502 | */ | ||
503 | |||
504 | static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | ||
505 | { | ||
506 | struct siena_nic_data *nic_data = efx->nic_data; | ||
507 | |||
508 | wol->supported = WAKE_MAGIC; | ||
509 | if (nic_data->wol_filter_id != -1) | ||
510 | wol->wolopts = WAKE_MAGIC; | ||
511 | else | ||
512 | wol->wolopts = 0; | ||
513 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
514 | } | ||
515 | |||
516 | |||
517 | static int siena_set_wol(struct efx_nic *efx, u32 type) | ||
518 | { | ||
519 | struct siena_nic_data *nic_data = efx->nic_data; | ||
520 | int rc; | ||
521 | |||
522 | if (type & ~WAKE_MAGIC) | ||
523 | return -EINVAL; | ||
524 | |||
525 | if (type & WAKE_MAGIC) { | ||
526 | if (nic_data->wol_filter_id != -1) | ||
527 | efx_mcdi_wol_filter_remove(efx, | ||
528 | nic_data->wol_filter_id); | ||
529 | rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, | ||
530 | &nic_data->wol_filter_id); | ||
531 | if (rc) | ||
532 | goto fail; | ||
533 | |||
534 | pci_wake_from_d3(efx->pci_dev, true); | ||
535 | } else { | ||
536 | rc = efx_mcdi_wol_filter_reset(efx); | ||
537 | nic_data->wol_filter_id = -1; | ||
538 | pci_wake_from_d3(efx->pci_dev, false); | ||
539 | if (rc) | ||
540 | goto fail; | ||
541 | } | ||
542 | |||
543 | return 0; | ||
544 | fail: | ||
545 | EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc); | ||
546 | return rc; | ||
547 | } | ||
548 | |||
549 | |||
550 | static void siena_init_wol(struct efx_nic *efx) | ||
551 | { | ||
552 | struct siena_nic_data *nic_data = efx->nic_data; | ||
553 | int rc; | ||
554 | |||
555 | rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); | ||
556 | |||
557 | if (rc != 0) { | ||
558 | /* If it failed, attempt to get into a synchronised | ||
559 | * state with MC by resetting any set WoL filters */ | ||
560 | efx_mcdi_wol_filter_reset(efx); | ||
561 | nic_data->wol_filter_id = -1; | ||
562 | } else if (nic_data->wol_filter_id != -1) { | ||
563 | pci_wake_from_d3(efx->pci_dev, true); | ||
564 | } | ||
565 | } | ||
566 | |||
567 | |||
568 | /************************************************************************** | ||
569 | * | ||
570 | * Revision-dependent attributes used by efx.c and nic.c | ||
571 | * | ||
572 | ************************************************************************** | ||
573 | */ | ||
574 | |||
575 | struct efx_nic_type siena_a0_nic_type = { | ||
576 | .probe = siena_probe_nic, | ||
577 | .remove = siena_remove_nic, | ||
578 | .init = siena_init_nic, | ||
579 | .fini = efx_port_dummy_op_void, | ||
580 | .monitor = NULL, | ||
581 | .reset = siena_reset_hw, | ||
582 | .probe_port = siena_probe_port, | ||
583 | .remove_port = siena_remove_port, | ||
584 | .prepare_flush = efx_port_dummy_op_void, | ||
585 | .update_stats = siena_update_nic_stats, | ||
586 | .start_stats = siena_start_nic_stats, | ||
587 | .stop_stats = siena_stop_nic_stats, | ||
588 | .set_id_led = efx_mcdi_set_id_led, | ||
589 | .push_irq_moderation = siena_push_irq_moderation, | ||
590 | .push_multicast_hash = siena_push_multicast_hash, | ||
591 | .reconfigure_port = efx_mcdi_phy_reconfigure, | ||
592 | .get_wol = siena_get_wol, | ||
593 | .set_wol = siena_set_wol, | ||
594 | .resume_wol = siena_init_wol, | ||
595 | .test_registers = siena_test_registers, | ||
596 | .test_nvram = efx_mcdi_nvram_test_all, | ||
597 | .default_mac_ops = &efx_mcdi_mac_operations, | ||
598 | |||
599 | .revision = EFX_REV_SIENA_A0, | ||
600 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + | ||
601 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | ||
602 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | ||
603 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | ||
604 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | ||
605 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, | ||
606 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, | ||
607 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), | ||
608 | .rx_buffer_padding = 0, | ||
609 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | ||
610 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy | ||
611 | * interrupt handler only supports 32 | ||
612 | * channels */ | ||
613 | .tx_dc_base = 0x88000, | ||
614 | .rx_dc_base = 0x68000, | ||
615 | .offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM, | ||
616 | .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, | ||
617 | }; | ||
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h index 1b1ceb411671..8bf4fce0813a 100644 --- a/drivers/net/sfc/spi.h +++ b/drivers/net/sfc/spi.h | |||
@@ -36,8 +36,6 @@ | |||
36 | 36 | ||
37 | /** | 37 | /** |
38 | * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device | 38 | * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device |
39 | * @efx: The Efx controller that owns this device | ||
40 | * @mtd: MTD state | ||
41 | * @device_id: Controller's id for the device | 39 | * @device_id: Controller's id for the device |
42 | * @size: Size (in bytes) | 40 | * @size: Size (in bytes) |
43 | * @addr_len: Number of address bytes in read/write commands | 41 | * @addr_len: Number of address bytes in read/write commands |
@@ -54,10 +52,6 @@ | |||
54 | * Write commands are limited to blocks with this size and alignment. | 52 | * Write commands are limited to blocks with this size and alignment. |
55 | */ | 53 | */ |
56 | struct efx_spi_device { | 54 | struct efx_spi_device { |
57 | struct efx_nic *efx; | ||
58 | #ifdef CONFIG_SFC_MTD | ||
59 | void *mtd; | ||
60 | #endif | ||
61 | int device_id; | 55 | int device_id; |
62 | unsigned int size; | 56 | unsigned int size; |
63 | unsigned int addr_len; | 57 | unsigned int addr_len; |
@@ -67,12 +61,16 @@ struct efx_spi_device { | |||
67 | unsigned int block_size; | 61 | unsigned int block_size; |
68 | }; | 62 | }; |
69 | 63 | ||
70 | int falcon_spi_cmd(const struct efx_spi_device *spi, unsigned int command, | 64 | int falcon_spi_cmd(struct efx_nic *efx, |
65 | const struct efx_spi_device *spi, unsigned int command, | ||
71 | int address, const void* in, void *out, size_t len); | 66 | int address, const void* in, void *out, size_t len); |
72 | int falcon_spi_wait_write(const struct efx_spi_device *spi); | 67 | int falcon_spi_wait_write(struct efx_nic *efx, |
73 | int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, | 68 | const struct efx_spi_device *spi); |
69 | int falcon_spi_read(struct efx_nic *efx, | ||
70 | const struct efx_spi_device *spi, loff_t start, | ||
74 | size_t len, size_t *retlen, u8 *buffer); | 71 | size_t len, size_t *retlen, u8 *buffer); |
75 | int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, | 72 | int falcon_spi_write(struct efx_nic *efx, |
73 | const struct efx_spi_device *spi, loff_t start, | ||
76 | size_t len, size_t *retlen, const u8 *buffer); | 74 | size_t len, size_t *retlen, const u8 *buffer); |
77 | 75 | ||
78 | /* | 76 | /* |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index f4d509015f75..f21efe7bd316 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2007-2008 Solarflare Communications Inc. | 3 | * Copyright 2007-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -10,12 +10,12 @@ | |||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <linux/rtnetlink.h> | 11 | #include <linux/rtnetlink.h> |
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/slab.h> | ||
13 | #include "efx.h" | 14 | #include "efx.h" |
14 | #include "mdio_10g.h" | 15 | #include "mdio_10g.h" |
15 | #include "falcon.h" | 16 | #include "nic.h" |
16 | #include "phy.h" | 17 | #include "phy.h" |
17 | #include "falcon_hwdefs.h" | 18 | #include "regs.h" |
18 | #include "boards.h" | ||
19 | #include "workarounds.h" | 19 | #include "workarounds.h" |
20 | #include "selftest.h" | 20 | #include "selftest.h" |
21 | 21 | ||
@@ -31,13 +31,13 @@ | |||
31 | #define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ | 31 | #define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ |
32 | (1 << LOOPBACK_PCS) | \ | 32 | (1 << LOOPBACK_PCS) | \ |
33 | (1 << LOOPBACK_PMAPMD) | \ | 33 | (1 << LOOPBACK_PMAPMD) | \ |
34 | (1 << LOOPBACK_NETWORK)) | 34 | (1 << LOOPBACK_PHYXS_WS)) |
35 | 35 | ||
36 | #define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \ | 36 | #define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \ |
37 | (1 << LOOPBACK_PHYXS) | \ | 37 | (1 << LOOPBACK_PHYXS) | \ |
38 | (1 << LOOPBACK_PCS) | \ | 38 | (1 << LOOPBACK_PCS) | \ |
39 | (1 << LOOPBACK_PMAPMD) | \ | 39 | (1 << LOOPBACK_PMAPMD) | \ |
40 | (1 << LOOPBACK_NETWORK)) | 40 | (1 << LOOPBACK_PHYXS_WS)) |
41 | 41 | ||
42 | /* We complain if we fail to see the link partner as 10G capable this many | 42 | /* We complain if we fail to see the link partner as 10G capable this many |
43 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) | 43 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) |
@@ -84,9 +84,9 @@ | |||
84 | #define PMA_PMD_LED_FLASH (3) | 84 | #define PMA_PMD_LED_FLASH (3) |
85 | #define PMA_PMD_LED_MASK 3 | 85 | #define PMA_PMD_LED_MASK 3 |
86 | /* All LEDs under hardware control */ | 86 | /* All LEDs under hardware control */ |
87 | #define PMA_PMD_LED_FULL_AUTO (0) | 87 | #define SFT9001_PMA_PMD_LED_DEFAULT 0 |
88 | /* Green and Amber under hardware control, Red off */ | 88 | /* Green and Amber under hardware control, Red off */ |
89 | #define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) | 89 | #define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
90 | 90 | ||
91 | #define PMA_PMD_SPEED_ENABLE_REG 49192 | 91 | #define PMA_PMD_SPEED_ENABLE_REG 49192 |
92 | #define PMA_PMD_100TX_ADV_LBN 1 | 92 | #define PMA_PMD_100TX_ADV_LBN 1 |
@@ -200,15 +200,20 @@ static ssize_t set_phy_short_reach(struct device *dev, | |||
200 | const char *buf, size_t count) | 200 | const char *buf, size_t count) |
201 | { | 201 | { |
202 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | 202 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
203 | int rc; | ||
203 | 204 | ||
204 | rtnl_lock(); | 205 | rtnl_lock(); |
205 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, | 206 | if (efx->state != STATE_RUNNING) { |
206 | MDIO_PMA_10GBT_TXPWR_SHORT, | 207 | rc = -EBUSY; |
207 | count != 0 && *buf != '0'); | 208 | } else { |
208 | efx_reconfigure_port(efx); | 209 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, |
210 | MDIO_PMA_10GBT_TXPWR_SHORT, | ||
211 | count != 0 && *buf != '0'); | ||
212 | rc = efx_reconfigure_port(efx); | ||
213 | } | ||
209 | rtnl_unlock(); | 214 | rtnl_unlock(); |
210 | 215 | ||
211 | return count; | 216 | return rc < 0 ? rc : (ssize_t)count; |
212 | } | 217 | } |
213 | 218 | ||
214 | static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach, | 219 | static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach, |
@@ -292,23 +297,68 @@ static int tenxpress_init(struct efx_nic *efx) | |||
292 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, | 297 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, |
293 | 1 << PMA_PMA_LED_ACTIVITY_LBN, true); | 298 | 1 << PMA_PMA_LED_ACTIVITY_LBN, true); |
294 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, | 299 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, |
295 | PMA_PMD_LED_DEFAULT); | 300 | SFX7101_PMA_PMD_LED_DEFAULT); |
296 | } | 301 | } |
297 | 302 | ||
298 | return 0; | 303 | return 0; |
299 | } | 304 | } |
300 | 305 | ||
301 | static int tenxpress_phy_init(struct efx_nic *efx) | 306 | static int tenxpress_phy_probe(struct efx_nic *efx) |
302 | { | 307 | { |
303 | struct tenxpress_phy_data *phy_data; | 308 | struct tenxpress_phy_data *phy_data; |
304 | int rc = 0; | 309 | int rc; |
305 | 310 | ||
311 | /* Allocate phy private storage */ | ||
306 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | 312 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
307 | if (!phy_data) | 313 | if (!phy_data) |
308 | return -ENOMEM; | 314 | return -ENOMEM; |
309 | efx->phy_data = phy_data; | 315 | efx->phy_data = phy_data; |
310 | phy_data->phy_mode = efx->phy_mode; | 316 | phy_data->phy_mode = efx->phy_mode; |
311 | 317 | ||
318 | /* Create any special files */ | ||
319 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | ||
320 | rc = device_create_file(&efx->pci_dev->dev, | ||
321 | &dev_attr_phy_short_reach); | ||
322 | if (rc) | ||
323 | goto fail; | ||
324 | } | ||
325 | |||
326 | if (efx->phy_type == PHY_TYPE_SFX7101) { | ||
327 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | ||
328 | efx->mdio.mode_support = MDIO_SUPPORTS_C45; | ||
329 | |||
330 | efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | ||
331 | |||
332 | efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | | ||
333 | ADVERTISED_10000baseT_Full); | ||
334 | } else { | ||
335 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | ||
336 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
337 | |||
338 | efx->loopback_modes = (SFT9001_LOOPBACKS | | ||
339 | FALCON_XMAC_LOOPBACKS | | ||
340 | FALCON_GMAC_LOOPBACKS); | ||
341 | |||
342 | efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | | ||
343 | ADVERTISED_10000baseT_Full | | ||
344 | ADVERTISED_1000baseT_Full | | ||
345 | ADVERTISED_100baseT_Full); | ||
346 | } | ||
347 | |||
348 | return 0; | ||
349 | |||
350 | fail: | ||
351 | kfree(efx->phy_data); | ||
352 | efx->phy_data = NULL; | ||
353 | return rc; | ||
354 | } | ||
355 | |||
356 | static int tenxpress_phy_init(struct efx_nic *efx) | ||
357 | { | ||
358 | int rc; | ||
359 | |||
360 | falcon_board(efx)->type->init_phy(efx); | ||
361 | |||
312 | if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { | 362 | if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { |
313 | if (efx->phy_type == PHY_TYPE_SFT9001A) { | 363 | if (efx->phy_type == PHY_TYPE_SFT9001A) { |
314 | int reg; | 364 | int reg; |
@@ -322,23 +372,20 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
322 | 372 | ||
323 | rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); | 373 | rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); |
324 | if (rc < 0) | 374 | if (rc < 0) |
325 | goto fail; | 375 | return rc; |
326 | 376 | ||
327 | rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); | 377 | rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); |
328 | if (rc < 0) | 378 | if (rc < 0) |
329 | goto fail; | 379 | return rc; |
330 | } | 380 | } |
331 | 381 | ||
332 | rc = tenxpress_init(efx); | 382 | rc = tenxpress_init(efx); |
333 | if (rc < 0) | 383 | if (rc < 0) |
334 | goto fail; | 384 | return rc; |
335 | 385 | ||
336 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | 386 | /* Reinitialise flow control settings */ |
337 | rc = device_create_file(&efx->pci_dev->dev, | 387 | efx_link_set_wanted_fc(efx, efx->wanted_fc); |
338 | &dev_attr_phy_short_reach); | 388 | efx_mdio_an_reconfigure(efx); |
339 | if (rc) | ||
340 | goto fail; | ||
341 | } | ||
342 | 389 | ||
343 | schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ | 390 | schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ |
344 | 391 | ||
@@ -346,11 +393,6 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
346 | falcon_reset_xaui(efx); | 393 | falcon_reset_xaui(efx); |
347 | 394 | ||
348 | return 0; | 395 | return 0; |
349 | |||
350 | fail: | ||
351 | kfree(efx->phy_data); | ||
352 | efx->phy_data = NULL; | ||
353 | return rc; | ||
354 | } | 396 | } |
355 | 397 | ||
356 | /* Perform a "special software reset" on the PHY. The caller is | 398 | /* Perform a "special software reset" on the PHY. The caller is |
@@ -363,7 +405,7 @@ static int tenxpress_special_reset(struct efx_nic *efx) | |||
363 | /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so | 405 | /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so |
364 | * a special software reset can glitch the XGMAC sufficiently for stats | 406 | * a special software reset can glitch the XGMAC sufficiently for stats |
365 | * requests to fail. */ | 407 | * requests to fail. */ |
366 | efx_stats_disable(efx); | 408 | falcon_stop_nic_stats(efx); |
367 | 409 | ||
368 | /* Initiate reset */ | 410 | /* Initiate reset */ |
369 | reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG); | 411 | reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG); |
@@ -385,7 +427,7 @@ static int tenxpress_special_reset(struct efx_nic *efx) | |||
385 | /* Wait for the XGXS state machine to churn */ | 427 | /* Wait for the XGXS state machine to churn */ |
386 | mdelay(10); | 428 | mdelay(10); |
387 | out: | 429 | out: |
388 | efx_stats_enable(efx); | 430 | falcon_start_nic_stats(efx); |
389 | return rc; | 431 | return rc; |
390 | } | 432 | } |
391 | 433 | ||
@@ -489,133 +531,126 @@ static void tenxpress_low_power(struct efx_nic *efx) | |||
489 | !!(efx->phy_mode & PHY_MODE_LOW_POWER)); | 531 | !!(efx->phy_mode & PHY_MODE_LOW_POWER)); |
490 | } | 532 | } |
491 | 533 | ||
492 | static void tenxpress_phy_reconfigure(struct efx_nic *efx) | 534 | static int tenxpress_phy_reconfigure(struct efx_nic *efx) |
493 | { | 535 | { |
494 | struct tenxpress_phy_data *phy_data = efx->phy_data; | 536 | struct tenxpress_phy_data *phy_data = efx->phy_data; |
495 | struct ethtool_cmd ecmd; | ||
496 | bool phy_mode_change, loop_reset; | 537 | bool phy_mode_change, loop_reset; |
497 | 538 | ||
498 | if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { | 539 | if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { |
499 | phy_data->phy_mode = efx->phy_mode; | 540 | phy_data->phy_mode = efx->phy_mode; |
500 | return; | 541 | return 0; |
501 | } | 542 | } |
502 | 543 | ||
503 | tenxpress_low_power(efx); | ||
504 | |||
505 | phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && | 544 | phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && |
506 | phy_data->phy_mode != PHY_MODE_NORMAL); | 545 | phy_data->phy_mode != PHY_MODE_NORMAL); |
507 | loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) || | 546 | loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) || |
508 | LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); | 547 | LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); |
509 | 548 | ||
510 | if (loop_reset || phy_mode_change) { | 549 | if (loop_reset || phy_mode_change) { |
511 | int rc; | 550 | tenxpress_special_reset(efx); |
512 | |||
513 | efx->phy_op->get_settings(efx, &ecmd); | ||
514 | |||
515 | if (loop_reset || phy_mode_change) { | ||
516 | tenxpress_special_reset(efx); | ||
517 | |||
518 | /* Reset XAUI if we were in 10G, and are staying | ||
519 | * in 10G. If we're moving into and out of 10G | ||
520 | * then xaui will be reset anyway */ | ||
521 | if (EFX_IS10G(efx)) | ||
522 | falcon_reset_xaui(efx); | ||
523 | } | ||
524 | 551 | ||
525 | rc = efx->phy_op->set_settings(efx, &ecmd); | 552 | /* Reset XAUI if we were in 10G, and are staying |
526 | WARN_ON(rc); | 553 | * in 10G. If we're moving into and out of 10G |
554 | * then xaui will be reset anyway */ | ||
555 | if (EFX_IS10G(efx)) | ||
556 | falcon_reset_xaui(efx); | ||
527 | } | 557 | } |
528 | 558 | ||
559 | tenxpress_low_power(efx); | ||
529 | efx_mdio_transmit_disable(efx); | 560 | efx_mdio_transmit_disable(efx); |
530 | efx_mdio_phy_reconfigure(efx); | 561 | efx_mdio_phy_reconfigure(efx); |
531 | tenxpress_ext_loopback(efx); | 562 | tenxpress_ext_loopback(efx); |
563 | efx_mdio_an_reconfigure(efx); | ||
532 | 564 | ||
533 | phy_data->loopback_mode = efx->loopback_mode; | 565 | phy_data->loopback_mode = efx->loopback_mode; |
534 | phy_data->phy_mode = efx->phy_mode; | 566 | phy_data->phy_mode = efx->phy_mode; |
535 | 567 | ||
536 | if (efx->phy_type == PHY_TYPE_SFX7101) { | 568 | return 0; |
537 | efx->link_speed = 10000; | ||
538 | efx->link_fd = true; | ||
539 | efx->link_up = sfx7101_link_ok(efx); | ||
540 | } else { | ||
541 | efx->phy_op->get_settings(efx, &ecmd); | ||
542 | efx->link_speed = ecmd.speed; | ||
543 | efx->link_fd = ecmd.duplex == DUPLEX_FULL; | ||
544 | efx->link_up = sft9001_link_ok(efx, &ecmd); | ||
545 | } | ||
546 | efx->link_fc = efx_mdio_get_pause(efx); | ||
547 | } | 569 | } |
548 | 570 | ||
549 | /* Poll PHY for interrupt */ | 571 | static void |
550 | static void tenxpress_phy_poll(struct efx_nic *efx) | 572 | tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); |
573 | |||
574 | /* Poll for link state changes */ | ||
575 | static bool tenxpress_phy_poll(struct efx_nic *efx) | ||
551 | { | 576 | { |
552 | struct tenxpress_phy_data *phy_data = efx->phy_data; | 577 | struct efx_link_state old_state = efx->link_state; |
553 | bool change = false; | ||
554 | 578 | ||
555 | if (efx->phy_type == PHY_TYPE_SFX7101) { | 579 | if (efx->phy_type == PHY_TYPE_SFX7101) { |
556 | bool link_ok = sfx7101_link_ok(efx); | 580 | efx->link_state.up = sfx7101_link_ok(efx); |
557 | if (link_ok != efx->link_up) { | 581 | efx->link_state.speed = 10000; |
558 | change = true; | 582 | efx->link_state.fd = true; |
559 | } else { | 583 | efx->link_state.fc = efx_mdio_get_pause(efx); |
560 | unsigned int link_fc = efx_mdio_get_pause(efx); | 584 | |
561 | if (link_fc != efx->link_fc) | 585 | sfx7101_check_bad_lp(efx, efx->link_state.up); |
562 | change = true; | ||
563 | } | ||
564 | sfx7101_check_bad_lp(efx, link_ok); | ||
565 | } else if (efx->loopback_mode) { | ||
566 | bool link_ok = sft9001_link_ok(efx, NULL); | ||
567 | if (link_ok != efx->link_up) | ||
568 | change = true; | ||
569 | } else { | 586 | } else { |
570 | int status = efx_mdio_read(efx, MDIO_MMD_PMAPMD, | 587 | struct ethtool_cmd ecmd; |
571 | MDIO_PMA_LASI_STAT); | ||
572 | if (status & MDIO_PMA_LASI_LSALARM) | ||
573 | change = true; | ||
574 | } | ||
575 | 588 | ||
576 | if (change) | 589 | /* Check the LASI alarm first */ |
577 | falcon_sim_phy_event(efx); | 590 | if (efx->loopback_mode == LOOPBACK_NONE && |
591 | !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) & | ||
592 | MDIO_PMA_LASI_LSALARM)) | ||
593 | return false; | ||
578 | 594 | ||
579 | if (phy_data->phy_mode != PHY_MODE_NORMAL) | 595 | tenxpress_get_settings(efx, &ecmd); |
580 | return; | 596 | |
597 | efx->link_state.up = sft9001_link_ok(efx, &ecmd); | ||
598 | efx->link_state.speed = ecmd.speed; | ||
599 | efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL); | ||
600 | efx->link_state.fc = efx_mdio_get_pause(efx); | ||
601 | } | ||
602 | |||
603 | return !efx_link_state_equal(&efx->link_state, &old_state); | ||
581 | } | 604 | } |
582 | 605 | ||
583 | static void tenxpress_phy_fini(struct efx_nic *efx) | 606 | static void sfx7101_phy_fini(struct efx_nic *efx) |
584 | { | 607 | { |
585 | int reg; | 608 | int reg; |
586 | 609 | ||
610 | /* Power down the LNPGA */ | ||
611 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); | ||
612 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); | ||
613 | |||
614 | /* Waiting here ensures that the board fini, which can turn | ||
615 | * off the power to the PHY, won't get run until the LNPGA | ||
616 | * powerdown has been given long enough to complete. */ | ||
617 | schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ | ||
618 | } | ||
619 | |||
620 | static void tenxpress_phy_remove(struct efx_nic *efx) | ||
621 | { | ||
587 | if (efx->phy_type == PHY_TYPE_SFT9001B) | 622 | if (efx->phy_type == PHY_TYPE_SFT9001B) |
588 | device_remove_file(&efx->pci_dev->dev, | 623 | device_remove_file(&efx->pci_dev->dev, |
589 | &dev_attr_phy_short_reach); | 624 | &dev_attr_phy_short_reach); |
590 | 625 | ||
591 | if (efx->phy_type == PHY_TYPE_SFX7101) { | ||
592 | /* Power down the LNPGA */ | ||
593 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); | ||
594 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); | ||
595 | |||
596 | /* Waiting here ensures that the board fini, which can turn | ||
597 | * off the power to the PHY, won't get run until the LNPGA | ||
598 | * powerdown has been given long enough to complete. */ | ||
599 | schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ | ||
600 | } | ||
601 | |||
602 | kfree(efx->phy_data); | 626 | kfree(efx->phy_data); |
603 | efx->phy_data = NULL; | 627 | efx->phy_data = NULL; |
604 | } | 628 | } |
605 | 629 | ||
606 | 630 | ||
607 | /* Set the RX and TX LEDs and Link LED flashing. The other LEDs | 631 | /* Override the RX, TX and link LEDs */ |
608 | * (which probably aren't wired anyway) are left in AUTO mode */ | 632 | void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) |
609 | void tenxpress_phy_blink(struct efx_nic *efx, bool blink) | ||
610 | { | 633 | { |
611 | int reg; | 634 | int reg; |
612 | 635 | ||
613 | if (blink) | 636 | switch (mode) { |
614 | reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) | | 637 | case EFX_LED_OFF: |
615 | (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) | | 638 | reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) | |
616 | (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN); | 639 | (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) | |
617 | else | 640 | (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN); |
618 | reg = PMA_PMD_LED_DEFAULT; | 641 | break; |
642 | case EFX_LED_ON: | ||
643 | reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) | | ||
644 | (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) | | ||
645 | (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN); | ||
646 | break; | ||
647 | default: | ||
648 | if (efx->phy_type == PHY_TYPE_SFX7101) | ||
649 | reg = SFX7101_PMA_PMD_LED_DEFAULT; | ||
650 | else | ||
651 | reg = SFT9001_PMA_PMD_LED_DEFAULT; | ||
652 | break; | ||
653 | } | ||
619 | 654 | ||
620 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); | 655 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); |
621 | } | 656 | } |
@@ -624,6 +659,13 @@ static const char *const sfx7101_test_names[] = { | |||
624 | "bist" | 659 | "bist" |
625 | }; | 660 | }; |
626 | 661 | ||
662 | static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index) | ||
663 | { | ||
664 | if (index < ARRAY_SIZE(sfx7101_test_names)) | ||
665 | return sfx7101_test_names[index]; | ||
666 | return NULL; | ||
667 | } | ||
668 | |||
627 | static int | 669 | static int |
628 | sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags) | 670 | sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags) |
629 | { | 671 | { |
@@ -635,6 +677,9 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags) | |||
635 | /* BIST is automatically run after a special software reset */ | 677 | /* BIST is automatically run after a special software reset */ |
636 | rc = tenxpress_special_reset(efx); | 678 | rc = tenxpress_special_reset(efx); |
637 | results[0] = rc ? -1 : 1; | 679 | results[0] = rc ? -1 : 1; |
680 | |||
681 | efx_mdio_an_reconfigure(efx); | ||
682 | |||
638 | return rc; | 683 | return rc; |
639 | } | 684 | } |
640 | 685 | ||
@@ -650,14 +695,17 @@ static const char *const sft9001_test_names[] = { | |||
650 | "cable.pairD.length", | 695 | "cable.pairD.length", |
651 | }; | 696 | }; |
652 | 697 | ||
698 | static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index) | ||
699 | { | ||
700 | if (index < ARRAY_SIZE(sft9001_test_names)) | ||
701 | return sft9001_test_names[index]; | ||
702 | return NULL; | ||
703 | } | ||
704 | |||
653 | static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags) | 705 | static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags) |
654 | { | 706 | { |
655 | struct ethtool_cmd ecmd; | ||
656 | int rc = 0, rc2, i, ctrl_reg, res_reg; | 707 | int rc = 0, rc2, i, ctrl_reg, res_reg; |
657 | 708 | ||
658 | if (flags & ETH_TEST_FL_OFFLINE) | ||
659 | efx->phy_op->get_settings(efx, &ecmd); | ||
660 | |||
661 | /* Initialise cable diagnostic results to unknown failure */ | 709 | /* Initialise cable diagnostic results to unknown failure */ |
662 | for (i = 1; i < 9; ++i) | 710 | for (i = 1; i < 9; ++i) |
663 | results[i] = -1; | 711 | results[i] = -1; |
@@ -709,9 +757,7 @@ out: | |||
709 | if (!rc) | 757 | if (!rc) |
710 | rc = rc2; | 758 | rc = rc2; |
711 | 759 | ||
712 | rc2 = efx->phy_op->set_settings(efx, &ecmd); | 760 | efx_mdio_an_reconfigure(efx); |
713 | if (!rc) | ||
714 | rc = rc2; | ||
715 | } | 761 | } |
716 | 762 | ||
717 | return rc; | 763 | return rc; |
@@ -758,7 +804,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | |||
758 | * but doesn't advertise the correct speed. So override it */ | 804 | * but doesn't advertise the correct speed. So override it */ |
759 | if (efx->loopback_mode == LOOPBACK_GPHY) | 805 | if (efx->loopback_mode == LOOPBACK_GPHY) |
760 | ecmd->speed = SPEED_1000; | 806 | ecmd->speed = SPEED_1000; |
761 | else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks) | 807 | else if (LOOPBACK_EXTERNAL(efx)) |
762 | ecmd->speed = SPEED_10000; | 808 | ecmd->speed = SPEED_10000; |
763 | } | 809 | } |
764 | 810 | ||
@@ -788,35 +834,31 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising) | |||
788 | } | 834 | } |
789 | 835 | ||
790 | struct efx_phy_operations falcon_sfx7101_phy_ops = { | 836 | struct efx_phy_operations falcon_sfx7101_phy_ops = { |
791 | .macs = EFX_XMAC, | 837 | .probe = tenxpress_phy_probe, |
792 | .init = tenxpress_phy_init, | 838 | .init = tenxpress_phy_init, |
793 | .reconfigure = tenxpress_phy_reconfigure, | 839 | .reconfigure = tenxpress_phy_reconfigure, |
794 | .poll = tenxpress_phy_poll, | 840 | .poll = tenxpress_phy_poll, |
795 | .fini = tenxpress_phy_fini, | 841 | .fini = sfx7101_phy_fini, |
796 | .clear_interrupt = efx_port_dummy_op_void, | 842 | .remove = tenxpress_phy_remove, |
797 | .get_settings = tenxpress_get_settings, | 843 | .get_settings = tenxpress_get_settings, |
798 | .set_settings = tenxpress_set_settings, | 844 | .set_settings = tenxpress_set_settings, |
799 | .set_npage_adv = sfx7101_set_npage_adv, | 845 | .set_npage_adv = sfx7101_set_npage_adv, |
800 | .num_tests = ARRAY_SIZE(sfx7101_test_names), | 846 | .test_alive = efx_mdio_test_alive, |
801 | .test_names = sfx7101_test_names, | 847 | .test_name = sfx7101_test_name, |
802 | .run_tests = sfx7101_run_tests, | 848 | .run_tests = sfx7101_run_tests, |
803 | .mmds = TENXPRESS_REQUIRED_DEVS, | ||
804 | .loopbacks = SFX7101_LOOPBACKS, | ||
805 | }; | 849 | }; |
806 | 850 | ||
807 | struct efx_phy_operations falcon_sft9001_phy_ops = { | 851 | struct efx_phy_operations falcon_sft9001_phy_ops = { |
808 | .macs = EFX_GMAC | EFX_XMAC, | 852 | .probe = tenxpress_phy_probe, |
809 | .init = tenxpress_phy_init, | 853 | .init = tenxpress_phy_init, |
810 | .reconfigure = tenxpress_phy_reconfigure, | 854 | .reconfigure = tenxpress_phy_reconfigure, |
811 | .poll = tenxpress_phy_poll, | 855 | .poll = tenxpress_phy_poll, |
812 | .fini = tenxpress_phy_fini, | 856 | .fini = efx_port_dummy_op_void, |
813 | .clear_interrupt = efx_port_dummy_op_void, | 857 | .remove = tenxpress_phy_remove, |
814 | .get_settings = tenxpress_get_settings, | 858 | .get_settings = tenxpress_get_settings, |
815 | .set_settings = tenxpress_set_settings, | 859 | .set_settings = tenxpress_set_settings, |
816 | .set_npage_adv = sft9001_set_npage_adv, | 860 | .set_npage_adv = sft9001_set_npage_adv, |
817 | .num_tests = ARRAY_SIZE(sft9001_test_names), | 861 | .test_alive = efx_mdio_test_alive, |
818 | .test_names = sft9001_test_names, | 862 | .test_name = sft9001_test_name, |
819 | .run_tests = sft9001_run_tests, | 863 | .run_tests = sft9001_run_tests, |
820 | .mmds = TENXPRESS_REQUIRED_DEVS, | ||
821 | .loopbacks = SFT9001_LOOPBACKS, | ||
822 | }; | 864 | }; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 489c4de31447..be0e110a1f73 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2008 Solarflare Communications Inc. | 4 | * Copyright 2005-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
@@ -12,12 +12,14 @@ | |||
12 | #include <linux/tcp.h> | 12 | #include <linux/tcp.h> |
13 | #include <linux/ip.h> | 13 | #include <linux/ip.h> |
14 | #include <linux/in.h> | 14 | #include <linux/in.h> |
15 | #include <linux/ipv6.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <net/ipv6.h> | ||
15 | #include <linux/if_ether.h> | 18 | #include <linux/if_ether.h> |
16 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
17 | #include "net_driver.h" | 20 | #include "net_driver.h" |
18 | #include "tx.h" | ||
19 | #include "efx.h" | 21 | #include "efx.h" |
20 | #include "falcon.h" | 22 | #include "nic.h" |
21 | #include "workarounds.h" | 23 | #include "workarounds.h" |
22 | 24 | ||
23 | /* | 25 | /* |
@@ -26,8 +28,7 @@ | |||
26 | * The tx_queue descriptor ring fill-level must fall below this value | 28 | * The tx_queue descriptor ring fill-level must fall below this value |
27 | * before we restart the netif queue | 29 | * before we restart the netif queue |
28 | */ | 30 | */ |
29 | #define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \ | 31 | #define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u) |
30 | (_tx_queue->efx->type->txd_ring_mask / 2u) | ||
31 | 32 | ||
32 | /* We want to be able to nest calls to netif_stop_queue(), since each | 33 | /* We want to be able to nest calls to netif_stop_queue(), since each |
33 | * channel can have an individual stop on the queue. | 34 | * channel can have an individual stop on the queue. |
@@ -125,6 +126,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue, | |||
125 | } | 126 | } |
126 | 127 | ||
127 | 128 | ||
129 | static inline unsigned | ||
130 | efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) | ||
131 | { | ||
132 | /* Depending on the NIC revision, we can use descriptor | ||
133 | * lengths up to 8K or 8K-1. However, since PCI Express | ||
134 | * devices must split read requests at 4K boundaries, there is | ||
135 | * little benefit from using descriptors that cross those | ||
136 | * boundaries and we keep things simple by not doing so. | ||
137 | */ | ||
138 | unsigned len = (~dma_addr & 0xfff) + 1; | ||
139 | |||
140 | /* Work around hardware bug for unaligned buffers. */ | ||
141 | if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) | ||
142 | len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); | ||
143 | |||
144 | return len; | ||
145 | } | ||
146 | |||
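The (~dma_addr & 0xfff) + 1 expression is the number of bytes from dma_addr up to and including the next 4 KiB boundary, which is why a descriptor of that length never crosses a PCI Express read-request boundary. A tiny stand-alone check with made-up addresses (illustration only, not driver code) verifies the arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes remaining before the next 4 KiB boundary, as used for the
 * maximum single-descriptor length. */
static unsigned int bytes_to_4k_boundary(uint64_t dma_addr)
{
	return (~dma_addr & 0xfff) + 1;
}

int main(void)
{
	/* 0x1000 is already aligned: a full 4096-byte descriptor fits. */
	assert(bytes_to_4k_boundary(0x1000) == 4096);
	/* 0x1ffe is 2 bytes short of the boundary. */
	assert(bytes_to_4k_boundary(0x1ffe) == 2);
	/* An arbitrary offset: 0x1234 -> 0x2000 is 0xdcc bytes away. */
	assert(bytes_to_4k_boundary(0x1234) == 0xdcc);
	printf("all boundary checks passed\n");
	return 0;
}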
128 | /* | 147 | /* |
129 | * Add a socket buffer to a TX queue | 148 | * Add a socket buffer to a TX queue |
130 | * | 149 | * |
@@ -135,11 +154,13 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue, | |||
135 | * If any DMA mapping fails, any mapped fragments will be unmapped, | 154 | * If any DMA mapping fails, any mapped fragments will be unmapped, |
136 | * the queue's insert pointer will be restored to its original value. | 155 | * the queue's insert pointer will be restored to its original value. |
137 | * | 156 | * |
157 | * This function is split out from efx_hard_start_xmit to allow the | ||
158 | * loopback test to direct packets via specific TX queues. | ||
159 | * | ||
138 | * Returns NETDEV_TX_OK or NETDEV_TX_BUSY | 160 | * Returns NETDEV_TX_OK or NETDEV_TX_BUSY |
139 | * You must hold netif_tx_lock() to call this function. | 161 | * You must hold netif_tx_lock() to call this function. |
140 | */ | 162 | */ |
141 | static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | 163 | netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) |
142 | struct sk_buff *skb) | ||
143 | { | 164 | { |
144 | struct efx_nic *efx = tx_queue->efx; | 165 | struct efx_nic *efx = tx_queue->efx; |
145 | struct pci_dev *pci_dev = efx->pci_dev; | 166 | struct pci_dev *pci_dev = efx->pci_dev; |
@@ -147,7 +168,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
147 | skb_frag_t *fragment; | 168 | skb_frag_t *fragment; |
148 | struct page *page; | 169 | struct page *page; |
149 | int page_offset; | 170 | int page_offset; |
150 | unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; | 171 | unsigned int len, unmap_len = 0, fill_level, insert_ptr; |
151 | dma_addr_t dma_addr, unmap_addr = 0; | 172 | dma_addr_t dma_addr, unmap_addr = 0; |
152 | unsigned int dma_len; | 173 | unsigned int dma_len; |
153 | bool unmap_single; | 174 | bool unmap_single; |
@@ -156,7 +177,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
156 | 177 | ||
157 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | 178 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); |
158 | 179 | ||
159 | if (skb_shinfo((struct sk_buff *)skb)->gso_size) | 180 | if (skb_shinfo(skb)->gso_size) |
160 | return efx_enqueue_skb_tso(tx_queue, skb); | 181 | return efx_enqueue_skb_tso(tx_queue, skb); |
161 | 182 | ||
162 | /* Get size of the initial fragment */ | 183 | /* Get size of the initial fragment */ |
@@ -171,7 +192,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
171 | } | 192 | } |
172 | 193 | ||
173 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; | 194 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; |
174 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | 195 | q_space = EFX_TXQ_MASK - 1 - fill_level; |
175 | 196 | ||
176 | /* Map for DMA. Use pci_map_single rather than pci_map_page | 197 | /* Map for DMA. Use pci_map_single rather than pci_map_page |
177 | * since this is more efficient on machines with sparse | 198 | * since this is more efficient on machines with sparse |
@@ -208,16 +229,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
208 | &tx_queue->read_count; | 229 | &tx_queue->read_count; |
209 | fill_level = (tx_queue->insert_count | 230 | fill_level = (tx_queue->insert_count |
210 | - tx_queue->old_read_count); | 231 | - tx_queue->old_read_count); |
211 | q_space = (efx->type->txd_ring_mask - 1 - | 232 | q_space = EFX_TXQ_MASK - 1 - fill_level; |
212 | fill_level); | ||
213 | if (unlikely(q_space-- <= 0)) | 233 | if (unlikely(q_space-- <= 0)) |
214 | goto stop; | 234 | goto stop; |
215 | smp_mb(); | 235 | smp_mb(); |
216 | --tx_queue->stopped; | 236 | --tx_queue->stopped; |
217 | } | 237 | } |
218 | 238 | ||
219 | insert_ptr = (tx_queue->insert_count & | 239 | insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; |
220 | efx->type->txd_ring_mask); | ||
221 | buffer = &tx_queue->buffer[insert_ptr]; | 240 | buffer = &tx_queue->buffer[insert_ptr]; |
222 | efx_tsoh_free(tx_queue, buffer); | 241 | efx_tsoh_free(tx_queue, buffer); |
223 | EFX_BUG_ON_PARANOID(buffer->tsoh); | 242 | EFX_BUG_ON_PARANOID(buffer->tsoh); |
@@ -226,14 +245,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
226 | EFX_BUG_ON_PARANOID(!buffer->continuation); | 245 | EFX_BUG_ON_PARANOID(!buffer->continuation); |
227 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | 246 | EFX_BUG_ON_PARANOID(buffer->unmap_len); |
228 | 247 | ||
229 | dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); | 248 | dma_len = efx_max_tx_len(efx, dma_addr); |
230 | if (likely(dma_len > len)) | 249 | if (likely(dma_len >= len)) |
231 | dma_len = len; | 250 | dma_len = len; |
232 | 251 | ||
233 | misalign = (unsigned)dma_addr & efx->type->bug5391_mask; | ||
234 | if (misalign && dma_len + misalign > 512) | ||
235 | dma_len = 512 - misalign; | ||
236 | |||
237 | /* Fill out per descriptor fields */ | 252 | /* Fill out per descriptor fields */ |
238 | buffer->len = dma_len; | 253 | buffer->len = dma_len; |
239 | buffer->dma_addr = dma_addr; | 254 | buffer->dma_addr = dma_addr; |
@@ -266,7 +281,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
266 | buffer->continuation = false; | 281 | buffer->continuation = false; |
267 | 282 | ||
268 | /* Pass off to hardware */ | 283 | /* Pass off to hardware */ |
269 | falcon_push_buffers(tx_queue); | 284 | efx_nic_push_buffers(tx_queue); |
270 | 285 | ||
271 | return NETDEV_TX_OK; | 286 | return NETDEV_TX_OK; |
272 | 287 | ||
@@ -276,7 +291,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
276 | skb_shinfo(skb)->nr_frags + 1); | 291 | skb_shinfo(skb)->nr_frags + 1); |
277 | 292 | ||
278 | /* Mark the packet as transmitted, and free the SKB ourselves */ | 293 | /* Mark the packet as transmitted, and free the SKB ourselves */ |
279 | dev_kfree_skb_any((struct sk_buff *)skb); | 294 | dev_kfree_skb_any(skb); |
280 | goto unwind; | 295 | goto unwind; |
281 | 296 | ||
282 | stop: | 297 | stop: |
@@ -289,7 +304,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
289 | /* Work backwards until we hit the original insert pointer value */ | 304 | /* Work backwards until we hit the original insert pointer value */ |
290 | while (tx_queue->insert_count != tx_queue->write_count) { | 305 | while (tx_queue->insert_count != tx_queue->write_count) { |
291 | --tx_queue->insert_count; | 306 | --tx_queue->insert_count; |
292 | insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; | 307 | insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; |
293 | buffer = &tx_queue->buffer[insert_ptr]; | 308 | buffer = &tx_queue->buffer[insert_ptr]; |
294 | efx_dequeue_buffer(tx_queue, buffer); | 309 | efx_dequeue_buffer(tx_queue, buffer); |
295 | buffer->len = 0; | 310 | buffer->len = 0; |
@@ -318,10 +333,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, | |||
318 | { | 333 | { |
319 | struct efx_nic *efx = tx_queue->efx; | 334 | struct efx_nic *efx = tx_queue->efx; |
320 | unsigned int stop_index, read_ptr; | 335 | unsigned int stop_index, read_ptr; |
321 | unsigned int mask = tx_queue->efx->type->txd_ring_mask; | ||
322 | 336 | ||
323 | stop_index = (index + 1) & mask; | 337 | stop_index = (index + 1) & EFX_TXQ_MASK; |
324 | read_ptr = tx_queue->read_count & mask; | 338 | read_ptr = tx_queue->read_count & EFX_TXQ_MASK; |
325 | 339 | ||
326 | while (read_ptr != stop_index) { | 340 | while (read_ptr != stop_index) { |
327 | struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; | 341 | struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; |
@@ -338,28 +352,10 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, | |||
338 | buffer->len = 0; | 352 | buffer->len = 0; |
339 | 353 | ||
340 | ++tx_queue->read_count; | 354 | ++tx_queue->read_count; |
341 | read_ptr = tx_queue->read_count & mask; | 355 | read_ptr = tx_queue->read_count & EFX_TXQ_MASK; |
342 | } | 356 | } |
343 | } | 357 | } |
344 | 358 | ||
345 | /* Initiate a packet transmission on the specified TX queue. | ||
346 | * Note that returning anything other than NETDEV_TX_OK will cause the | ||
347 | * OS to free the skb. | ||
348 | * | ||
349 | * This function is split out from efx_hard_start_xmit to allow the | ||
350 | * loopback test to direct packets via specific TX queues. It is | ||
351 | * therefore a non-static inline, so as not to penalise performance | ||
352 | * for non-loopback transmissions. | ||
353 | * | ||
354 | * Context: netif_tx_lock held | ||
355 | */ | ||
356 | inline netdev_tx_t efx_xmit(struct efx_nic *efx, | ||
357 | struct efx_tx_queue *tx_queue, struct sk_buff *skb) | ||
358 | { | ||
359 | /* Map fragments for DMA and add to TX queue */ | ||
360 | return efx_enqueue_skb(tx_queue, skb); | ||
361 | } | ||
362 | |||
363 | /* Initiate a packet transmission. We use one channel per CPU | 359 | /* Initiate a packet transmission. We use one channel per CPU |
364 | * (sharing when we have more CPUs than channels). On Falcon, the TX | 360 | * (sharing when we have more CPUs than channels). On Falcon, the TX |
365 | * completion events will be directed back to the CPU that transmitted | 361 | * completion events will be directed back to the CPU that transmitted |
@@ -383,7 +379,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, | |||
383 | else | 379 | else |
384 | tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; | 380 | tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; |
385 | 381 | ||
386 | return efx_xmit(efx, tx_queue, skb); | 382 | return efx_enqueue_skb(tx_queue, skb); |
387 | } | 383 | } |
388 | 384 | ||
389 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | 385 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) |
@@ -391,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
391 | unsigned fill_level; | 387 | unsigned fill_level; |
392 | struct efx_nic *efx = tx_queue->efx; | 388 | struct efx_nic *efx = tx_queue->efx; |
393 | 389 | ||
394 | EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask); | 390 | EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK); |
395 | 391 | ||
396 | efx_dequeue_buffers(tx_queue, index); | 392 | efx_dequeue_buffers(tx_queue, index); |
397 | 393 | ||
@@ -401,7 +397,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
401 | smp_mb(); | 397 | smp_mb(); |
402 | if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { | 398 | if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { |
403 | fill_level = tx_queue->insert_count - tx_queue->read_count; | 399 | fill_level = tx_queue->insert_count - tx_queue->read_count; |
404 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { | 400 | if (fill_level < EFX_TXQ_THRESHOLD) { |
405 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); | 401 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); |
406 | 402 | ||
407 | /* Do this under netif_tx_lock(), to avoid racing | 403 | /* Do this under netif_tx_lock(), to avoid racing |
@@ -425,15 +421,15 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | |||
425 | EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); | 421 | EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); |
426 | 422 | ||
427 | /* Allocate software ring */ | 423 | /* Allocate software ring */ |
428 | txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); | 424 | txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); |
429 | tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); | 425 | tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); |
430 | if (!tx_queue->buffer) | 426 | if (!tx_queue->buffer) |
431 | return -ENOMEM; | 427 | return -ENOMEM; |
432 | for (i = 0; i <= efx->type->txd_ring_mask; ++i) | 428 | for (i = 0; i <= EFX_TXQ_MASK; ++i) |
433 | tx_queue->buffer[i].continuation = true; | 429 | tx_queue->buffer[i].continuation = true; |
434 | 430 | ||
435 | /* Allocate hardware ring */ | 431 | /* Allocate hardware ring */ |
436 | rc = falcon_probe_tx(tx_queue); | 432 | rc = efx_nic_probe_tx(tx_queue); |
437 | if (rc) | 433 | if (rc) |
438 | goto fail; | 434 | goto fail; |
439 | 435 | ||
@@ -456,7 +452,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) | |||
456 | BUG_ON(tx_queue->stopped); | 452 | BUG_ON(tx_queue->stopped); |
457 | 453 | ||
458 | /* Set up TX descriptor ring */ | 454 | /* Set up TX descriptor ring */ |
459 | falcon_init_tx(tx_queue); | 455 | efx_nic_init_tx(tx_queue); |
460 | } | 456 | } |
461 | 457 | ||
462 | void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) | 458 | void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) |
@@ -468,8 +464,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) | |||
468 | 464 | ||
469 | /* Free any buffers left in the ring */ | 465 | /* Free any buffers left in the ring */ |
470 | while (tx_queue->read_count != tx_queue->write_count) { | 466 | while (tx_queue->read_count != tx_queue->write_count) { |
471 | buffer = &tx_queue->buffer[tx_queue->read_count & | 467 | buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK]; |
472 | tx_queue->efx->type->txd_ring_mask]; | ||
473 | efx_dequeue_buffer(tx_queue, buffer); | 468 | efx_dequeue_buffer(tx_queue, buffer); |
474 | buffer->continuation = true; | 469 | buffer->continuation = true; |
475 | buffer->len = 0; | 470 | buffer->len = 0; |
@@ -483,7 +478,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | |||
483 | EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); | 478 | EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); |
484 | 479 | ||
485 | /* Flush TX queue, remove descriptor ring */ | 480 | /* Flush TX queue, remove descriptor ring */ |
486 | falcon_fini_tx(tx_queue); | 481 | efx_nic_fini_tx(tx_queue); |
487 | 482 | ||
488 | efx_release_tx_buffers(tx_queue); | 483 | efx_release_tx_buffers(tx_queue); |
489 | 484 | ||
@@ -500,7 +495,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | |||
500 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | 495 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) |
501 | { | 496 | { |
502 | EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); | 497 | EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); |
503 | falcon_remove_tx(tx_queue); | 498 | efx_nic_remove_tx(tx_queue); |
504 | 499 | ||
505 | kfree(tx_queue->buffer); | 500 | kfree(tx_queue->buffer); |
506 | tx_queue->buffer = NULL; | 501 | tx_queue->buffer = NULL; |
@@ -539,6 +534,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | |||
539 | #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) | 534 | #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) |
540 | #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) | 535 | #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) |
541 | #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) | 536 | #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) |
537 | #define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data) | ||
542 | 538 | ||
543 | /** | 539 | /** |
544 | * struct tso_state - TSO state for an SKB | 540 | * struct tso_state - TSO state for an SKB |
@@ -551,6 +547,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | |||
551 | * @unmap_len: Length of SKB fragment | 547 | * @unmap_len: Length of SKB fragment |
552 | * @unmap_addr: DMA address of SKB fragment | 548 | * @unmap_addr: DMA address of SKB fragment |
553 | * @unmap_single: DMA single vs page mapping flag | 549 | * @unmap_single: DMA single vs page mapping flag |
550 | * @protocol: Network protocol (after any VLAN header) | ||
554 | * @header_len: Number of bytes of header | 551 | * @header_len: Number of bytes of header |
555 | * @full_packet_size: Number of bytes to put in each outgoing segment | 552 | * @full_packet_size: Number of bytes to put in each outgoing segment |
556 | * | 553 | * |
@@ -571,6 +568,7 @@ struct tso_state { | |||
571 | dma_addr_t unmap_addr; | 568 | dma_addr_t unmap_addr; |
572 | bool unmap_single; | 569 | bool unmap_single; |
573 | 570 | ||
571 | __be16 protocol; | ||
574 | unsigned header_len; | 572 | unsigned header_len; |
575 | int full_packet_size; | 573 | int full_packet_size; |
576 | }; | 574 | }; |
@@ -578,9 +576,9 @@ struct tso_state { | |||
578 | 576 | ||
579 | /* | 577 | /* |
580 | * Verify that our various assumptions about sk_buffs and the conditions | 578 | * Verify that our various assumptions about sk_buffs and the conditions |
581 | * under which TSO will be attempted hold true. | 579 | * under which TSO will be attempted hold true. Return the protocol number. |
582 | */ | 580 | */ |
583 | static void efx_tso_check_safe(struct sk_buff *skb) | 581 | static __be16 efx_tso_check_protocol(struct sk_buff *skb) |
584 | { | 582 | { |
585 | __be16 protocol = skb->protocol; | 583 | __be16 protocol = skb->protocol; |
586 | 584 | ||
@@ -595,13 +593,22 @@ static void efx_tso_check_safe(struct sk_buff *skb) | |||
595 | if (protocol == htons(ETH_P_IP)) | 593 | if (protocol == htons(ETH_P_IP)) |
596 | skb_set_transport_header(skb, sizeof(*veh) + | 594 | skb_set_transport_header(skb, sizeof(*veh) + |
597 | 4 * ip_hdr(skb)->ihl); | 595 | 4 * ip_hdr(skb)->ihl); |
596 | else if (protocol == htons(ETH_P_IPV6)) | ||
597 | skb_set_transport_header(skb, sizeof(*veh) + | ||
598 | sizeof(struct ipv6hdr)); | ||
598 | } | 599 | } |
599 | 600 | ||
600 | EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP)); | 601 | if (protocol == htons(ETH_P_IP)) { |
601 | EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); | 602 | EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); |
603 | } else { | ||
604 | EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); | ||
605 | EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); | ||
606 | } | ||
602 | EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) | 607 | EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) |
603 | + (tcp_hdr(skb)->doff << 2u)) > | 608 | + (tcp_hdr(skb)->doff << 2u)) > |
604 | skb_headlen(skb)); | 609 | skb_headlen(skb)); |
610 | |||
611 | return protocol; | ||
605 | } | 612 | } |
606 | 613 | ||
607 | 614 | ||
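efx_tso_check_protocol() now also accepts IPv6 and, for VLAN-tagged frames, points the transport header past the 18-byte VLAN Ethernet header plus either the variable-length IPv4 header (ihl words of 4 bytes) or the fixed 40-byte IPv6 header. The small sketch below only reproduces that offset arithmetic; the constants are standard header sizes, not values taken from this driver.

    /* Offset arithmetic behind the VLAN branch above: where the TCP
     * header starts relative to the beginning of the frame. */
    #include <stdio.h>

    #define VLAN_ETH_HLEN 18   /* 14-byte Ethernet header + 4-byte 802.1Q tag */
    #define IPV6_HLEN     40   /* fixed IPv6 header length */

    int main(void)
    {
            unsigned ihl = 5;  /* IPv4 header length in 32-bit words, no options */

            printf("VLAN + IPv4 TCP offset: %u\n", VLAN_ETH_HLEN + 4 * ihl);
            printf("VLAN + IPv6 TCP offset: %u\n", VLAN_ETH_HLEN + IPV6_HLEN);
            return 0;
    }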
@@ -708,14 +715,14 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | |||
708 | { | 715 | { |
709 | struct efx_tx_buffer *buffer; | 716 | struct efx_tx_buffer *buffer; |
710 | struct efx_nic *efx = tx_queue->efx; | 717 | struct efx_nic *efx = tx_queue->efx; |
711 | unsigned dma_len, fill_level, insert_ptr, misalign; | 718 | unsigned dma_len, fill_level, insert_ptr; |
712 | int q_space; | 719 | int q_space; |
713 | 720 | ||
714 | EFX_BUG_ON_PARANOID(len <= 0); | 721 | EFX_BUG_ON_PARANOID(len <= 0); |
715 | 722 | ||
716 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; | 723 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; |
717 | /* -1 as there is no way to represent all descriptors used */ | 724 | /* -1 as there is no way to represent all descriptors used */ |
718 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | 725 | q_space = EFX_TXQ_MASK - 1 - fill_level; |
719 | 726 | ||
720 | while (1) { | 727 | while (1) { |
721 | if (unlikely(q_space-- <= 0)) { | 728 | if (unlikely(q_space-- <= 0)) { |
@@ -731,7 +738,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | |||
731 | *(volatile unsigned *)&tx_queue->read_count; | 738 | *(volatile unsigned *)&tx_queue->read_count; |
732 | fill_level = (tx_queue->insert_count | 739 | fill_level = (tx_queue->insert_count |
733 | - tx_queue->old_read_count); | 740 | - tx_queue->old_read_count); |
734 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | 741 | q_space = EFX_TXQ_MASK - 1 - fill_level; |
735 | if (unlikely(q_space-- <= 0)) { | 742 | if (unlikely(q_space-- <= 0)) { |
736 | *final_buffer = NULL; | 743 | *final_buffer = NULL; |
737 | return 1; | 744 | return 1; |
@@ -740,13 +747,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | |||
740 | --tx_queue->stopped; | 747 | --tx_queue->stopped; |
741 | } | 748 | } |
742 | 749 | ||
743 | insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; | 750 | insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; |
744 | buffer = &tx_queue->buffer[insert_ptr]; | 751 | buffer = &tx_queue->buffer[insert_ptr]; |
745 | ++tx_queue->insert_count; | 752 | ++tx_queue->insert_count; |
746 | 753 | ||
747 | EFX_BUG_ON_PARANOID(tx_queue->insert_count - | 754 | EFX_BUG_ON_PARANOID(tx_queue->insert_count - |
748 | tx_queue->read_count > | 755 | tx_queue->read_count > |
749 | efx->type->txd_ring_mask); | 756 | EFX_TXQ_MASK); |
750 | 757 | ||
751 | efx_tsoh_free(tx_queue, buffer); | 758 | efx_tsoh_free(tx_queue, buffer); |
752 | EFX_BUG_ON_PARANOID(buffer->len); | 759 | EFX_BUG_ON_PARANOID(buffer->len); |
@@ -757,12 +764,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | |||
757 | 764 | ||
758 | buffer->dma_addr = dma_addr; | 765 | buffer->dma_addr = dma_addr; |
759 | 766 | ||
760 | /* Ensure we do not cross a boundary unsupported by H/W */ | 767 | dma_len = efx_max_tx_len(efx, dma_addr); |
761 | dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; | ||
762 | |||
763 | misalign = (unsigned)dma_addr & efx->type->bug5391_mask; | ||
764 | if (misalign && dma_len + misalign > 512) | ||
765 | dma_len = 512 - misalign; | ||
766 | 768 | ||
767 | /* If there is enough space to send then do so */ | 769 | /* If there is enough space to send then do so */ |
768 | if (dma_len >= len) | 770 | if (dma_len >= len) |
@@ -792,8 +794,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue, | |||
792 | { | 794 | { |
793 | struct efx_tx_buffer *buffer; | 795 | struct efx_tx_buffer *buffer; |
794 | 796 | ||
795 | buffer = &tx_queue->buffer[tx_queue->insert_count & | 797 | buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK]; |
796 | tx_queue->efx->type->txd_ring_mask]; | ||
797 | efx_tsoh_free(tx_queue, buffer); | 798 | efx_tsoh_free(tx_queue, buffer); |
798 | EFX_BUG_ON_PARANOID(buffer->len); | 799 | EFX_BUG_ON_PARANOID(buffer->len); |
799 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | 800 | EFX_BUG_ON_PARANOID(buffer->unmap_len); |
@@ -818,11 +819,9 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | |||
818 | while (tx_queue->insert_count != tx_queue->write_count) { | 819 | while (tx_queue->insert_count != tx_queue->write_count) { |
819 | --tx_queue->insert_count; | 820 | --tx_queue->insert_count; |
820 | buffer = &tx_queue->buffer[tx_queue->insert_count & | 821 | buffer = &tx_queue->buffer[tx_queue->insert_count & |
821 | tx_queue->efx->type->txd_ring_mask]; | 822 | EFX_TXQ_MASK]; |
822 | efx_tsoh_free(tx_queue, buffer); | 823 | efx_tsoh_free(tx_queue, buffer); |
823 | EFX_BUG_ON_PARANOID(buffer->skb); | 824 | EFX_BUG_ON_PARANOID(buffer->skb); |
824 | buffer->len = 0; | ||
825 | buffer->continuation = true; | ||
826 | if (buffer->unmap_len) { | 825 | if (buffer->unmap_len) { |
827 | unmap_addr = (buffer->dma_addr + buffer->len - | 826 | unmap_addr = (buffer->dma_addr + buffer->len - |
828 | buffer->unmap_len); | 827 | buffer->unmap_len); |
@@ -836,6 +835,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | |||
836 | PCI_DMA_TODEVICE); | 835 | PCI_DMA_TODEVICE); |
837 | buffer->unmap_len = 0; | 836 | buffer->unmap_len = 0; |
838 | } | 837 | } |
838 | buffer->len = 0; | ||
839 | buffer->continuation = true; | ||
839 | } | 840 | } |
840 | } | 841 | } |
841 | 842 | ||
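In efx_enqueue_unwind() the clearing of buffer->len and buffer->continuation moves below the unmap block, because the unmap address a few lines above is recovered as dma_addr + len - unmap_len and therefore still needs the original length. The toy model below only demonstrates that arithmetic, with invented addresses.

    /* Why the order matters: the start of the DMA mapping is recovered
     * from the last descriptor as dma_addr + len - unmap_len, so len must
     * not be zeroed until after the unmap.  Addresses are made up. */
    #include <stdio.h>

    struct buf { unsigned long dma_addr; unsigned len, unmap_len; };

    static unsigned long unmap_addr(const struct buf *b)
    {
            return b->dma_addr + b->len - b->unmap_len;
    }

    int main(void)
    {
            struct buf b = { .dma_addr = 0x10000, .len = 0x600, .unmap_len = 0x1600 };

            printf("unmap from 0x%lx\n", unmap_addr(&b));  /* 0xf000: correct */
            b.len = 0;                                      /* cleared too early */
            printf("unmap from 0x%lx\n", unmap_addr(&b));  /* 0xea00: wrong   */
            return 0;
    }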
@@ -850,7 +851,10 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb) | |||
850 | + PTR_DIFF(tcp_hdr(skb), skb->data)); | 851 | + PTR_DIFF(tcp_hdr(skb), skb->data)); |
851 | st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; | 852 | st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; |
852 | 853 | ||
853 | st->ipv4_id = ntohs(ip_hdr(skb)->id); | 854 | if (st->protocol == htons(ETH_P_IP)) |
855 | st->ipv4_id = ntohs(ip_hdr(skb)->id); | ||
856 | else | ||
857 | st->ipv4_id = 0; | ||
854 | st->seqnum = ntohl(tcp_hdr(skb)->seq); | 858 | st->seqnum = ntohl(tcp_hdr(skb)->seq); |
855 | 859 | ||
856 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); | 860 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); |
@@ -965,7 +969,6 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, | |||
965 | struct tso_state *st) | 969 | struct tso_state *st) |
966 | { | 970 | { |
967 | struct efx_tso_header *tsoh; | 971 | struct efx_tso_header *tsoh; |
968 | struct iphdr *tsoh_iph; | ||
969 | struct tcphdr *tsoh_th; | 972 | struct tcphdr *tsoh_th; |
970 | unsigned ip_length; | 973 | unsigned ip_length; |
971 | u8 *header; | 974 | u8 *header; |
@@ -989,7 +992,6 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, | |||
989 | 992 | ||
990 | header = TSOH_BUFFER(tsoh); | 993 | header = TSOH_BUFFER(tsoh); |
991 | tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); | 994 | tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); |
992 | tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); | ||
993 | 995 | ||
994 | /* Copy and update the headers. */ | 996 | /* Copy and update the headers. */ |
995 | memcpy(header, skb->data, st->header_len); | 997 | memcpy(header, skb->data, st->header_len); |
@@ -1007,11 +1009,22 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, | |||
1007 | tsoh_th->fin = tcp_hdr(skb)->fin; | 1009 | tsoh_th->fin = tcp_hdr(skb)->fin; |
1008 | tsoh_th->psh = tcp_hdr(skb)->psh; | 1010 | tsoh_th->psh = tcp_hdr(skb)->psh; |
1009 | } | 1011 | } |
1010 | tsoh_iph->tot_len = htons(ip_length); | ||
1011 | 1012 | ||
1012 | /* Linux leaves suitable gaps in the IP ID space for us to fill. */ | 1013 | if (st->protocol == htons(ETH_P_IP)) { |
1013 | tsoh_iph->id = htons(st->ipv4_id); | 1014 | struct iphdr *tsoh_iph = |
1014 | st->ipv4_id++; | 1015 | (struct iphdr *)(header + SKB_IPV4_OFF(skb)); |
1016 | |||
1017 | tsoh_iph->tot_len = htons(ip_length); | ||
1018 | |||
1019 | /* Linux leaves suitable gaps in the IP ID space for us to fill. */ | ||
1020 | tsoh_iph->id = htons(st->ipv4_id); | ||
1021 | st->ipv4_id++; | ||
1022 | } else { | ||
1023 | struct ipv6hdr *tsoh_iph = | ||
1024 | (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); | ||
1025 | |||
1026 | tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); | ||
1027 | } | ||
1015 | 1028 | ||
1016 | st->packet_space = skb_shinfo(skb)->gso_size; | 1029 | st->packet_space = skb_shinfo(skb)->gso_size; |
1017 | ++tx_queue->tso_packets; | 1030 | ++tx_queue->tso_packets; |
@@ -1041,8 +1054,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | |||
1041 | int frag_i, rc, rc2 = NETDEV_TX_OK; | 1054 | int frag_i, rc, rc2 = NETDEV_TX_OK; |
1042 | struct tso_state state; | 1055 | struct tso_state state; |
1043 | 1056 | ||
1044 | /* Verify TSO is safe - these checks should never fail. */ | 1057 | /* Find the packet protocol and sanity-check it */ |
1045 | efx_tso_check_safe(skb); | 1058 | state.protocol = efx_tso_check_protocol(skb); |
1046 | 1059 | ||
1047 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | 1060 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); |
1048 | 1061 | ||
@@ -1092,14 +1105,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | |||
1092 | } | 1105 | } |
1093 | 1106 | ||
1094 | /* Pass off to hardware */ | 1107 | /* Pass off to hardware */ |
1095 | falcon_push_buffers(tx_queue); | 1108 | efx_nic_push_buffers(tx_queue); |
1096 | 1109 | ||
1097 | tx_queue->tso_bursts++; | 1110 | tx_queue->tso_bursts++; |
1098 | return NETDEV_TX_OK; | 1111 | return NETDEV_TX_OK; |
1099 | 1112 | ||
1100 | mem_err: | 1113 | mem_err: |
1101 | EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); | 1114 | EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); |
1102 | dev_kfree_skb_any((struct sk_buff *)skb); | 1115 | dev_kfree_skb_any(skb); |
1103 | goto unwind; | 1116 | goto unwind; |
1104 | 1117 | ||
1105 | stop: | 1118 | stop: |
@@ -1135,7 +1148,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue) | |||
1135 | unsigned i; | 1148 | unsigned i; |
1136 | 1149 | ||
1137 | if (tx_queue->buffer) { | 1150 | if (tx_queue->buffer) { |
1138 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) | 1151 | for (i = 0; i <= EFX_TXQ_MASK; ++i) |
1139 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | 1152 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); |
1140 | } | 1153 | } |
1141 | 1154 | ||
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h deleted file mode 100644 index e3678962a5b4..000000000000 --- a/drivers/net/sfc/tx.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_TX_H | ||
12 | #define EFX_TX_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | |||
16 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); | ||
17 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); | ||
18 | void efx_init_tx_queue(struct efx_tx_queue *tx_queue); | ||
19 | void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); | ||
20 | |||
21 | netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, | ||
22 | struct net_device *net_dev); | ||
23 | void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); | ||
24 | |||
25 | #endif /* EFX_TX_H */ | ||
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index c821c15445a0..acd9c734e483 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2006-2008 Solarflare Communications Inc. | 3 | * Copyright 2006-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -16,7 +16,9 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) |
20 | #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) | ||
21 | #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) | ||
20 | #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) | 22 | #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) |
21 | #define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \ | 23 | #define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \ |
22 | (efx)->phy_type == PHY_TYPE_SFT9001B) | 24 | (efx)->phy_type == PHY_TYPE_SFT9001B) |
@@ -27,20 +29,22 @@ | |||
27 | #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS | 29 | #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS |
28 | /* Bit-bashed I2C reads cause performance drop */ | 30 | /* Bit-bashed I2C reads cause performance drop */ |
29 | #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G | 31 | #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G |
30 | /* TX pkt parser problem with <= 16 byte TXes */ | ||
31 | #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS | ||
32 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor | 32 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor |
33 | * or a PCIe error (bug 11028) */ | 33 | * or a PCIe error (bug 11028) */ |
34 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS | 34 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS |
35 | /* Transmit flow control may get disabled */ | 35 | /* Transmit flow control may get disabled */ |
36 | #define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS | 36 | #define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB |
37 | /* Flush events can take a very long time to appear */ | ||
38 | #define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS | ||
39 | /* Truncated IPv4 packets can confuse the TX packet parser */ | 37 | /* Truncated IPv4 packets can confuse the TX packet parser */ |
40 | #define EFX_WORKAROUND_15592 EFX_WORKAROUND_ALWAYS | 38 | #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB |
39 | /* Legacy ISR read can return zero once */ | ||
40 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA | ||
41 | /* Legacy interrupt storm when interrupt fifo fills */ | ||
42 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA | ||
41 | 43 | ||
42 | /* Spurious parity errors in TSORT buffers */ | 44 | /* Spurious parity errors in TSORT buffers */ |
43 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | 45 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A |
46 | /* Unaligned read request >512 bytes after aligning may break TSORT */ | ||
47 | #define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A | ||
44 | /* iSCSI parsing errors */ | 48 | /* iSCSI parsing errors */ |
45 | #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A | 49 | #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A |
46 | /* RX events go missing */ | 50 | /* RX events go missing */ |
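workarounds.h now keys each erratum on the NIC revision rather than applying it unconditionally, so a call site such as EFX_WORKAROUND_5391(efx) in the TX path reads as "does this hardware need workaround 5391". The compilable sketch below mimics that macro pattern; the enum values and the efx_nic_rev() definition are simplified stand-ins, not the driver's real ones.

    /* Simplified model of the revision-gated workaround predicates. */
    #include <stdio.h>

    enum efx_rev { EFX_REV_FALCON_A1, EFX_REV_FALCON_B0, EFX_REV_SIENA_A0 };

    struct efx_nic { enum efx_rev rev; };

    #define efx_nic_rev(efx)              ((efx)->rev)
    #define EFX_WORKAROUND_FALCON_A(efx)  (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
    #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
    /* Unaligned-read erratum: only Falcon A needs it. */
    #define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A

    int main(void)
    {
            struct efx_nic falcon_a = { EFX_REV_FALCON_A1 };
            struct efx_nic siena    = { EFX_REV_SIENA_A0 };

            printf("Falcon A needs 5391: %d\n", EFX_WORKAROUND_5391(&falcon_a));
            printf("Siena needs 5391:    %d\n", EFX_WORKAROUND_5391(&siena));
            return 0;
    }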
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c deleted file mode 100644 index e6b3d5eaddba..000000000000 --- a/drivers/net/sfc/xfp_phy.c +++ /dev/null | |||
@@ -1,250 +0,0 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | /* | ||
10 | * Driver for SFP+ and XFP optical PHYs plus some support specific to the | ||
11 | * AMCC QT20xx adapters; see www.amcc.com for details | ||
12 | */ | ||
13 | |||
14 | #include <linux/timer.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include "efx.h" | ||
17 | #include "mdio_10g.h" | ||
18 | #include "phy.h" | ||
19 | #include "falcon.h" | ||
20 | |||
21 | #define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS | \ | ||
22 | MDIO_DEVS_PMAPMD | \ | ||
23 | MDIO_DEVS_PHYXS) | ||
24 | |||
25 | #define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ | ||
26 | (1 << LOOPBACK_PMAPMD) | \ | ||
27 | (1 << LOOPBACK_NETWORK)) | ||
28 | |||
29 | /****************************************************************************/ | ||
30 | /* Quake-specific MDIO registers */ | ||
31 | #define MDIO_QUAKE_LED0_REG (0xD006) | ||
32 | |||
33 | /* QT2025C only */ | ||
34 | #define PCS_FW_HEARTBEAT_REG 0xd7ee | ||
35 | #define PCS_FW_HEARTB_LBN 0 | ||
36 | #define PCS_FW_HEARTB_WIDTH 8 | ||
37 | #define PCS_UC8051_STATUS_REG 0xd7fd | ||
38 | #define PCS_UC_STATUS_LBN 0 | ||
39 | #define PCS_UC_STATUS_WIDTH 8 | ||
40 | #define PCS_UC_STATUS_FW_SAVE 0x20 | ||
41 | #define PMA_PMD_FTX_CTRL2_REG 0xc309 | ||
42 | #define PMA_PMD_FTX_STATIC_LBN 13 | ||
43 | #define PMA_PMD_VEND1_REG 0xc001 | ||
44 | #define PMA_PMD_VEND1_LBTXD_LBN 15 | ||
45 | #define PCS_VEND1_REG 0xc000 | ||
46 | #define PCS_VEND1_LBTXD_LBN 5 | ||
47 | |||
48 | void xfp_set_led(struct efx_nic *p, int led, int mode) | ||
49 | { | ||
50 | int addr = MDIO_QUAKE_LED0_REG + led; | ||
51 | efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); | ||
52 | } | ||
53 | |||
54 | struct xfp_phy_data { | ||
55 | enum efx_phy_mode phy_mode; | ||
56 | }; | ||
57 | |||
58 | #define XFP_MAX_RESET_TIME 500 | ||
59 | #define XFP_RESET_WAIT 10 | ||
60 | |||
61 | static int qt2025c_wait_reset(struct efx_nic *efx) | ||
62 | { | ||
63 | unsigned long timeout = jiffies + 10 * HZ; | ||
64 | int reg, old_counter = 0; | ||
65 | |||
66 | /* Wait for firmware heartbeat to start */ | ||
67 | for (;;) { | ||
68 | int counter; | ||
69 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG); | ||
70 | if (reg < 0) | ||
71 | return reg; | ||
72 | counter = ((reg >> PCS_FW_HEARTB_LBN) & | ||
73 | ((1 << PCS_FW_HEARTB_WIDTH) - 1)); | ||
74 | if (old_counter == 0) | ||
75 | old_counter = counter; | ||
76 | else if (counter != old_counter) | ||
77 | break; | ||
78 | if (time_after(jiffies, timeout)) | ||
79 | return -ETIMEDOUT; | ||
80 | msleep(10); | ||
81 | } | ||
82 | |||
83 | /* Wait for firmware status to look good */ | ||
84 | for (;;) { | ||
85 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); | ||
86 | if (reg < 0) | ||
87 | return reg; | ||
88 | if ((reg & | ||
89 | ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >= | ||
90 | PCS_UC_STATUS_FW_SAVE) | ||
91 | break; | ||
92 | if (time_after(jiffies, timeout)) | ||
93 | return -ETIMEDOUT; | ||
94 | msleep(100); | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
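qt2025c_wait_reset() above is a poll-until-a-counter-changes loop: the firmware heartbeat register must tick at least once before the PHY is trusted, with a hard deadline so the driver never spins forever. The userspace analogue below shows the same shape; read_heartbeat() is a placeholder for the MDIO register read and the timing constants are illustrative.

    /* Userspace analogue of the heartbeat wait: poll a counter until it
     * changes, giving up after a deadline. */
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int read_heartbeat(void)
    {
            static int tick;
            return tick++ & 0xff;   /* pretend the firmware ticks each call */
    }

    static int wait_for_heartbeat(int timeout_sec)
    {
            time_t deadline = time(NULL) + timeout_sec;
            int old_counter = 0;

            for (;;) {
                    int counter = read_heartbeat();
                    if (old_counter == 0)
                            old_counter = counter;       /* first sample */
                    else if (counter != old_counter)
                            return 0;                    /* firmware is alive */
                    if (time(NULL) > deadline)
                            return -1;                   /* timed out */
                    usleep(10 * 1000);                   /* like msleep(10) */
            }
    }

    int main(void)
    {
            printf("heartbeat: %s\n",
                   wait_for_heartbeat(10) == 0 ? "ok" : "timed out");
            return 0;
    }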
100 | static int xfp_reset_phy(struct efx_nic *efx) | ||
101 | { | ||
102 | int rc; | ||
103 | |||
104 | if (efx->phy_type == PHY_TYPE_QT2025C) { | ||
105 | /* Wait for the reset triggered by falcon_reset_hw() | ||
106 | * to complete */ | ||
107 | rc = qt2025c_wait_reset(efx); | ||
108 | if (rc < 0) | ||
109 | goto fail; | ||
110 | } else { | ||
111 | /* Reset the PHYXS MMD. This is documented as doing | ||
112 | * a complete soft reset. */ | ||
113 | rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, | ||
114 | XFP_MAX_RESET_TIME / XFP_RESET_WAIT, | ||
115 | XFP_RESET_WAIT); | ||
116 | if (rc < 0) | ||
117 | goto fail; | ||
118 | } | ||
119 | |||
120 | /* Wait 250ms for the PHY to complete bootup */ | ||
121 | msleep(250); | ||
122 | |||
123 | /* Check that all the MMDs we expect are present and responding. We | ||
124 | * expect faults on some if the link is down, but not on the PHY XS */ | ||
125 | rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS); | ||
126 | if (rc < 0) | ||
127 | goto fail; | ||
128 | |||
129 | efx->board_info.init_leds(efx); | ||
130 | |||
131 | return rc; | ||
132 | |||
133 | fail: | ||
134 | EFX_ERR(efx, "PHY reset timed out\n"); | ||
135 | return rc; | ||
136 | } | ||
137 | |||
138 | static int xfp_phy_init(struct efx_nic *efx) | ||
139 | { | ||
140 | struct xfp_phy_data *phy_data; | ||
141 | u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); | ||
142 | int rc; | ||
143 | |||
144 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); | ||
145 | if (!phy_data) | ||
146 | return -ENOMEM; | ||
147 | efx->phy_data = phy_data; | ||
148 | |||
149 | EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", | ||
150 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), | ||
151 | efx_mdio_id_rev(devid)); | ||
152 | |||
153 | phy_data->phy_mode = efx->phy_mode; | ||
154 | |||
155 | rc = xfp_reset_phy(efx); | ||
156 | |||
157 | EFX_INFO(efx, "PHY init %s.\n", | ||
158 | rc ? "failed" : "successful"); | ||
159 | if (rc < 0) | ||
160 | goto fail; | ||
161 | |||
162 | return 0; | ||
163 | |||
164 | fail: | ||
165 | kfree(efx->phy_data); | ||
166 | efx->phy_data = NULL; | ||
167 | return rc; | ||
168 | } | ||
169 | |||
170 | static void xfp_phy_clear_interrupt(struct efx_nic *efx) | ||
171 | { | ||
172 | /* Read to clear link status alarm */ | ||
173 | efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT); | ||
174 | } | ||
175 | |||
176 | static int xfp_link_ok(struct efx_nic *efx) | ||
177 | { | ||
178 | return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS); | ||
179 | } | ||
180 | |||
181 | static void xfp_phy_poll(struct efx_nic *efx) | ||
182 | { | ||
183 | int link_up = xfp_link_ok(efx); | ||
184 | /* Simulate a PHY event if link state has changed */ | ||
185 | if (link_up != efx->link_up) | ||
186 | falcon_sim_phy_event(efx); | ||
187 | } | ||
188 | |||
189 | static void xfp_phy_reconfigure(struct efx_nic *efx) | ||
190 | { | ||
191 | struct xfp_phy_data *phy_data = efx->phy_data; | ||
192 | |||
193 | if (efx->phy_type == PHY_TYPE_QT2025C) { | ||
194 | /* There are several different register bits which can | ||
195 | * disable TX (and save power) on direct-attach cables | ||
196 | * or optical transceivers, varying somewhat between | ||
197 | * firmware versions. Only 'static mode' appears to | ||
198 | * cover everything. */ | ||
199 | mdio_set_flag( | ||
200 | &efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD, | ||
201 | PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN, | ||
202 | efx->phy_mode & PHY_MODE_TX_DISABLED || | ||
203 | efx->phy_mode & PHY_MODE_LOW_POWER || | ||
204 | efx->loopback_mode == LOOPBACK_PCS || | ||
205 | efx->loopback_mode == LOOPBACK_PMAPMD); | ||
206 | } else { | ||
207 | /* Reset the PHY when moving from tx off to tx on */ | ||
208 | if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && | ||
209 | (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) | ||
210 | xfp_reset_phy(efx); | ||
211 | |||
212 | efx_mdio_transmit_disable(efx); | ||
213 | } | ||
214 | |||
215 | efx_mdio_phy_reconfigure(efx); | ||
216 | |||
217 | phy_data->phy_mode = efx->phy_mode; | ||
218 | efx->link_up = xfp_link_ok(efx); | ||
219 | efx->link_speed = 10000; | ||
220 | efx->link_fd = true; | ||
221 | efx->link_fc = efx->wanted_fc; | ||
222 | } | ||
223 | |||
224 | static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
225 | { | ||
226 | mdio45_ethtool_gset(&efx->mdio, ecmd); | ||
227 | } | ||
228 | |||
229 | static void xfp_phy_fini(struct efx_nic *efx) | ||
230 | { | ||
231 | /* Clobber the LED if it was blinking */ | ||
232 | efx->board_info.blink(efx, false); | ||
233 | |||
234 | /* Free the context block */ | ||
235 | kfree(efx->phy_data); | ||
236 | efx->phy_data = NULL; | ||
237 | } | ||
238 | |||
239 | struct efx_phy_operations falcon_xfp_phy_ops = { | ||
240 | .macs = EFX_XMAC, | ||
241 | .init = xfp_phy_init, | ||
242 | .reconfigure = xfp_phy_reconfigure, | ||
243 | .poll = xfp_phy_poll, | ||
244 | .fini = xfp_phy_fini, | ||
245 | .clear_interrupt = xfp_phy_clear_interrupt, | ||
246 | .get_settings = xfp_phy_get_settings, | ||
247 | .set_settings = efx_mdio_set_settings, | ||
248 | .mmds = XFP_REQUIRED_DEVS, | ||
249 | .loopbacks = XFP_LOOPBACKS, | ||
250 | }; | ||