diff options
Diffstat (limited to 'drivers/net/sfc')
37 files changed, 25511 insertions, 0 deletions
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
new file mode 100644
index 00000000000..a3d5bb9e39d
--- /dev/null
+++ b/drivers/net/sfc/Kconfig
@@ -0,0 +1,21 @@
config SFC
	tristate "Solarflare SFC4000/SFC9000-family support"
	depends on PCI && INET
	select MDIO
	select CRC32
	select I2C
	select I2C_ALGOBIT
	help
	  This driver supports 10-gigabit Ethernet cards based on
	  the Solarflare SFC4000 and SFC9000-family controllers.

	  To compile this driver as a module, choose M here.  The module
	  will be called sfc.

config SFC_MTD
	bool "Solarflare SFC4000/SFC9000-family MTD support"
	depends on SFC && MTD && !(SFC=y && MTD=m)
	default y
	help
	  This exposes the on-board flash memory as MTD devices (e.g.
	  /dev/mtd1).  This makes it possible to upload new firmware
	  to the NIC.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644
index 00000000000..ab31c7124db
--- /dev/null
+++ b/drivers/net/sfc/Makefile
@@ -0,0 +1,8 @@
# Objects always built into the sfc driver
sfc-y			+= efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
			   falcon_xmac.o mcdi_mac.o \
			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
			   tenxpress.o txc43128_phy.o falcon_boards.o \
			   mcdi.o mcdi_phy.o
# MTD support is optional (CONFIG_SFC_MTD)
sfc-$(CONFIG_SFC_MTD)	+= mtd.o

obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
new file mode 100644
index 00000000000..098ac2ad757
--- /dev/null
+++ b/drivers/net/sfc/bitfield.h
@@ -0,0 +1,538 @@
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_BITFIELD_H | ||
12 | #define EFX_BITFIELD_H | ||
13 | |||
14 | /* | ||
15 | * Efx bitfield access | ||
16 | * | ||
17 | * Efx NICs make extensive use of bitfields up to 128 bits | ||
18 | * wide. Since there is no native 128-bit datatype on most systems, | ||
19 | * and since 64-bit datatypes are inefficient on 32-bit systems and | ||
20 | * vice versa, we wrap accesses in a way that uses the most efficient | ||
21 | * datatype. | ||
22 | * | ||
23 | * The NICs are PCI devices and therefore little-endian. Since most | ||
24 | * of the quantities that we deal with are DMAed to/from host memory, | ||
25 | * we define our datatypes (efx_oword_t, efx_qword_t and | ||
26 | * efx_dword_t) to be little-endian. | ||
27 | */ | ||
28 | |||
29 | /* Lowest bit numbers and widths */ | ||
30 | #define EFX_DUMMY_FIELD_LBN 0 | ||
31 | #define EFX_DUMMY_FIELD_WIDTH 0 | ||
32 | #define EFX_DWORD_0_LBN 0 | ||
33 | #define EFX_DWORD_0_WIDTH 32 | ||
34 | #define EFX_DWORD_1_LBN 32 | ||
35 | #define EFX_DWORD_1_WIDTH 32 | ||
36 | #define EFX_DWORD_2_LBN 64 | ||
37 | #define EFX_DWORD_2_WIDTH 32 | ||
38 | #define EFX_DWORD_3_LBN 96 | ||
39 | #define EFX_DWORD_3_WIDTH 32 | ||
40 | #define EFX_QWORD_0_LBN 0 | ||
41 | #define EFX_QWORD_0_WIDTH 64 | ||
42 | |||
43 | /* Specified attribute (e.g. LBN) of the specified field */ | ||
44 | #define EFX_VAL(field, attribute) field ## _ ## attribute | ||
45 | /* Low bit number of the specified field */ | ||
46 | #define EFX_LOW_BIT(field) EFX_VAL(field, LBN) | ||
47 | /* Bit width of the specified field */ | ||
48 | #define EFX_WIDTH(field) EFX_VAL(field, WIDTH) | ||
49 | /* High bit number of the specified field */ | ||
50 | #define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) | ||
51 | /* Mask equal in width to the specified field. | ||
52 | * | ||
53 | * For example, a field with width 5 would have a mask of 0x1f. | ||
54 | * | ||
55 | * The maximum width mask that can be generated is 64 bits. | ||
56 | */ | ||
57 | #define EFX_MASK64(width) \ | ||
58 | ((width) == 64 ? ~((u64) 0) : \ | ||
59 | (((((u64) 1) << (width))) - 1)) | ||
60 | |||
61 | /* Mask equal in width to the specified field. | ||
62 | * | ||
63 | * For example, a field with width 5 would have a mask of 0x1f. | ||
64 | * | ||
65 | * The maximum width mask that can be generated is 32 bits. Use | ||
66 | * EFX_MASK64 for higher width fields. | ||
67 | */ | ||
68 | #define EFX_MASK32(width) \ | ||
69 | ((width) == 32 ? ~((u32) 0) : \ | ||
70 | (((((u32) 1) << (width))) - 1)) | ||
71 | |||
72 | /* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ | ||
73 | typedef union efx_dword { | ||
74 | __le32 u32[1]; | ||
75 | } efx_dword_t; | ||
76 | |||
77 | /* A quadword (i.e. 8 byte) datatype - little-endian in HW */ | ||
78 | typedef union efx_qword { | ||
79 | __le64 u64[1]; | ||
80 | __le32 u32[2]; | ||
81 | efx_dword_t dword[2]; | ||
82 | } efx_qword_t; | ||
83 | |||
84 | /* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */ | ||
85 | typedef union efx_oword { | ||
86 | __le64 u64[2]; | ||
87 | efx_qword_t qword[2]; | ||
88 | __le32 u32[4]; | ||
89 | efx_dword_t dword[4]; | ||
90 | } efx_oword_t; | ||
91 | |||
92 | /* Format string and value expanders for printk */ | ||
93 | #define EFX_DWORD_FMT "%08x" | ||
94 | #define EFX_QWORD_FMT "%08x:%08x" | ||
95 | #define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" | ||
96 | #define EFX_DWORD_VAL(dword) \ | ||
97 | ((unsigned int) le32_to_cpu((dword).u32[0])) | ||
98 | #define EFX_QWORD_VAL(qword) \ | ||
99 | ((unsigned int) le32_to_cpu((qword).u32[1])), \ | ||
100 | ((unsigned int) le32_to_cpu((qword).u32[0])) | ||
101 | #define EFX_OWORD_VAL(oword) \ | ||
102 | ((unsigned int) le32_to_cpu((oword).u32[3])), \ | ||
103 | ((unsigned int) le32_to_cpu((oword).u32[2])), \ | ||
104 | ((unsigned int) le32_to_cpu((oword).u32[1])), \ | ||
105 | ((unsigned int) le32_to_cpu((oword).u32[0])) | ||
106 | |||
107 | /* | ||
108 | * Extract bit field portion [low,high) from the native-endian element | ||
109 | * which contains bits [min,max). | ||
110 | * | ||
111 | * For example, suppose "element" represents the high 32 bits of a | ||
112 | * 64-bit value, and we wish to extract the bits belonging to the bit | ||
113 | * field occupying bits 28-45 of this 64-bit value. | ||
114 | * | ||
115 | * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give | ||
116 | * | ||
117 | * ( element ) << 4 | ||
118 | * | ||
119 | * The result will contain the relevant bits filled in in the range | ||
120 | * [0,high-low), with garbage in bits [high-low+1,...). | ||
121 | */ | ||
122 | #define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ | ||
123 | (((low > max) || (high < min)) ? 0 : \ | ||
124 | ((low > min) ? \ | ||
125 | ((native_element) >> (low - min)) : \ | ||
126 | ((native_element) << (min - low)))) | ||
127 | |||
128 | /* | ||
129 | * Extract bit field portion [low,high) from the 64-bit little-endian | ||
130 | * element which contains bits [min,max) | ||
131 | */ | ||
132 | #define EFX_EXTRACT64(element, min, max, low, high) \ | ||
133 | EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) | ||
134 | |||
135 | /* | ||
136 | * Extract bit field portion [low,high) from the 32-bit little-endian | ||
137 | * element which contains bits [min,max) | ||
138 | */ | ||
139 | #define EFX_EXTRACT32(element, min, max, low, high) \ | ||
140 | EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) | ||
141 | |||
142 | #define EFX_EXTRACT_OWORD64(oword, low, high) \ | ||
143 | ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ | ||
144 | EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ | ||
145 | EFX_MASK64(high + 1 - low)) | ||
146 | |||
147 | #define EFX_EXTRACT_QWORD64(qword, low, high) \ | ||
148 | (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ | ||
149 | EFX_MASK64(high + 1 - low)) | ||
150 | |||
151 | #define EFX_EXTRACT_OWORD32(oword, low, high) \ | ||
152 | ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ | ||
153 | EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ | ||
154 | EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ | ||
155 | EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ | ||
156 | EFX_MASK32(high + 1 - low)) | ||
157 | |||
158 | #define EFX_EXTRACT_QWORD32(qword, low, high) \ | ||
159 | ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ | ||
160 | EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ | ||
161 | EFX_MASK32(high + 1 - low)) | ||
162 | |||
163 | #define EFX_EXTRACT_DWORD(dword, low, high) \ | ||
164 | (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ | ||
165 | EFX_MASK32(high + 1 - low)) | ||
166 | |||
167 | #define EFX_OWORD_FIELD64(oword, field) \ | ||
168 | EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ | ||
169 | EFX_HIGH_BIT(field)) | ||
170 | |||
171 | #define EFX_QWORD_FIELD64(qword, field) \ | ||
172 | EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \ | ||
173 | EFX_HIGH_BIT(field)) | ||
174 | |||
175 | #define EFX_OWORD_FIELD32(oword, field) \ | ||
176 | EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \ | ||
177 | EFX_HIGH_BIT(field)) | ||
178 | |||
179 | #define EFX_QWORD_FIELD32(qword, field) \ | ||
180 | EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \ | ||
181 | EFX_HIGH_BIT(field)) | ||
182 | |||
183 | #define EFX_DWORD_FIELD(dword, field) \ | ||
184 | EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \ | ||
185 | EFX_HIGH_BIT(field)) | ||
186 | |||
187 | #define EFX_OWORD_IS_ZERO64(oword) \ | ||
188 | (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) | ||
189 | |||
190 | #define EFX_QWORD_IS_ZERO64(qword) \ | ||
191 | (((qword).u64[0]) == (__force __le64) 0) | ||
192 | |||
193 | #define EFX_OWORD_IS_ZERO32(oword) \ | ||
194 | (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ | ||
195 | == (__force __le32) 0) | ||
196 | |||
197 | #define EFX_QWORD_IS_ZERO32(qword) \ | ||
198 | (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) | ||
199 | |||
200 | #define EFX_DWORD_IS_ZERO(dword) \ | ||
201 | (((dword).u32[0]) == (__force __le32) 0) | ||
202 | |||
203 | #define EFX_OWORD_IS_ALL_ONES64(oword) \ | ||
204 | (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) | ||
205 | |||
206 | #define EFX_QWORD_IS_ALL_ONES64(qword) \ | ||
207 | ((qword).u64[0] == ~((__force __le64) 0)) | ||
208 | |||
209 | #define EFX_OWORD_IS_ALL_ONES32(oword) \ | ||
210 | (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ | ||
211 | == ~((__force __le32) 0)) | ||
212 | |||
213 | #define EFX_QWORD_IS_ALL_ONES32(qword) \ | ||
214 | (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) | ||
215 | |||
216 | #define EFX_DWORD_IS_ALL_ONES(dword) \ | ||
217 | ((dword).u32[0] == ~((__force __le32) 0)) | ||
218 | |||
219 | #if BITS_PER_LONG == 64 | ||
220 | #define EFX_OWORD_FIELD EFX_OWORD_FIELD64 | ||
221 | #define EFX_QWORD_FIELD EFX_QWORD_FIELD64 | ||
222 | #define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 | ||
223 | #define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 | ||
224 | #define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 | ||
225 | #define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 | ||
226 | #else | ||
227 | #define EFX_OWORD_FIELD EFX_OWORD_FIELD32 | ||
228 | #define EFX_QWORD_FIELD EFX_QWORD_FIELD32 | ||
229 | #define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 | ||
230 | #define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 | ||
231 | #define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 | ||
232 | #define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 | ||
233 | #endif | ||
234 | |||
235 | /* | ||
236 | * Construct bit field portion | ||
237 | * | ||
238 | * Creates the portion of the bit field [low,high) that lies within | ||
239 | * the range [min,max). | ||
240 | */ | ||
241 | #define EFX_INSERT_NATIVE64(min, max, low, high, value) \ | ||
242 | (((low > max) || (high < min)) ? 0 : \ | ||
243 | ((low > min) ? \ | ||
244 | (((u64) (value)) << (low - min)) : \ | ||
245 | (((u64) (value)) >> (min - low)))) | ||
246 | |||
247 | #define EFX_INSERT_NATIVE32(min, max, low, high, value) \ | ||
248 | (((low > max) || (high < min)) ? 0 : \ | ||
249 | ((low > min) ? \ | ||
250 | (((u32) (value)) << (low - min)) : \ | ||
251 | (((u32) (value)) >> (min - low)))) | ||
252 | |||
253 | #define EFX_INSERT_NATIVE(min, max, low, high, value) \ | ||
254 | ((((max - min) >= 32) || ((high - low) >= 32)) ? \ | ||
255 | EFX_INSERT_NATIVE64(min, max, low, high, value) : \ | ||
256 | EFX_INSERT_NATIVE32(min, max, low, high, value)) | ||
257 | |||
258 | /* | ||
259 | * Construct bit field portion | ||
260 | * | ||
261 | * Creates the portion of the named bit field that lies within the | ||
262 | * range [min,max). | ||
263 | */ | ||
264 | #define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ | ||
265 | EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ | ||
266 | EFX_HIGH_BIT(field), value) | ||
267 | |||
268 | /* | ||
269 | * Construct bit field | ||
270 | * | ||
271 | * Creates the portion of the named bit fields that lie within the | ||
272 | * range [min,max). | ||
273 | */ | ||
274 | #define EFX_INSERT_FIELDS_NATIVE(min, max, \ | ||
275 | field1, value1, \ | ||
276 | field2, value2, \ | ||
277 | field3, value3, \ | ||
278 | field4, value4, \ | ||
279 | field5, value5, \ | ||
280 | field6, value6, \ | ||
281 | field7, value7, \ | ||
282 | field8, value8, \ | ||
283 | field9, value9, \ | ||
284 | field10, value10) \ | ||
285 | (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ | ||
286 | EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ | ||
287 | EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ | ||
288 | EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ | ||
289 | EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ | ||
290 | EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ | ||
291 | EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ | ||
292 | EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ | ||
293 | EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ | ||
294 | EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10))) | ||
295 | |||
296 | #define EFX_INSERT_FIELDS64(...) \ | ||
297 | cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) | ||
298 | |||
299 | #define EFX_INSERT_FIELDS32(...) \ | ||
300 | cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) | ||
301 | |||
302 | #define EFX_POPULATE_OWORD64(oword, ...) do { \ | ||
303 | (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ | ||
304 | (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ | ||
305 | } while (0) | ||
306 | |||
307 | #define EFX_POPULATE_QWORD64(qword, ...) do { \ | ||
308 | (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ | ||
309 | } while (0) | ||
310 | |||
311 | #define EFX_POPULATE_OWORD32(oword, ...) do { \ | ||
312 | (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ | ||
313 | (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ | ||
314 | (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ | ||
315 | (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ | ||
316 | } while (0) | ||
317 | |||
318 | #define EFX_POPULATE_QWORD32(qword, ...) do { \ | ||
319 | (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ | ||
320 | (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ | ||
321 | } while (0) | ||
322 | |||
323 | #define EFX_POPULATE_DWORD(dword, ...) do { \ | ||
324 | (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ | ||
325 | } while (0) | ||
326 | |||
327 | #if BITS_PER_LONG == 64 | ||
328 | #define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 | ||
329 | #define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 | ||
330 | #else | ||
331 | #define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 | ||
332 | #define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 | ||
333 | #endif | ||
334 | |||
335 | /* Populate an octword field with various numbers of arguments */ | ||
336 | #define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD | ||
337 | #define EFX_POPULATE_OWORD_9(oword, ...) \ | ||
338 | EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
339 | #define EFX_POPULATE_OWORD_8(oword, ...) \ | ||
340 | EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
341 | #define EFX_POPULATE_OWORD_7(oword, ...) \ | ||
342 | EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
343 | #define EFX_POPULATE_OWORD_6(oword, ...) \ | ||
344 | EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
345 | #define EFX_POPULATE_OWORD_5(oword, ...) \ | ||
346 | EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
347 | #define EFX_POPULATE_OWORD_4(oword, ...) \ | ||
348 | EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
349 | #define EFX_POPULATE_OWORD_3(oword, ...) \ | ||
350 | EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
351 | #define EFX_POPULATE_OWORD_2(oword, ...) \ | ||
352 | EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
353 | #define EFX_POPULATE_OWORD_1(oword, ...) \ | ||
354 | EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
355 | #define EFX_ZERO_OWORD(oword) \ | ||
356 | EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) | ||
357 | #define EFX_SET_OWORD(oword) \ | ||
358 | EFX_POPULATE_OWORD_4(oword, \ | ||
359 | EFX_DWORD_0, 0xffffffff, \ | ||
360 | EFX_DWORD_1, 0xffffffff, \ | ||
361 | EFX_DWORD_2, 0xffffffff, \ | ||
362 | EFX_DWORD_3, 0xffffffff) | ||
363 | |||
364 | /* Populate a quadword field with various numbers of arguments */ | ||
365 | #define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD | ||
366 | #define EFX_POPULATE_QWORD_9(qword, ...) \ | ||
367 | EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
368 | #define EFX_POPULATE_QWORD_8(qword, ...) \ | ||
369 | EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
370 | #define EFX_POPULATE_QWORD_7(qword, ...) \ | ||
371 | EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
372 | #define EFX_POPULATE_QWORD_6(qword, ...) \ | ||
373 | EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
374 | #define EFX_POPULATE_QWORD_5(qword, ...) \ | ||
375 | EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
376 | #define EFX_POPULATE_QWORD_4(qword, ...) \ | ||
377 | EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
378 | #define EFX_POPULATE_QWORD_3(qword, ...) \ | ||
379 | EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
380 | #define EFX_POPULATE_QWORD_2(qword, ...) \ | ||
381 | EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
382 | #define EFX_POPULATE_QWORD_1(qword, ...) \ | ||
383 | EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
384 | #define EFX_ZERO_QWORD(qword) \ | ||
385 | EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) | ||
386 | #define EFX_SET_QWORD(qword) \ | ||
387 | EFX_POPULATE_QWORD_2(qword, \ | ||
388 | EFX_DWORD_0, 0xffffffff, \ | ||
389 | EFX_DWORD_1, 0xffffffff) | ||
390 | |||
391 | /* Populate a dword field with various numbers of arguments */ | ||
392 | #define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD | ||
393 | #define EFX_POPULATE_DWORD_9(dword, ...) \ | ||
394 | EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
395 | #define EFX_POPULATE_DWORD_8(dword, ...) \ | ||
396 | EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
397 | #define EFX_POPULATE_DWORD_7(dword, ...) \ | ||
398 | EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
399 | #define EFX_POPULATE_DWORD_6(dword, ...) \ | ||
400 | EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
401 | #define EFX_POPULATE_DWORD_5(dword, ...) \ | ||
402 | EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
403 | #define EFX_POPULATE_DWORD_4(dword, ...) \ | ||
404 | EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
405 | #define EFX_POPULATE_DWORD_3(dword, ...) \ | ||
406 | EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
407 | #define EFX_POPULATE_DWORD_2(dword, ...) \ | ||
408 | EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
409 | #define EFX_POPULATE_DWORD_1(dword, ...) \ | ||
410 | EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
411 | #define EFX_ZERO_DWORD(dword) \ | ||
412 | EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) | ||
413 | #define EFX_SET_DWORD(dword) \ | ||
414 | EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) | ||
415 | |||
416 | /* | ||
417 | * Modify a named field within an already-populated structure. Used | ||
418 | * for read-modify-write operations. | ||
419 | * | ||
420 | */ | ||
421 | #define EFX_INVERT_OWORD(oword) do { \ | ||
422 | (oword).u64[0] = ~((oword).u64[0]); \ | ||
423 | (oword).u64[1] = ~((oword).u64[1]); \ | ||
424 | } while (0) | ||
425 | |||
426 | #define EFX_AND_OWORD(oword, from, mask) \ | ||
427 | do { \ | ||
428 | (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ | ||
429 | (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ | ||
430 | } while (0) | ||
431 | |||
432 | #define EFX_OR_OWORD(oword, from, mask) \ | ||
433 | do { \ | ||
434 | (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ | ||
435 | (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ | ||
436 | } while (0) | ||
437 | |||
438 | #define EFX_INSERT64(min, max, low, high, value) \ | ||
439 | cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value)) | ||
440 | |||
441 | #define EFX_INSERT32(min, max, low, high, value) \ | ||
442 | cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) | ||
443 | |||
444 | #define EFX_INPLACE_MASK64(min, max, low, high) \ | ||
445 | EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low)) | ||
446 | |||
447 | #define EFX_INPLACE_MASK32(min, max, low, high) \ | ||
448 | EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low)) | ||
449 | |||
450 | #define EFX_SET_OWORD64(oword, low, high, value) do { \ | ||
451 | (oword).u64[0] = (((oword).u64[0] \ | ||
452 | & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ | ||
453 | | EFX_INSERT64(0, 63, low, high, value)); \ | ||
454 | (oword).u64[1] = (((oword).u64[1] \ | ||
455 | & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ | ||
456 | | EFX_INSERT64(64, 127, low, high, value)); \ | ||
457 | } while (0) | ||
458 | |||
459 | #define EFX_SET_QWORD64(qword, low, high, value) do { \ | ||
460 | (qword).u64[0] = (((qword).u64[0] \ | ||
461 | & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ | ||
462 | | EFX_INSERT64(0, 63, low, high, value)); \ | ||
463 | } while (0) | ||
464 | |||
465 | #define EFX_SET_OWORD32(oword, low, high, value) do { \ | ||
466 | (oword).u32[0] = (((oword).u32[0] \ | ||
467 | & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ | ||
468 | | EFX_INSERT32(0, 31, low, high, value)); \ | ||
469 | (oword).u32[1] = (((oword).u32[1] \ | ||
470 | & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ | ||
471 | | EFX_INSERT32(32, 63, low, high, value)); \ | ||
472 | (oword).u32[2] = (((oword).u32[2] \ | ||
473 | & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ | ||
474 | | EFX_INSERT32(64, 95, low, high, value)); \ | ||
475 | (oword).u32[3] = (((oword).u32[3] \ | ||
476 | & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ | ||
477 | | EFX_INSERT32(96, 127, low, high, value)); \ | ||
478 | } while (0) | ||
479 | |||
480 | #define EFX_SET_QWORD32(qword, low, high, value) do { \ | ||
481 | (qword).u32[0] = (((qword).u32[0] \ | ||
482 | & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ | ||
483 | | EFX_INSERT32(0, 31, low, high, value)); \ | ||
484 | (qword).u32[1] = (((qword).u32[1] \ | ||
485 | & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ | ||
486 | | EFX_INSERT32(32, 63, low, high, value)); \ | ||
487 | } while (0) | ||
488 | |||
489 | #define EFX_SET_DWORD32(dword, low, high, value) do { \ | ||
490 | (dword).u32[0] = (((dword).u32[0] \ | ||
491 | & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ | ||
492 | | EFX_INSERT32(0, 31, low, high, value)); \ | ||
493 | } while (0) | ||
494 | |||
495 | #define EFX_SET_OWORD_FIELD64(oword, field, value) \ | ||
496 | EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \ | ||
497 | EFX_HIGH_BIT(field), value) | ||
498 | |||
499 | #define EFX_SET_QWORD_FIELD64(qword, field, value) \ | ||
500 | EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \ | ||
501 | EFX_HIGH_BIT(field), value) | ||
502 | |||
503 | #define EFX_SET_OWORD_FIELD32(oword, field, value) \ | ||
504 | EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \ | ||
505 | EFX_HIGH_BIT(field), value) | ||
506 | |||
507 | #define EFX_SET_QWORD_FIELD32(qword, field, value) \ | ||
508 | EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \ | ||
509 | EFX_HIGH_BIT(field), value) | ||
510 | |||
511 | #define EFX_SET_DWORD_FIELD(dword, field, value) \ | ||
512 | EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \ | ||
513 | EFX_HIGH_BIT(field), value) | ||
514 | |||
515 | |||
516 | |||
517 | #if BITS_PER_LONG == 64 | ||
518 | #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 | ||
519 | #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 | ||
520 | #else | ||
521 | #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 | ||
522 | #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 | ||
523 | #endif | ||
524 | |||
525 | /* Used to avoid compiler warnings about shift range exceeding width | ||
526 | * of the data types when dma_addr_t is only 32 bits wide. | ||
527 | */ | ||
528 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) | ||
529 | #define EFX_DMA_TYPE_WIDTH(width) \ | ||
530 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) | ||
531 | |||
532 | |||
533 | /* Static initialiser */ | ||
534 | #define EFX_OWORD32(a, b, c, d) \ | ||
535 | { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ | ||
536 | cpu_to_le32(c), cpu_to_le32(d) } } | ||
537 | |||
538 | #endif /* EFX_BITFIELD_H */ | ||
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
new file mode 100644
index 00000000000..b59abc706d9
--- /dev/null
+++ b/drivers/net/sfc/efx.c
@@ -0,0 +1,2700 @@
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2005-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/pci.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/etherdevice.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/notifier.h> | ||
17 | #include <linux/ip.h> | ||
18 | #include <linux/tcp.h> | ||
19 | #include <linux/in.h> | ||
20 | #include <linux/crc32.h> | ||
21 | #include <linux/ethtool.h> | ||
22 | #include <linux/topology.h> | ||
23 | #include <linux/gfp.h> | ||
24 | #include <linux/cpu_rmap.h> | ||
25 | #include "net_driver.h" | ||
26 | #include "efx.h" | ||
27 | #include "nic.h" | ||
28 | |||
29 | #include "mcdi.h" | ||
30 | #include "workarounds.h" | ||
31 | |||
32 | /************************************************************************** | ||
33 | * | ||
34 | * Type name strings | ||
35 | * | ||
36 | ************************************************************************** | ||
37 | */ | ||
38 | |||
/* Loopback mode names (see LOOPBACK_MODE()), indexed by the loopback
 * mode enumeration; efx_loopback_mode_max bounds the table. */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

/* Reset type names, indexed by the reset type enumeration;
 * efx_reset_type_max bounds the table. */
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]     = "INVISIBLE",
	[RESET_TYPE_ALL]           = "ALL",
	[RESET_TYPE_WORLD]         = "WORLD",
	[RESET_TYPE_DISABLE]       = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]   = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]     = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]   = "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]       = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]    = "MC_FAILURE",
};
85 | |||
/* Largest MTU the driver will accept (9KB jumbo frames) */
#define EFX_MAX_MTU (9 * 1024)

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444); /* read-only via sysfs */
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor. On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

/* Non-zero puts PHYs into reflash mode at start of day; writable at runtime */
static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

/* Adaptive IRQ moderation thresholds, compared against the per-channel
 * irq_mod_score accumulated between adaptation periods (see efx_poll()) */
static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

/* Default netif message level bitmap (see NETIF_MSG_* in netdevice.h) */
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
191 | |||
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

/* Forward declarations for functions defined later in this file */
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

/* Assert that the caller holds the RTNL lock whenever the device is in
 * a state (RUNNING or DISABLED) where resets must be serialised */
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)
213 | |||
214 | /************************************************************************** | ||
215 | * | ||
216 | * Event queue processing | ||
217 | * | ||
218 | *************************************************************************/ | ||
219 | |||
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 *
 * Returns the number of events spent, up to @budget; returns 0 early
 * when a reset is pending or the channel is disabled.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	int spent;

	/* Do nothing while a reset is in flight or the channel is down */
	if (unlikely(efx->reset_pending || !channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	/* Top up the RX ring now that events have been consumed */
	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));

	return spent;
}
252 | |||
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then — the write barrier orders the clear
	 * of work_pending before the eventq acknowledgement. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}
269 | |||
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 *
 * Returns the number of events processed; when less than @budget the
 * poll is completed and the channel's event queue is acknowledged.
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		/* Adaptive IRQ moderation: every 1000 interrupts on an
		 * RX channel, nudge the moderation value down if the
		 * accumulated score is below irq_adapt_low_thresh, or up
		 * (bounded by irq_rx_moderation) if above
		 * irq_adapt_high_thresh. */
		if (channel->channel < efx->n_rx_channels &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}
323 | |||
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack ) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test. It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	/* Sanity-check the caller: valid channel, enabled, and only
	 * during a loopback self-test */
	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel; budget covers the whole event queue */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	/* Re-enable NAPI and interrupts in reverse order of disabling */
	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}
365 | |||
366 | /* Create event queue | ||
367 | * Event queue memory allocations are done only once. If the channel | ||
368 | * is reset, the memory buffer will be reused; this guards against | ||
369 | * errors during channel reset and also simplifies interrupt handling. | ||
370 | */ | ||
371 | static int efx_probe_eventq(struct efx_channel *channel) | ||
372 | { | ||
373 | struct efx_nic *efx = channel->efx; | ||
374 | unsigned long entries; | ||
375 | |||
376 | netif_dbg(channel->efx, probe, channel->efx->net_dev, | ||
377 | "chan %d create event queue\n", channel->channel); | ||
378 | |||
379 | /* Build an event queue with room for one event per tx and rx buffer, | ||
380 | * plus some extra for link state events and MCDI completions. */ | ||
381 | entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); | ||
382 | EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); | ||
383 | channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; | ||
384 | |||
385 | return efx_nic_probe_eventq(channel); | ||
386 | } | ||
387 | |||
/* Prepare channel's event queue for use: reset the software read
 * pointer and (re)initialise the hardware queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}
398 | |||
/* Shut down the hardware side of a channel's event queue */
static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}
406 | |||
/* Release the resources allocated for a channel's event queue */
static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
414 | |||
415 | /************************************************************************** | ||
416 | * | ||
417 | * Channel handling | ||
418 | * | ||
419 | *************************************************************************/ | ||
420 | |||
/* Allocate and initialise a channel structure, optionally copying
 * parameters (but not resources) from an old channel structure.
 *
 * @efx:	owning NIC
 * @i:		channel index (used only when no @old_channel is given)
 * @old_channel: channel to clone parameters from, or NULL for a fresh one
 *
 * Returns the new channel, or NULL on allocation failure.
 */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	if (old_channel) {
		channel = kmalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		/* Copy all parameters, then clear the fields that refer
		 * to resources owned by the old channel */
		*channel = *old_channel;

		channel->napi_dev = NULL;
		memset(&channel->eventq, 0, sizeof(channel->eventq));

		rx_queue = &channel->rx_queue;
		rx_queue->buffer = NULL;
		memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			/* Only re-point queues that were in use */
			if (tx_queue->channel)
				tx_queue->channel = channel;
			tx_queue->buffer = NULL;
			memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
		}
	} else {
		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		channel->efx = efx;
		channel->channel = i;

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			tx_queue->efx = efx;
			tx_queue->queue = i * EFX_TXQ_TYPES + j;
			tx_queue->channel = channel;
		}
	}

	/* Common RX queue setup, including the slow-fill timer used when
	 * descriptor refills cannot keep up */
	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}
475 | |||
/* Allocate the event queue and all TX/RX queues for one channel.
 * On failure everything already allocated for this channel is torn
 * down again (goto-based unwind). Returns 0 or a negative error code.
 */
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

	/* Unwind in reverse order of allocation. NOTE(review): the
	 * remove loops iterate over all queues, which appears to rely on
	 * the remove functions tolerating queues that were never probed */
 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
514 | |||
515 | |||
516 | static void efx_set_channel_names(struct efx_nic *efx) | ||
517 | { | ||
518 | struct efx_channel *channel; | ||
519 | const char *type = ""; | ||
520 | int number; | ||
521 | |||
522 | efx_for_each_channel(channel, efx) { | ||
523 | number = channel->channel; | ||
524 | if (efx->n_channels > efx->n_rx_channels) { | ||
525 | if (channel->channel < efx->n_rx_channels) { | ||
526 | type = "-rx"; | ||
527 | } else { | ||
528 | type = "-tx"; | ||
529 | number -= efx->n_rx_channels; | ||
530 | } | ||
531 | } | ||
532 | snprintf(efx->channel_name[channel->channel], | ||
533 | sizeof(efx->channel_name[0]), | ||
534 | "%s%s-%d", efx->name, type, number); | ||
535 | } | ||
536 | } | ||
537 | |||
/* Allocate hardware resources for every channel, then assign channel
 * names. On any failure all channels are removed again.
 * Returns 0 or a negative error code.
 */
static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	/* efx_remove_channel() is called for every channel, including
	 * ones that were never probed — see efx_probe_channel() */
	efx_remove_channels(efx);
	return rc;
}
563 | |||
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));

	/* Initialise the channels: event queue first, then TX queues,
	 * then RX queues */
	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
605 | |||
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set.
	 * The write barrier orders these stores before any interrupt
	 * handler activity. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	/* Fill the queues before enabling NAPI */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);

	napi_enable(&channel->napi_str);
}
631 | |||
632 | /* This disables event queue processing and packet transmission. | ||
633 | * This function does not guarantee that all queue processing | ||
634 | * (e.g. RX refill) is complete. | ||
635 | */ | ||
636 | static void efx_stop_channel(struct efx_channel *channel) | ||
637 | { | ||
638 | if (!channel->enabled) | ||
639 | return; | ||
640 | |||
641 | netif_dbg(channel->efx, ifdown, channel->efx->net_dev, | ||
642 | "stop chan %d\n", channel->channel); | ||
643 | |||
644 | channel->enabled = false; | ||
645 | napi_disable(&channel->napi_str); | ||
646 | } | ||
647 | |||
/* Flush and shut down all channels' queues. Must be called with resets
 * serialised and the port disabled. If the hardware flush fails, a full
 * reset is scheduled (where the workaround applies) to avoid the
 * descriptor caches referencing freed memory.
 */
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = efx_nic_flush_queues(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset. */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	/* Shut down each channel: RX queues, then TX queues, then the
	 * event queue */
	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}
685 | |||
/* Free all resources (RX queues, TX queues, event queue) owned by one
 * channel */
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}
700 | |||
/* Free the resources of every channel on the NIC */
static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}
708 | |||
/* Re-create all channels with new RX/TX queue sizes.
 *
 * The NIC is stopped, a parallel set of channel structures is cloned,
 * the old and new sets are swapped, and the new set is probed. On
 * probe failure the swap is rolled back so the original channels keep
 * running. In either case the surviving set is re-initialised and the
 * NIC restarted. Returns 0 or a negative error code.
 */
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i;
	int rc;

	efx_stop_all(efx);
	efx_fini_channels(efx);

	/* Clone channels */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx_alloc_channel(efx, i, efx->channel[i]);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto rollback;

	efx_init_napi(efx);

	/* Destroy old channels */
	for (i = 0; i < efx->n_channels; i++) {
		efx_fini_napi_channel(other_channel[i]);
		efx_remove_channel(other_channel[i]);
	}
out:
	/* Free unused channel structures; after a rollback these are the
	 * clones, after success they are the old channels */
	for (i = 0; i < efx->n_channels; i++)
		kfree(other_channel[i]);

	/* Restart whichever channel set is now installed */
	efx_init_channels(efx);
	efx_start_all(efx);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}
773 | |||
/* Arm the RX slow-fill timer so the queue is refilled again in ~100ms */
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
778 | |||
779 | /************************************************************************** | ||
780 | * | ||
781 | * Port handling | ||
782 | * | ||
783 | **************************************************************************/ | ||
784 | |||
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	/* Only touch the carrier state (and count the change) when it
	 * actually differs from what the kernel believes */
	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up) {
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		netif_info(efx, link, efx->net_dev, "link down\n");
	}

}
821 | |||
822 | void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) | ||
823 | { | ||
824 | efx->link_advertising = advertising; | ||
825 | if (advertising) { | ||
826 | if (advertising & ADVERTISED_Pause) | ||
827 | efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); | ||
828 | else | ||
829 | efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); | ||
830 | if (advertising & ADVERTISED_Asym_Pause) | ||
831 | efx->wanted_fc ^= EFX_FC_TX; | ||
832 | } | ||
833 | } | ||
834 | |||
/* Set the wanted flow control mode and, if autonegotiation is in use
 * (link_advertising != 0), fold the equivalent pause bits back into the
 * advertising mask.  This is the inverse mapping of
 * efx_link_set_advertising(): EFX_FC_RX selects both pause bits and
 * EFX_FC_TX then toggles ADVERTISED_Asym_Pause.
 */
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}
849 | |||
850 | static void efx_fini_port(struct efx_nic *efx); | ||
851 | |||
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock.  Returns 0 or a negative error code
 * from the type-specific reconfigure_port() hook.
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	/* On failure, restore the PHY mode saved above */
	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}
886 | |||
887 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is | ||
888 | * disabled. */ | ||
889 | int efx_reconfigure_port(struct efx_nic *efx) | ||
890 | { | ||
891 | int rc; | ||
892 | |||
893 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
894 | |||
895 | mutex_lock(&efx->mac_lock); | ||
896 | rc = __efx_reconfigure_port(efx); | ||
897 | mutex_unlock(&efx->mac_lock); | ||
898 | |||
899 | return rc; | ||
900 | } | ||
901 | |||
/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	/* Do nothing if the port was stopped after this work item was
	 * scheduled; port_enabled is protected by mac_lock */
	if (efx->port_enabled) {
		efx->type->push_multicast_hash(efx);
		efx->mac_op->reconfigure(efx);
	}
	mutex_unlock(&efx->mac_lock);
}
916 | |||
/* Create the port: bind the NIC-type-specific MAC/PHY operations and
 * validate the permanent hardware MAC address, falling back to a random
 * locally-administered address if allow_bad_hwaddr permits.
 * Returns 0 or a negative error code; on error the port is removed again.
 */
static int efx_probe_port(struct efx_nic *efx)
{
	unsigned char *perm_addr;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	/* The phy_flash_cfg module parameter forces the PHY into its
	 * special (firmware-update) mode */
	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Sanity check MAC address */
	perm_addr = efx->net_dev->perm_addr;
	if (is_valid_ether_addr(perm_addr)) {
		memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
	} else {
		netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
			  perm_addr);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		netif_info(efx, probe, efx->net_dev,
			   "using locally-generated MAC %pM\n",
			   efx->net_dev->dev_addr);
	}

	return 0;

err:
	efx->type->remove_port(efx);
	return rc;
}
955 | |||
/* Initialise the PHY and push the initial MAC and flow-control
 * configuration, all under mac_lock.  On failure the PHY is shut down
 * again and a negative error code is returned.
 */
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported).
	 * NOTE(review): the return value is ignored here — presumably
	 * intentional since the PHY reconfigure below is still checked;
	 * confirm against mac_op->reconfigure() semantics */
	efx->mac_op->reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}
988 | |||
/* Mark the port enabled and push the current multicast hash and MAC
 * configuration.  Must not be called when the port is already enabled.
 */
static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all(), so reconfigure here to
	 * pick up any changes it would have applied */
	efx->type->push_multicast_hash(efx);
	efx->mac_op->reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}
1004 | |||
/* Prevent efx_mac_work() and efx_monitor() from working: both check
 * port_enabled under mac_lock before touching the hardware */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}
1020 | |||
/* Shut down the PHY (reverse of efx_init_port) and report the link as
 * down.  Safe to call when the port was never initialised. */
static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	/* Force the carrier off and tell the kernel about it */
	efx->link_state.up = false;
	efx_link_status_changed(efx);
}
1034 | |||
1035 | static void efx_remove_port(struct efx_nic *efx) | ||
1036 | { | ||
1037 | netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); | ||
1038 | |||
1039 | efx->type->remove_port(efx); | ||
1040 | } | ||
1041 | |||
1042 | /************************************************************************** | ||
1043 | * | ||
1044 | * NIC handling | ||
1045 | * | ||
1046 | **************************************************************************/ | ||
1047 | |||
1048 | /* This configures the PCI device to enable I/O and DMA. */ | ||
1049 | static int efx_init_io(struct efx_nic *efx) | ||
1050 | { | ||
1051 | struct pci_dev *pci_dev = efx->pci_dev; | ||
1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; | ||
1053 | int rc; | ||
1054 | |||
1055 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | ||
1056 | |||
1057 | rc = pci_enable_device(pci_dev); | ||
1058 | if (rc) { | ||
1059 | netif_err(efx, probe, efx->net_dev, | ||
1060 | "failed to enable PCI device\n"); | ||
1061 | goto fail1; | ||
1062 | } | ||
1063 | |||
1064 | pci_set_master(pci_dev); | ||
1065 | |||
1066 | /* Set the PCI DMA mask. Try all possibilities from our | ||
1067 | * genuine mask down to 32 bits, because some architectures | ||
1068 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit | ||
1069 | * masks event though they reject 46 bit masks. | ||
1070 | */ | ||
1071 | while (dma_mask > 0x7fffffffUL) { | ||
1072 | if (pci_dma_supported(pci_dev, dma_mask) && | ||
1073 | ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) | ||
1074 | break; | ||
1075 | dma_mask >>= 1; | ||
1076 | } | ||
1077 | if (rc) { | ||
1078 | netif_err(efx, probe, efx->net_dev, | ||
1079 | "could not find a suitable DMA mask\n"); | ||
1080 | goto fail2; | ||
1081 | } | ||
1082 | netif_dbg(efx, probe, efx->net_dev, | ||
1083 | "using DMA mask %llx\n", (unsigned long long) dma_mask); | ||
1084 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); | ||
1085 | if (rc) { | ||
1086 | /* pci_set_consistent_dma_mask() is not *allowed* to | ||
1087 | * fail with a mask that pci_set_dma_mask() accepted, | ||
1088 | * but just in case... | ||
1089 | */ | ||
1090 | netif_err(efx, probe, efx->net_dev, | ||
1091 | "failed to set consistent DMA mask\n"); | ||
1092 | goto fail2; | ||
1093 | } | ||
1094 | |||
1095 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); | ||
1096 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); | ||
1097 | if (rc) { | ||
1098 | netif_err(efx, probe, efx->net_dev, | ||
1099 | "request for memory BAR failed\n"); | ||
1100 | rc = -EIO; | ||
1101 | goto fail3; | ||
1102 | } | ||
1103 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
1104 | efx->type->mem_map_size); | ||
1105 | if (!efx->membase) { | ||
1106 | netif_err(efx, probe, efx->net_dev, | ||
1107 | "could not map memory BAR at %llx+%x\n", | ||
1108 | (unsigned long long)efx->membase_phys, | ||
1109 | efx->type->mem_map_size); | ||
1110 | rc = -ENOMEM; | ||
1111 | goto fail4; | ||
1112 | } | ||
1113 | netif_dbg(efx, probe, efx->net_dev, | ||
1114 | "memory BAR at %llx+%x (virtual %p)\n", | ||
1115 | (unsigned long long)efx->membase_phys, | ||
1116 | efx->type->mem_map_size, efx->membase); | ||
1117 | |||
1118 | return 0; | ||
1119 | |||
1120 | fail4: | ||
1121 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); | ||
1122 | fail3: | ||
1123 | efx->membase_phys = 0; | ||
1124 | fail2: | ||
1125 | pci_disable_device(efx->pci_dev); | ||
1126 | fail1: | ||
1127 | return rc; | ||
1128 | } | ||
1129 | |||
/* Undo efx_init_io(): unmap the memory BAR, release the region and
 * disable the PCI device.  Each step is guarded so this is safe after a
 * partial efx_init_io(). */
static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
1146 | |||
/* Get number of channels wanted.  Each channel will have its own IRQ,
 * 1 RX queue and/or 2 TX queues.  Overridable with the rss_cpus module
 * parameter; otherwise counts one channel per group of CPUs sharing a
 * core sibling mask. */
static int efx_wanted_channels(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (rss_cpus)
		return rss_cpus;

	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	/* Count each core group once by folding its whole sibling mask
	 * into core_mask the first time any of its CPUs is seen */
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}
1176 | |||
/* Set up the accelerated RFS CPU reverse-map, associating one entry per
 * RX channel with its MSI-X vector.  No-op (returns 0) when
 * CONFIG_RFS_ACCEL is not enabled.  On failure the partially-built map
 * is freed and an error code returned. */
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}
1198 | |||
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 *
 * Tries MSI-X first (with retry at the smaller vector count the PCI
 * core reports), then falls back to single-channel MSI, then to a
 * legacy interrupt.  Always returns 0 except when the RFS reverse-map
 * setup fails.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int n_channels;

		n_channels = efx_wanted_channels();
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			/* rc > 0 means only rc vectors are available;
			 * retry with exactly that many */
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %d).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (separate_tx_channels) {
				/* Split channels between TX-only and
				 * RX-only roles, with at least one each */
				efx->n_tx_channels =
					max(efx->n_channels / 2, 1U);
				efx->n_rx_channels =
					max(efx->n_channels -
					    efx->n_tx_channels, 1U);
			} else {
				efx->n_tx_channels = efx->n_channels;
				efx->n_rx_channels = efx->n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	return 0;
}
1285 | |||
/* Release whichever interrupt resources efx_probe_interrupts()
 * obtained; the disable calls are harmless for modes that were never
 * enabled. */
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
1299 | |||
/* Fix up TX queue numbering after the channel counts have been decided
 * by efx_probe_interrupts(). */
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* With separate TX channels, the TX channels occupy the tail of
	 * the channel array */
	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}
1317 | |||
/* Probe the NIC itself: hardware-type-specific setup, interrupt/channel
 * discovery, RSS hash/indirection defaults and interrupt moderation.
 * On failure the type-specific state is removed again. */
static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	/* Randomise the RSS hash key only when RSS can actually spread
	 * traffic (more than one channel) */
	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	/* Default indirection table: round-robin over RX channels */
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}
1354 | |||
/* Undo efx_probe_nic(): release interrupts, then the type-specific
 * state. */
static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}
1362 | |||
1363 | /************************************************************************** | ||
1364 | * | ||
1365 | * NIC startup/shutdown | ||
1366 | * | ||
1367 | *************************************************************************/ | ||
1368 | |||
/* Probe everything in dependency order: NIC, port, channels, filter
 * tables.  Uses a goto-unwind ladder so each failure releases exactly
 * what was acquired before it. */
static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
	rc = efx_probe_channels(efx);
	if (rc)
		goto fail3;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	return 0;

fail4:
	efx_remove_channels(efx);
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}
1408 | |||
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);

	if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	/* Switch to event based MCDI completions after enabling interrupts.
	 * If a reset has been scheduled, then we need to stay in polled mode.
	 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
	 * reset_pending [modified from an atomic context], we instead guarantee
	 * that efx_mcdi_mode_poll() isn't reverted erroneously */
	efx_mcdi_mode_event(efx);
	if (efx->reset_pending)
		efx_mcdi_mode_poll(efx);

	/* Start the hardware monitor if there is one. Otherwise (we're link
	 * event driven), we have to poll the PHY because after an event queue
	 * flush, we could have missed a link state change */
	if (efx->type->monitor != NULL) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
	} else {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}
1467 | |||
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}
1478 | |||
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks.
 *
 * The shutdown order matters: stats, MCDI mode, interrupts, NAPI,
 * port, pending work, TX queues. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);

	/* Switch to MCDI polling on Siena before disabling interrupts */
	efx_mcdi_mode_poll(efx);

	/* Disable interrupts and wait for ISR to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		netif_tx_stop_all_queues(efx->net_dev);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
1530 | |||
/* Tear down everything created by efx_probe_all(), in reverse order of
 * creation: filters, channels, port, NIC. */
static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_filters(efx);
	efx_remove_channels(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
1538 | |||
1539 | /************************************************************************** | ||
1540 | * | ||
1541 | * Interrupt moderation | ||
1542 | * | ||
1543 | **************************************************************************/ | ||
1544 | |||
/* Convert a microsecond moderation value into timer ticks of the given
 * resolution.  Non-positive values map to 0 (no moderation) and any
 * positive value yields at least one tick.
 */
static unsigned irq_mod_ticks(int usecs, int resolution)
{
	unsigned ticks;

	if (usecs <= 0)
		return 0;	/* cannot receive interrupts ahead of time :-) */

	ticks = usecs / resolution;
	return ticks ? ticks : 1;	/* never round a positive value to 0 */
}
1553 | |||
/* Set interrupt moderation parameters, converting the microsecond
 * values to hardware ticks and applying the RX value to RX-capable
 * channels and the TX value to TX-only channels. */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_channel *channel;
	unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
	unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		/* RX moderation takes precedence on combined channels */
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}
}
1573 | |||
1574 | /************************************************************************** | ||
1575 | * | ||
1576 | * Hardware monitor | ||
1577 | * | ||
1578 | **************************************************************************/ | ||
1579 | |||
/* Run periodically off the general workqueue; invokes the type-specific
 * hardware monitor (when the port is enabled) and always reschedules
 * itself. */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
1603 | |||
1604 | /************************************************************************** | ||
1605 | * | ||
1606 | * ioctls | ||
1607 | * | ||
1608 | *************************************************************************/ | ||
1609 | |||
/* Net device ioctl
 * Context: process, rtnl_lock() held.
 *
 * MII ioctls are forwarded to the MDIO layer after converting any
 * phy_id in the old PRTAD/DEVAD packing into the clause-45 format.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format; the 0xfc00 mask
	 * test detects the old encoding, the XOR rewrites it in place */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
1627 | |||
1628 | /************************************************************************** | ||
1629 | * | ||
1630 | * NAPI interface | ||
1631 | * | ||
1632 | **************************************************************************/ | ||
1633 | |||
/* Register a NAPI context for every channel, all polling via
 * efx_poll() with the module-wide napi_weight. */
static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);
	}
}
1644 | |||
/* Remove a single channel's NAPI context; napi_dev doubles as the
 * "NAPI registered" flag and is cleared here. */
static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}
1651 | |||
1652 | static void efx_fini_napi(struct efx_nic *efx) | ||
1653 | { | ||
1654 | struct efx_channel *channel; | ||
1655 | |||
1656 | efx_for_each_channel(channel, efx) | ||
1657 | efx_fini_napi_channel(channel); | ||
1658 | } | ||
1659 | |||
1660 | /************************************************************************** | ||
1661 | * | ||
1662 | * Kernel netpoll interface | ||
1663 | * | ||
1664 | *************************************************************************/ | ||
1665 | |||
1666 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1667 | |||
/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	/* Schedule every channel's NAPI poll as if its IRQ had fired */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
1680 | |||
1681 | #endif | ||
1682 | |||
1683 | /************************************************************************** | ||
1684 | * | ||
1685 | * Kernel net device interface | ||
1686 | * | ||
1687 | *************************************************************************/ | ||
1688 | |||
/* Context: process, rtnl_lock() held.
 *
 * ndo_open handler: refuses to open while disabled, while the PHY is in
 * its special (firmware-update) mode, or if a pending MC reboot cannot
 * be recovered by a full reset; otherwise reports the current link
 * state and starts the datapath.
 */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	return 0;
}
1712 | |||
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 *
 * ndo_stop handler: quiesces the datapath, then tears down and
 * re-initialises the channels so the device is ready for a later open.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}
1733 | |||
/* Context: process, dev_base_lock or RTNL held, non-blocking.
 * Refreshes the hardware MAC statistics and translates them into the
 * generic rtnl_link_stats64 counters expected by the stack. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	/* Pull fresh counters from the NIC under the stats lock */
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx);
	spin_unlock_bh(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	/* Aggregate error totals from the individual categories */
	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
1768 | |||
/* Context: netif_tx_lock held, BHs disabled.
 * TX watchdog: the stack detected a stalled TX queue. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	/* Cannot sleep here, so hand the reset to the worker thread */
	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
1780 | |||
1781 | |||
1782 | /* Context: process, rtnl_lock() held. */ | ||
1783 | static int efx_change_mtu(struct net_device *net_dev, int new_mtu) | ||
1784 | { | ||
1785 | struct efx_nic *efx = netdev_priv(net_dev); | ||
1786 | int rc = 0; | ||
1787 | |||
1788 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
1789 | |||
1790 | if (new_mtu > EFX_MAX_MTU) | ||
1791 | return -EINVAL; | ||
1792 | |||
1793 | efx_stop_all(efx); | ||
1794 | |||
1795 | netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); | ||
1796 | |||
1797 | efx_fini_channels(efx); | ||
1798 | |||
1799 | mutex_lock(&efx->mac_lock); | ||
1800 | /* Reconfigure the MAC before enabling the dma queues so that | ||
1801 | * the RX buffers don't overflow */ | ||
1802 | net_dev->mtu = new_mtu; | ||
1803 | efx->mac_op->reconfigure(efx); | ||
1804 | mutex_unlock(&efx->mac_lock); | ||
1805 | |||
1806 | efx_init_channels(efx); | ||
1807 | |||
1808 | efx_start_all(efx); | ||
1809 | return rc; | ||
1810 | } | ||
1811 | |||
/* Context: process, rtnl_lock() held.
 * Set the station MAC address and push it to the MAC hardware.
 * Returns -EINVAL if the requested address is not a valid unicast
 * Ethernet address. */
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->mac_op->reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}
1836 | |||
/* Context: netif_addr_lock held, BHs disabled.
 * Rebuild the hardware multicast hash filter from the net device's
 * address list and promiscuity flags, then ask the MAC worker to push
 * it to the hardware. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		/* Accept everything: set all hash bits */
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			/* Hardware indexes the filter by the low bits of
			 * the little-endian CRC of the address */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		set_bit_le(0xff, mc_hash->byte);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}
1870 | |||
/* .ndo_set_features: handle offload feature changes.  Only disabling
 * RX n-tuple filtering needs driver work here; everything else is
 * handled generically by the stack. */
static int efx_set_features(struct net_device *net_dev, u32 data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}
1881 | |||
/* Net device operations for all NIC variants supported by this driver */
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};
1902 | |||
/* Propagate a net device rename into the driver's own name, the MTD
 * partition names and the per-channel names.  Called from the netdev
 * notifier with the RTNL lock held. */
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}
1909 | |||
1910 | static int efx_netdev_event(struct notifier_block *this, | ||
1911 | unsigned long event, void *ptr) | ||
1912 | { | ||
1913 | struct net_device *net_dev = ptr; | ||
1914 | |||
1915 | if (net_dev->netdev_ops == &efx_netdev_ops && | ||
1916 | event == NETDEV_CHANGENAME) | ||
1917 | efx_update_name(netdev_priv(net_dev)); | ||
1918 | |||
1919 | return NOTIFY_DONE; | ||
1920 | } | ||
1921 | |||
/* Notifier block used to catch NETDEV_CHANGENAME for our devices */
static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
1925 | |||
1926 | static ssize_t | ||
1927 | show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) | ||
1928 | { | ||
1929 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
1930 | return sprintf(buf, "%d\n", efx->phy_type); | ||
1931 | } | ||
1932 | static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); | ||
1933 | |||
/* Register the net device with the kernel, bind TX queues to their
 * core queues, and create the phy_type sysfs attribute.  Called once
 * per NIC at the end of probe.  Returns 0 or a negative errno; on
 * failure the net device is left unregistered. */
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Associate each hardware TX queue with its core TX queue */
	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	/* Reached only while still holding the RTNL lock */
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;

fail_registered:
	/* Device registration succeeded; undo it (takes RTNL itself) */
	unregister_netdev(net_dev);
	return rc;
}
1989 | |||
/* Undo efx_register_netdev().  Safe to call even if registration never
 * happened (checks efx_dev_registered()). */
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	if (efx_dev_registered(efx)) {
		/* Fall back to the PCI name once the netdev name is gone */
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
2014 | |||
2015 | /************************************************************************** | ||
2016 | * | ||
2017 | * Device reset and suspend | ||
2018 | * | ||
2019 | **************************************************************************/ | ||
2020 | |||
/* Tears down the entire software state and most of the hardware state
 * before reset.
 *
 * NOTE: returns with efx->mac_lock HELD; the matching unlock happens
 * in efx_reset_up(), which must always be called afterwards. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	efx_fini_channels(efx);
	/* An invisible reset preserves PHY state, so skip phy fini */
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}
2035 | |||
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	/* Caller asked us not to restart the datapath (rc is still 0
	 * here, so this path returns success) */
	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		/* A failed PHY reconfigure is logged but not fatal */
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->mac_op->reconfigure(efx);

	efx_init_channels(efx);
	efx_restore_filters(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	/* Release the lock taken in efx_reset_down(), even on failure */
	mutex_unlock(&efx->mac_lock);

	return rc;
}
2083 | |||
/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	netif_device_detach(efx->net_dev);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 * -(1 << (method + 1)) == ~((1 << (method + 1)) - 1): clears
	 * bits 0..method, i.e. this method and everything it subsumes.
	 */
	efx->reset_pending &= -(1 << (method + 1));

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc || method == RESET_TYPE_DISABLE;
	/* Always call efx_reset_up() — it releases the mac_lock taken
	 * by efx_reset_down() even when the reset itself failed */
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}
2137 | |||
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	/* Snapshot the pending bitmask; efx_reset() clears the bits
	 * corresponding to the scopes it services */
	unsigned long pending = ACCESS_ONCE(efx->reset_pending);

	if (!pending)
		return;

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flags set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		netif_info(efx, drv, efx->net_dev,
			   "scheduled reset quenched. NIC not RUNNING\n");
		return;
	}

	rtnl_lock();
	/* Perform the highest-numbered pending reset method (which
	 * subsumes the lesser ones); return code intentionally ignored */
	(void)efx_reset(efx, fls(pending) - 1);
	rtnl_unlock();
}
2161 | |||
/* Request an asynchronous reset.  May be called from atomic context;
 * the actual reset runs later in efx_reset_work(). */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	switch (type) {
	/* These reasons map directly onto reset methods */
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		/* Other reasons are translated to a method by the NIC type */
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}
2191 | |||
2192 | /************************************************************************** | ||
2193 | * | ||
2194 | * List of NICs we support | ||
2195 | * | ||
2196 | **************************************************************************/ | ||
2197 | |||
/* PCI device ID table: Falcon (SFC4000 A1/B0) and Siena (SFC9000)
 * parts, each bound to its efx_nic_type operations table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}			/* end of list */
};
2210 | |||
2211 | /************************************************************************** | ||
2212 | * | ||
2213 | * Dummy PHY/MAC operations | ||
2214 | * | ||
2215 | * Can be used for some unimplemented operations | ||
2216 | * Needed so all function pointers are valid and do not have to be tested | ||
2217 | * before use | ||
2218 | * | ||
2219 | **************************************************************************/ | ||
/* Dummy operation: does nothing and reports success */
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
/* Dummy operation: deliberately does nothing */
void efx_port_dummy_op_void(struct efx_nic *efx) {}
2225 | |||
/* Dummy poll operation: always reports "no change" */
static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}
2230 | |||
/* Placeholder PHY operations installed before the real PHY driver is
 * probed, so the pointers are always safe to call */
static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		= efx_port_dummy_op_int,
	.reconfigure	= efx_port_dummy_op_int,
	.poll		= efx_port_dummy_op_poll,
	.fini		= efx_port_dummy_op_void,
};
2237 | |||
2238 | /************************************************************************** | ||
2239 | * | ||
2240 | * Data housekeeping | ||
2241 | * | ||
2242 | **************************************************************************/ | ||
2243 | |||
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 * Returns 0 or -ENOMEM; on failure the structure has been cleaned up
 * with efx_fini_struct(). */
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_INIT;
	/* Use the PCI name until the net device has been registered */
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->mac_op = type->default_mac_ops;
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
	}

	efx->type = type;

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	/* efx_fini_struct() tolerates a partially-initialised struct */
	efx_fini_struct(efx);
	return -ENOMEM;
}
2300 | |||
/* Free resources allocated by efx_init_struct(); safe to call on a
 * partially-initialised structure (the init failure path uses it). */
static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	/* kfree(NULL) is a no-op, so unallocated channels are harmless */
	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
2313 | |||
2314 | /************************************************************************** | ||
2315 | * | ||
2316 | * PCI interface | ||
2317 | * | ||
2318 | **************************************************************************/ | ||
2319 | |||
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 * Tears down everything set up by efx_pci_probe_main(), in reverse
 * order.
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_nic_fini_interrupt(efx);
	efx_fini_channels(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}
2336 | |||
2337 | /* Final NIC shutdown | ||
2338 | * This is called only at module unload (or hotplug removal). | ||
2339 | */ | ||
2340 | static void efx_pci_remove(struct pci_dev *pci_dev) | ||
2341 | { | ||
2342 | struct efx_nic *efx; | ||
2343 | |||
2344 | efx = pci_get_drvdata(pci_dev); | ||
2345 | if (!efx) | ||
2346 | return; | ||
2347 | |||
2348 | /* Mark the NIC as fini, then stop the interface */ | ||
2349 | rtnl_lock(); | ||
2350 | efx->state = STATE_FINI; | ||
2351 | dev_close(efx->net_dev); | ||
2352 | |||
2353 | /* Allow any queued efx_resets() to complete */ | ||
2354 | rtnl_unlock(); | ||
2355 | |||
2356 | efx_unregister_netdev(efx); | ||
2357 | |||
2358 | efx_mtd_remove(efx); | ||
2359 | |||
2360 | /* Wait for any scheduled resets to complete. No more will be | ||
2361 | * scheduled from this point because efx_stop_all() has been | ||
2362 | * called, we are no longer registered with driverlink, and | ||
2363 | * the net_device's have been removed. */ | ||
2364 | cancel_work_sync(&efx->reset_work); | ||
2365 | |||
2366 | efx_pci_remove_main(efx); | ||
2367 | |||
2368 | efx_fini_io(efx); | ||
2369 | netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); | ||
2370 | |||
2371 | pci_set_drvdata(pci_dev, NULL); | ||
2372 | efx_fini_struct(efx); | ||
2373 | free_netdev(efx->net_dev); | ||
2374 | }; | ||
2375 | |||
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 * Brings up software state, the NIC, the port, the channels and the
 * interrupt handler; unwound by efx_pci_remove_main().
 * Returns 0 or a negative errno with everything torn down again.
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	efx_init_channels(efx);

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;

	return 0;

	/* Unwind in reverse order (no fail2 label: efx_init_napi()
	 * cannot fail, so its undo is folded into fail3) */
fail5:
	efx_fini_channels(efx);
	efx_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
fail1:
	return rc;
}
2423 | |||
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT.
	 * Retry the bring-up a few times: recoverable resets scheduled
	 * during probe cause another attempt. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		if (rc == 0) {
			if (efx->reset_pending) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}

		/* Retry if a recoverably reset event has been scheduled */
		if (efx->reset_pending &
		    ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
		    !efx->reset_pending)
			goto fail3;

		efx->reset_pending = 0;
	}

	if (rc) {
		netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to the OS,
	 * so that dev_open()|efx_start_all() will actually start the device */
	efx->state = STATE_RUNNING;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	rtnl_lock();
	efx_mtd_probe(efx); /* allowed to fail */
	rtnl_unlock();
	return 0;

fail5:
	efx_pci_remove_main(efx);
fail4:
fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	/* Positive rc would be silently returned as "success" to the
	 * PCI core; it should never happen */
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
2536 | |||
/* PM freeze: quiesce the datapath and detach the net device before
 * hibernation/suspend.  Reversed by efx_pm_thaw(). */
static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	/* Quench scheduled resets while frozen (see efx_reset_work()) */
	efx->state = STATE_FINI;

	netif_device_detach(efx->net_dev);

	efx_stop_all(efx);
	efx_fini_channels(efx);

	return 0;
}
2550 | |||
/* PM thaw: rebuild channels and restart the datapath after a freeze.
 * Also used as the tail of efx_pm_resume(). */
static int efx_pm_thaw(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	efx->state = STATE_INIT;

	efx_init_channels(efx);

	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	netif_device_attach(efx->net_dev);

	efx->state = STATE_RUNNING;

	efx->type->resume_wol(efx);

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;
}
2576 | |||
/* PM poweroff: shut the hardware down and place the PCI function in
 * D3hot.  Reversed by efx_pm_resume(). */
static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	/* Pending resets are moot once the hardware is powered off */
	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}
2589 | |||
/* Used for both resume and restore: restore PCI power/state, fully
 * reset and re-init the NIC, then thaw the datapath. */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	/* Hardware state after poweroff is unknown; do a full reset */
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	efx_pm_thaw(dev);
	return 0;
}
2614 | |||
/* PM suspend = freeze then poweroff; if poweroff fails, resume so
 * the device is left in a usable state. */
static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}
2625 | |||
2626 | static struct dev_pm_ops efx_pm_ops = { | ||
2627 | .suspend = efx_pm_suspend, | ||
2628 | .resume = efx_pm_resume, | ||
2629 | .freeze = efx_pm_freeze, | ||
2630 | .thaw = efx_pm_thaw, | ||
2631 | .poweroff = efx_pm_poweroff, | ||
2632 | .restore = efx_pm_resume, | ||
2633 | }; | ||
2634 | |||
/* PCI driver definition binding the ID table, probe/remove and PM ops */
static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
};
2642 | |||
2643 | /************************************************************************** | ||
2644 | * | ||
2645 | * Kernel module interface | ||
2646 | * | ||
2647 | *************************************************************************/ | ||
2648 | |||
/* Module parameter: preferred interrupt mode, visible read-only in
 * sysfs (0444) */
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2652 | |||
/* Module entry point: register the netdev notifier, create the shared
 * reset workqueue and register the PCI driver.  Unwinds in reverse
 * order on failure. */
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}
2682 | |||
/* Module exit: tear down in the reverse order of efx_init_module() */
static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);

}
2692 | |||
2693 | module_init(efx_init_module); | ||
2694 | module_exit(efx_exit_module); | ||
2695 | |||
2696 | MODULE_AUTHOR("Solarflare Communications and " | ||
2697 | "Michael Brown <mbrown@fensystems.co.uk>"); | ||
2698 | MODULE_DESCRIPTION("Solarflare Communications network driver"); | ||
2699 | MODULE_LICENSE("GPL"); | ||
2700 | MODULE_DEVICE_TABLE(pci, efx_pci_table); | ||
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h new file mode 100644 index 00000000000..b0d1209ea18 --- /dev/null +++ b/drivers/net/sfc/efx.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_EFX_H | ||
12 | #define EFX_EFX_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | #include "filter.h" | ||
16 | |||
17 | /* PCI IDs */ | ||
18 | #define EFX_VENDID_SFC 0x1924 | ||
19 | #define FALCON_A_P_DEVID 0x0703 | ||
20 | #define FALCON_A_S_DEVID 0x6703 | ||
21 | #define FALCON_B_P_DEVID 0x0710 | ||
22 | #define BETHPAGE_A_P_DEVID 0x0803 | ||
23 | #define SIENA_A_P_DEVID 0x0813 | ||
24 | |||
25 | /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ | ||
26 | #define EFX_MEM_BAR 2 | ||
27 | |||
28 | /* TX */ | ||
29 | extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); | ||
30 | extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); | ||
31 | extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); | ||
32 | extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); | ||
33 | extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); | ||
34 | extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); | ||
35 | extern netdev_tx_t | ||
36 | efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); | ||
37 | extern netdev_tx_t | ||
38 | efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); | ||
39 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); | ||
40 | extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); | ||
41 | |||
42 | /* RX */ | ||
43 | extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); | ||
44 | extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); | ||
45 | extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); | ||
46 | extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | ||
47 | extern void efx_rx_strategy(struct efx_channel *channel); | ||
48 | extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | ||
49 | extern void efx_rx_slow_fill(unsigned long context); | ||
50 | extern void __efx_rx_packet(struct efx_channel *channel, | ||
51 | struct efx_rx_buffer *rx_buf, bool checksummed); | ||
52 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | ||
53 | unsigned int len, bool checksummed, bool discard); | ||
54 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); | ||
55 | |||
56 | #define EFX_MAX_DMAQ_SIZE 4096UL | ||
57 | #define EFX_DEFAULT_DMAQ_SIZE 1024UL | ||
58 | #define EFX_MIN_DMAQ_SIZE 512UL | ||
59 | |||
60 | #define EFX_MAX_EVQ_SIZE 16384UL | ||
61 | #define EFX_MIN_EVQ_SIZE 512UL | ||
62 | |||
63 | /* The smallest [rt]xq_entries that the driver supports. Callers of | ||
64 | * efx_wake_queue() assume that they can subsequently send at least one | ||
65 | * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ | ||
66 | #define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) | ||
67 | |||
68 | /* Filters */ | ||
69 | extern int efx_probe_filters(struct efx_nic *efx); | ||
70 | extern void efx_restore_filters(struct efx_nic *efx); | ||
71 | extern void efx_remove_filters(struct efx_nic *efx); | ||
72 | extern int efx_filter_insert_filter(struct efx_nic *efx, | ||
73 | struct efx_filter_spec *spec, | ||
74 | bool replace); | ||
75 | extern int efx_filter_remove_filter(struct efx_nic *efx, | ||
76 | struct efx_filter_spec *spec); | ||
77 | extern void efx_filter_clear_rx(struct efx_nic *efx, | ||
78 | enum efx_filter_priority priority); | ||
79 | #ifdef CONFIG_RFS_ACCEL | ||
80 | extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | ||
81 | u16 rxq_index, u32 flow_id); | ||
82 | extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); | ||
83 | static inline void efx_filter_rfs_expire(struct efx_channel *channel) | ||
84 | { | ||
85 | if (channel->rfs_filters_added >= 60 && | ||
86 | __efx_filter_rfs_expire(channel->efx, 100)) | ||
87 | channel->rfs_filters_added -= 60; | ||
88 | } | ||
89 | #define efx_filter_rfs_enabled() 1 | ||
90 | #else | ||
91 | static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} | ||
92 | #define efx_filter_rfs_enabled() 0 | ||
93 | #endif | ||
94 | |||
95 | /* Channels */ | ||
96 | extern void efx_process_channel_now(struct efx_channel *channel); | ||
97 | extern int | ||
98 | efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); | ||
99 | |||
100 | /* Ports */ | ||
101 | extern int efx_reconfigure_port(struct efx_nic *efx); | ||
102 | extern int __efx_reconfigure_port(struct efx_nic *efx); | ||
103 | |||
104 | /* Ethtool support */ | ||
105 | extern const struct ethtool_ops efx_ethtool_ops; | ||
106 | |||
107 | /* Reset handling */ | ||
108 | extern int efx_reset(struct efx_nic *efx, enum reset_type method); | ||
109 | extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); | ||
110 | extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); | ||
111 | |||
112 | /* Global */ | ||
113 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); | ||
114 | extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, | ||
115 | int rx_usecs, bool rx_adaptive); | ||
116 | |||
117 | /* Dummy PHY ops for PHY drivers */ | ||
118 | extern int efx_port_dummy_op_int(struct efx_nic *efx); | ||
119 | extern void efx_port_dummy_op_void(struct efx_nic *efx); | ||
120 | |||
121 | |||
122 | /* MTD */ | ||
123 | #ifdef CONFIG_SFC_MTD | ||
124 | extern int efx_mtd_probe(struct efx_nic *efx); | ||
125 | extern void efx_mtd_rename(struct efx_nic *efx); | ||
126 | extern void efx_mtd_remove(struct efx_nic *efx); | ||
127 | #else | ||
128 | static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } | ||
129 | static inline void efx_mtd_rename(struct efx_nic *efx) {} | ||
130 | static inline void efx_mtd_remove(struct efx_nic *efx) {} | ||
131 | #endif | ||
132 | |||
133 | static inline void efx_schedule_channel(struct efx_channel *channel) | ||
134 | { | ||
135 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | ||
136 | "channel %d scheduling NAPI poll on CPU%d\n", | ||
137 | channel->channel, raw_smp_processor_id()); | ||
138 | channel->work_pending = true; | ||
139 | |||
140 | napi_schedule(&channel->napi_str); | ||
141 | } | ||
142 | |||
143 | extern void efx_link_status_changed(struct efx_nic *efx); | ||
144 | extern void efx_link_set_advertising(struct efx_nic *efx, u32); | ||
145 | extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); | ||
146 | |||
147 | #endif /* EFX_EFX_H */ | ||
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h new file mode 100644 index 00000000000..d725a8fbe1a --- /dev/null +++ b/drivers/net/sfc/enum.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_ENUM_H | ||
11 | #define EFX_ENUM_H | ||
12 | |||
13 | /** | ||
14 | * enum efx_loopback_mode - loopback modes | ||
15 | * @LOOPBACK_NONE: no loopback | ||
16 | * @LOOPBACK_DATA: data path loopback | ||
17 | * @LOOPBACK_GMAC: loopback within GMAC | ||
18 | * @LOOPBACK_XGMII: loopback after XMAC | ||
19 | * @LOOPBACK_XGXS: loopback within BPX after XGXS | ||
20 | * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes | ||
21 | * @LOOPBACK_GMII: loopback within BPX after GMAC | ||
22 | * @LOOPBACK_SGMII: loopback within BPX within SGMII | ||
23 | * @LOOPBACK_XGBR: loopback within BPX within XGBR | ||
24 | * @LOOPBACK_XFI: loopback within BPX before XFI serdes | ||
25 | * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes | ||
26 | * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII | ||
27 | * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII | ||
28 | * @LOOPBACK_XFI_FAR: loopback after XFI serdes | ||
29 | * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level | ||
30 | * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level | ||
31 | * @LOOPBACK_PCS: loopback within 10G PHY at PCS level | ||
32 | * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level | ||
33 | * @LOOPBACK_XPORT: cross port loopback | ||
34 | * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC | ||
35 | * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes | ||
36 | * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes | ||
37 | * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes | ||
38 | * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC | ||
39 | * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes | ||
40 | * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes | ||
41 | * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level | ||
42 | */ | ||
/* Please keep up-to-date w.r.t the following two #defines */
/* NOTE(review): every value is numbered explicitly, which suggests the
 * numbering is significant outside this driver (e.g. shared with
 * firmware) — confirm before renumbering or reordering. */
enum efx_loopback_mode {
	LOOPBACK_NONE = 0,
	LOOPBACK_DATA = 1,
	LOOPBACK_GMAC = 2,
	LOOPBACK_XGMII = 3,
	LOOPBACK_XGXS = 4,
	LOOPBACK_XAUI = 5,
	LOOPBACK_GMII = 6,
	LOOPBACK_SGMII = 7,
	LOOPBACK_XGBR = 8,
	LOOPBACK_XFI = 9,
	LOOPBACK_XAUI_FAR = 10,
	LOOPBACK_GMII_FAR = 11,
	LOOPBACK_SGMII_FAR = 12,
	LOOPBACK_XFI_FAR = 13,
	LOOPBACK_GPHY = 14,
	LOOPBACK_PHYXS = 15,
	LOOPBACK_PCS = 16,
	LOOPBACK_PMAPMD = 17,
	LOOPBACK_XPORT = 18,
	LOOPBACK_XGMII_WS = 19,
	LOOPBACK_XAUI_WS = 20,
	LOOPBACK_XAUI_WS_FAR = 21,
	LOOPBACK_XAUI_WS_NEAR = 22,
	LOOPBACK_GMII_WS = 23,
	LOOPBACK_XFI_WS = 24,
	LOOPBACK_XFI_WS_FAR = 25,
	LOOPBACK_PHYXS_WS = 26,
	LOOPBACK_MAX		/* sentinel: number of loopback modes */
};
/* Highest mode exercised by the ethtool self-tests (see the loopback
 * loop in efx_ethtool_fill_self_tests()) */
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
75 | |||
/* These loopbacks occur within the controller.
 * Fix: the original definition OR'ed in (1 << LOOPBACK_SGMII) twice;
 * the redundant duplicate term is removed here.  The mask's value is
 * unchanged, since OR-ing the same bit twice is idempotent. */
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) |		\
			    (1 << LOOPBACK_GMAC) |		\
			    (1 << LOOPBACK_XGMII) |		\
			    (1 << LOOPBACK_XGXS) |		\
			    (1 << LOOPBACK_XAUI) |		\
			    (1 << LOOPBACK_GMII) |		\
			    (1 << LOOPBACK_SGMII) |		\
			    (1 << LOOPBACK_XGBR) |		\
			    (1 << LOOPBACK_XFI) |		\
			    (1 << LOOPBACK_XAUI_FAR) |		\
			    (1 << LOOPBACK_GMII_FAR) |		\
			    (1 << LOOPBACK_SGMII_FAR) |		\
			    (1 << LOOPBACK_XFI_FAR) |		\
			    (1 << LOOPBACK_XGMII_WS) |		\
			    (1 << LOOPBACK_XAUI_WS) |		\
			    (1 << LOOPBACK_XAUI_WS_FAR) |	\
			    (1 << LOOPBACK_XAUI_WS_NEAR) |	\
			    (1 << LOOPBACK_GMII_WS) |		\
			    (1 << LOOPBACK_XFI_WS) |		\
			    (1 << LOOPBACK_XFI_WS_FAR))
98 | |||
99 | #define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \ | ||
100 | (1 << LOOPBACK_XAUI_WS) | \ | ||
101 | (1 << LOOPBACK_XAUI_WS_FAR) | \ | ||
102 | (1 << LOOPBACK_XAUI_WS_NEAR) | \ | ||
103 | (1 << LOOPBACK_GMII_WS) | \ | ||
104 | (1 << LOOPBACK_XFI_WS) | \ | ||
105 | (1 << LOOPBACK_XFI_WS_FAR) | \ | ||
106 | (1 << LOOPBACK_PHYXS_WS)) | ||
107 | |||
108 | #define LOOPBACKS_EXTERNAL(_efx) \ | ||
109 | ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \ | ||
110 | ~(1 << LOOPBACK_NONE)) | ||
111 | |||
112 | #define LOOPBACK_MASK(_efx) \ | ||
113 | (1 << (_efx)->loopback_mode) | ||
114 | |||
115 | #define LOOPBACK_INTERNAL(_efx) \ | ||
116 | (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) | ||
117 | |||
118 | #define LOOPBACK_EXTERNAL(_efx) \ | ||
119 | (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx))) | ||
120 | |||
121 | #define LOOPBACK_CHANGED(_from, _to, _mask) \ | ||
122 | (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) | ||
123 | |||
124 | #define LOOPBACK_OUT_OF(_from, _to, _mask) \ | ||
125 | ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask))) | ||
126 | |||
127 | /*****************************************************************************/ | ||
128 | |||
129 | /** | ||
130 | * enum reset_type - reset types | ||
131 | * | ||
 * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
 * %RESET_TYPE_DISABLE specify the method/scope of the reset.  The
 * other values specify reasons, which efx_schedule_reset() will choose
 * a method for.
136 | * | ||
137 | * Reset methods are numbered in order of increasing scope. | ||
138 | * | ||
139 | * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts | ||
140 | * @RESET_TYPE_ALL: reset everything but PCI core blocks | ||
141 | * @RESET_TYPE_WORLD: reset everything, save & restore PCI config | ||
142 | * @RESET_TYPE_DISABLE: disable NIC | ||
143 | * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog | ||
144 | * @RESET_TYPE_INT_ERROR: reset due to internal error | ||
145 | * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors | ||
146 | * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch | ||
147 | * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch | ||
148 | * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors | ||
149 | * @RESET_TYPE_MC_FAILURE: MC reboot/assertion | ||
150 | */ | ||
enum reset_type {
	/* Reset methods (scope of the reset), in order of increasing
	 * severity — see the kernel-doc above */
	RESET_TYPE_INVISIBLE = 0,
	RESET_TYPE_ALL = 1,
	RESET_TYPE_WORLD = 2,
	RESET_TYPE_DISABLE = 3,
	RESET_TYPE_MAX_METHOD,	/* number of reset methods */
	/* Reset reasons; efx_schedule_reset() chooses a method for these */
	RESET_TYPE_TX_WATCHDOG,
	RESET_TYPE_INT_ERROR,
	RESET_TYPE_RX_RECOVERY,
	RESET_TYPE_RX_DESC_FETCH,
	RESET_TYPE_TX_DESC_FETCH,
	RESET_TYPE_TX_SKIP,
	RESET_TYPE_MC_FAILURE,
	RESET_TYPE_MAX,		/* sentinel: total number of reset types */
};
166 | |||
167 | #endif /* EFX_ENUM_H */ | ||
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c new file mode 100644 index 00000000000..bc4643af6dd --- /dev/null +++ b/drivers/net/sfc/ethtool.c | |||
@@ -0,0 +1,1012 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/netdevice.h> | ||
12 | #include <linux/ethtool.h> | ||
13 | #include <linux/rtnetlink.h> | ||
14 | #include <linux/in.h> | ||
15 | #include "net_driver.h" | ||
16 | #include "workarounds.h" | ||
17 | #include "selftest.h" | ||
18 | #include "efx.h" | ||
19 | #include "filter.h" | ||
20 | #include "nic.h" | ||
21 | |||
/* Fixed-size buffer for one ethtool string (a stat or test name). */
struct ethtool_string {
	char name[ETH_GSTRING_LEN];
};

/* Describes one entry of efx_ethtool_stats[]: which structure the
 * counter lives in, its byte offset there, and a reader that widens
 * the raw field to the u64 that ethtool reports. */
struct efx_ethtool_stat {
	const char *name;
	enum {
		EFX_ETHTOOL_STAT_SOURCE_mac_stats,
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;
	unsigned offset;
	u64(*get_stat) (void *field); /* Reader function */
};

/* Initialiser for a struct #efx_ethtool_stat with type-checking.
 * The ternary's two arms are deliberately identical: comparing a null
 * (field_type *) against &source->field makes the compiler reject the
 * initialiser if the field's declared type does not match field_type,
 * while the chosen value is always the field's offset. */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
				get_stat_function) {	\
	.name = #stat_name,					\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,	\
	.offset = ((((field_type *) 0) ==			\
		      &((struct efx_##source_name *)0)->field) ?	\
		    offsetof(struct efx_##source_name, field) :		\
		    offsetof(struct efx_##source_name, field)),		\
	.get_stat = get_stat_function,				\
}
49 | |||
50 | static u64 efx_get_uint_stat(void *field) | ||
51 | { | ||
52 | return *(unsigned int *)field; | ||
53 | } | ||
54 | |||
55 | static u64 efx_get_ulong_stat(void *field) | ||
56 | { | ||
57 | return *(unsigned long *)field; | ||
58 | } | ||
59 | |||
60 | static u64 efx_get_u64_stat(void *field) | ||
61 | { | ||
62 | return *(u64 *) field; | ||
63 | } | ||
64 | |||
65 | static u64 efx_get_atomic_stat(void *field) | ||
66 | { | ||
67 | return atomic_read((atomic_t *) field); | ||
68 | } | ||
69 | |||
70 | #define EFX_ETHTOOL_ULONG_MAC_STAT(field) \ | ||
71 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ | ||
72 | unsigned long, efx_get_ulong_stat) | ||
73 | |||
74 | #define EFX_ETHTOOL_U64_MAC_STAT(field) \ | ||
75 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ | ||
76 | u64, efx_get_u64_stat) | ||
77 | |||
78 | #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ | ||
79 | EFX_ETHTOOL_STAT(name, nic, n_##name, \ | ||
80 | unsigned int, efx_get_uint_stat) | ||
81 | |||
82 | #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ | ||
83 | EFX_ETHTOOL_STAT(field, nic, field, \ | ||
84 | atomic_t, efx_get_atomic_stat) | ||
85 | |||
86 | #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ | ||
87 | EFX_ETHTOOL_STAT(field, channel, n_##field, \ | ||
88 | unsigned int, efx_get_uint_stat) | ||
89 | |||
90 | #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ | ||
91 | EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ | ||
92 | unsigned int, efx_get_uint_stat) | ||
93 | |||
94 | static struct efx_ethtool_stat efx_ethtool_stats[] = { | ||
95 | EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), | ||
96 | EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), | ||
97 | EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), | ||
98 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), | ||
99 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), | ||
100 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), | ||
101 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), | ||
102 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), | ||
103 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), | ||
104 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), | ||
105 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), | ||
106 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), | ||
107 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), | ||
108 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), | ||
109 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), | ||
110 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), | ||
111 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), | ||
112 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), | ||
113 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), | ||
114 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), | ||
115 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), | ||
116 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), | ||
117 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), | ||
118 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), | ||
119 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), | ||
120 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), | ||
121 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), | ||
122 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), | ||
123 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), | ||
124 | EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), | ||
125 | EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), | ||
126 | EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), | ||
127 | EFX_ETHTOOL_UINT_TXQ_STAT(pushes), | ||
128 | EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), | ||
129 | EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), | ||
130 | EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), | ||
131 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), | ||
132 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), | ||
133 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), | ||
134 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), | ||
135 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), | ||
136 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), | ||
137 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), | ||
138 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), | ||
139 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), | ||
140 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), | ||
141 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), | ||
142 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), | ||
143 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), | ||
144 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), | ||
145 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), | ||
146 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), | ||
147 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), | ||
148 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), | ||
149 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), | ||
150 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), | ||
151 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), | ||
152 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), | ||
153 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), | ||
154 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), | ||
155 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), | ||
156 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), | ||
157 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), | ||
158 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), | ||
159 | EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), | ||
160 | EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), | ||
161 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), | ||
162 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), | ||
163 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), | ||
164 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), | ||
165 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), | ||
166 | }; | ||
167 | |||
168 | /* Number of ethtool statistics */ | ||
169 | #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) | ||
170 | |||
171 | #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB | ||
172 | |||
173 | /************************************************************************** | ||
174 | * | ||
175 | * Ethtool operations | ||
176 | * | ||
177 | ************************************************************************** | ||
178 | */ | ||
179 | |||
180 | /* Identify device by flashing LEDs */ | ||
181 | static int efx_ethtool_phys_id(struct net_device *net_dev, | ||
182 | enum ethtool_phys_id_state state) | ||
183 | { | ||
184 | struct efx_nic *efx = netdev_priv(net_dev); | ||
185 | enum efx_led_mode mode = EFX_LED_DEFAULT; | ||
186 | |||
187 | switch (state) { | ||
188 | case ETHTOOL_ID_ON: | ||
189 | mode = EFX_LED_ON; | ||
190 | break; | ||
191 | case ETHTOOL_ID_OFF: | ||
192 | mode = EFX_LED_OFF; | ||
193 | break; | ||
194 | case ETHTOOL_ID_INACTIVE: | ||
195 | mode = EFX_LED_DEFAULT; | ||
196 | break; | ||
197 | case ETHTOOL_ID_ACTIVE: | ||
198 | return 1; /* cycle on/off once per second */ | ||
199 | } | ||
200 | |||
201 | efx->type->set_id_led(efx, mode); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | /* This must be called with rtnl_lock held. */ | ||
206 | static int efx_ethtool_get_settings(struct net_device *net_dev, | ||
207 | struct ethtool_cmd *ecmd) | ||
208 | { | ||
209 | struct efx_nic *efx = netdev_priv(net_dev); | ||
210 | struct efx_link_state *link_state = &efx->link_state; | ||
211 | |||
212 | mutex_lock(&efx->mac_lock); | ||
213 | efx->phy_op->get_settings(efx, ecmd); | ||
214 | mutex_unlock(&efx->mac_lock); | ||
215 | |||
216 | /* GMAC does not support 1000Mbps HD */ | ||
217 | ecmd->supported &= ~SUPPORTED_1000baseT_Half; | ||
218 | /* Both MACs support pause frames (bidirectional and respond-only) */ | ||
219 | ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
220 | |||
221 | if (LOOPBACK_INTERNAL(efx)) { | ||
222 | ethtool_cmd_speed_set(ecmd, link_state->speed); | ||
223 | ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; | ||
224 | } | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | /* This must be called with rtnl_lock held. */ | ||
230 | static int efx_ethtool_set_settings(struct net_device *net_dev, | ||
231 | struct ethtool_cmd *ecmd) | ||
232 | { | ||
233 | struct efx_nic *efx = netdev_priv(net_dev); | ||
234 | int rc; | ||
235 | |||
236 | /* GMAC does not support 1000Mbps HD */ | ||
237 | if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && | ||
238 | (ecmd->duplex != DUPLEX_FULL)) { | ||
239 | netif_dbg(efx, drv, efx->net_dev, | ||
240 | "rejecting unsupported 1000Mbps HD setting\n"); | ||
241 | return -EINVAL; | ||
242 | } | ||
243 | |||
244 | mutex_lock(&efx->mac_lock); | ||
245 | rc = efx->phy_op->set_settings(efx, ecmd); | ||
246 | mutex_unlock(&efx->mac_lock); | ||
247 | return rc; | ||
248 | } | ||
249 | |||
/* Fill in driver name/version and bus info for ethtool -i.  The
 * firmware version is only available via MCDI, i.e. on Siena-class
 * (SFC9000-family) NICs and later. */
static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_mcdi_print_fwver(efx, info->fw_version,
				     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
262 | |||
263 | static int efx_ethtool_get_regs_len(struct net_device *net_dev) | ||
264 | { | ||
265 | return efx_nic_get_regs_len(netdev_priv(net_dev)); | ||
266 | } | ||
267 | |||
268 | static void efx_ethtool_get_regs(struct net_device *net_dev, | ||
269 | struct ethtool_regs *regs, void *buf) | ||
270 | { | ||
271 | struct efx_nic *efx = netdev_priv(net_dev); | ||
272 | |||
273 | regs->version = efx->type->revision; | ||
274 | efx_nic_get_regs(efx, buf); | ||
275 | } | ||
276 | |||
/* Read the driver's message-level (netif_msg_*) bitmask */
static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->msg_enable;
}

/* Set the driver's message-level (netif_msg_*) bitmask */
static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx->msg_enable = msg_enable;
}
288 | |||
289 | /** | ||
290 | * efx_fill_test - fill in an individual self-test entry | ||
291 | * @test_index: Index of the test | ||
292 | * @strings: Ethtool strings, or %NULL | ||
293 | * @data: Ethtool test results, or %NULL | ||
294 | * @test: Pointer to test result (used only if data != %NULL) | ||
295 | * @unit_format: Unit name format (e.g. "chan\%d") | ||
296 | * @unit_id: Unit id (e.g. 0 for "chan0") | ||
297 | * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") | ||
298 | * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") | ||
299 | * | ||
300 | * Fill in an individual self-test entry. | ||
301 | */ | ||
302 | static void efx_fill_test(unsigned int test_index, | ||
303 | struct ethtool_string *strings, u64 *data, | ||
304 | int *test, const char *unit_format, int unit_id, | ||
305 | const char *test_format, const char *test_id) | ||
306 | { | ||
307 | struct ethtool_string unit_str, test_str; | ||
308 | |||
309 | /* Fill data value, if applicable */ | ||
310 | if (data) | ||
311 | data[test_index] = *test; | ||
312 | |||
313 | /* Fill string, if applicable */ | ||
314 | if (strings) { | ||
315 | if (strchr(unit_format, '%')) | ||
316 | snprintf(unit_str.name, sizeof(unit_str.name), | ||
317 | unit_format, unit_id); | ||
318 | else | ||
319 | strcpy(unit_str.name, unit_format); | ||
320 | snprintf(test_str.name, sizeof(test_str.name), | ||
321 | test_format, test_id); | ||
322 | snprintf(strings[test_index].name, | ||
323 | sizeof(strings[test_index].name), | ||
324 | "%-6s %-24s", unit_str.name, test_str.name); | ||
325 | } | ||
326 | } | ||
327 | |||
328 | #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel | ||
329 | #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue | ||
330 | #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue | ||
331 | #define EFX_LOOPBACK_NAME(_mode, _counter) \ | ||
332 | "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) | ||
333 | |||
/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Emits one tx_sent/tx_done entry pair per TX queue, followed by the
 * shared rx_good/rx_bad counters.
 *
 * Return: the index of the next free test entry.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  struct ethtool_string *strings, u64 *data)
{
	/* NOTE(review): only channel 0's TX queues are walked here —
	 * presumably the loopback self-test transmits only on that
	 * channel; confirm against selftest.c */
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}
373 | |||
/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * When @strings and @data are both %NULL this merely counts the
 * entries (the &tests->... expressions are only address computations;
 * efx_fill_test() dereferences them only when @data is non-%NULL) —
 * this is how the ETH_SS_TEST string-set size is obtained.
 *
 * Return: the total number of self-test entries.
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				       struct efx_self_tests *tests,
				       struct ethtool_string *strings,
				       u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_poll[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.poll", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	/* PHY-specific tests, enumerated by name until the PHY driver
	 * returns NULL */
	if (efx->phy_op->run_tests != NULL) {
		EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests, one block per supported mode */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}
443 | |||
444 | static int efx_ethtool_get_sset_count(struct net_device *net_dev, | ||
445 | int string_set) | ||
446 | { | ||
447 | switch (string_set) { | ||
448 | case ETH_SS_STATS: | ||
449 | return EFX_ETHTOOL_NUM_STATS; | ||
450 | case ETH_SS_TEST: | ||
451 | return efx_ethtool_fill_self_tests(netdev_priv(net_dev), | ||
452 | NULL, NULL, NULL); | ||
453 | default: | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | static void efx_ethtool_get_strings(struct net_device *net_dev, | ||
459 | u32 string_set, u8 *strings) | ||
460 | { | ||
461 | struct efx_nic *efx = netdev_priv(net_dev); | ||
462 | struct ethtool_string *ethtool_strings = | ||
463 | (struct ethtool_string *)strings; | ||
464 | int i; | ||
465 | |||
466 | switch (string_set) { | ||
467 | case ETH_SS_STATS: | ||
468 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) | ||
469 | strncpy(ethtool_strings[i].name, | ||
470 | efx_ethtool_stats[i].name, | ||
471 | sizeof(ethtool_strings[i].name)); | ||
472 | break; | ||
473 | case ETH_SS_TEST: | ||
474 | efx_ethtool_fill_self_tests(efx, NULL, | ||
475 | ethtool_strings, NULL); | ||
476 | break; | ||
477 | default: | ||
478 | /* No other string sets */ | ||
479 | break; | ||
480 | } | ||
481 | } | ||
482 | |||
/* ethtool -S handler: fill @data with one u64 per entry of
 * efx_ethtool_stats[].  Each table entry records which structure its
 * value lives in (MAC stats, NIC, channel or TX queue) plus a byte
 * offset; per-channel and per-queue sources are summed over all
 * instances. */
static void efx_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct efx_ethtool_stat *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct rtnl_link_stats64 temp;
	int i;

	EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);

	/* Update MAC and NIC statistics (result in @temp is unused;
	 * the call refreshes the structures read below) */
	dev_get_stats(net_dev, &temp);

	/* Fill detailed statistics buffer */
	for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
		stat = &efx_ethtool_stats[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
			data[i] = stat->get_stat((void *)mac_stats +
						 stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			/* Sum over all channels */
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			/* Sum over every TX queue of every channel */
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
}
529 | |||
/* ethtool self-test entry point.  Failure is reported both through
 * ETH_TEST_FL_FAILED in @test->flags and through the per-test @data
 * array, whose layout is defined by efx_ethtool_fill_self_tests().
 * Runs with the RTNL held; brings the interface up temporarily if it
 * is not already up. */
static void efx_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	int already_up;
	int rc = -ENOMEM;

	/* Results structure is heap-allocated; on failure we skip
	 * straight to flag reporting (nothing to fill or free) */
	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;


	ASSERT_RTNL();
	if (efx->state != STATE_RUNNING) {
		rc = -EIO;
		goto fail1;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto fail1;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	/* Restore the interface state we found on entry */
	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

fail1:
	/* Fill ethtool results structures */
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}
580 | |||
581 | /* Restart autonegotiation */ | ||
582 | static int efx_ethtool_nway_reset(struct net_device *net_dev) | ||
583 | { | ||
584 | struct efx_nic *efx = netdev_priv(net_dev); | ||
585 | |||
586 | return mdio45_nway_restart(&efx->mdio); | ||
587 | } | ||
588 | |||
/* Report interrupt moderation settings.  Moderation is per-channel,
 * but ethtool only has single RX/TX values, so for TX we report the
 * lowest moderation over channels that own TX queues; for a qualifying
 * channel numbered beyond the RX channels, 0 is reported instead of
 * its stored value (NOTE(review): presumably because TX-only channels
 * carry no meaningful TX moderation here — confirm against
 * efx_init_irq_moderation). */
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	memset(coalesce, 0, sizeof(*coalesce));

	/* Find lowest IRQ moderation across all used TX queues */
	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
	efx_for_each_channel(channel, efx) {
		if (!efx_channel_has_tx_queues(channel))
			continue;
		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
			if (channel->channel < efx->n_rx_channels)
				coalesce->tx_coalesce_usecs_irq =
					channel->irq_moderation;
			else
				coalesce->tx_coalesce_usecs_irq = 0;
		}
	}

	coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
	coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;

	/* Stored values are in hardware resolution units; scale to
	 * microseconds for ethtool */
	coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;
	coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION;

	return 0;
}
619 | |||
620 | /* Set coalescing parameters | ||
621 | * The difficulties occur for shared channels | ||
622 | */ | ||
623 | static int efx_ethtool_set_coalesce(struct net_device *net_dev, | ||
624 | struct ethtool_coalesce *coalesce) | ||
625 | { | ||
626 | struct efx_nic *efx = netdev_priv(net_dev); | ||
627 | struct efx_channel *channel; | ||
628 | unsigned tx_usecs, rx_usecs, adaptive; | ||
629 | |||
630 | if (coalesce->use_adaptive_tx_coalesce) | ||
631 | return -EOPNOTSUPP; | ||
632 | |||
633 | if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { | ||
634 | netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. " | ||
635 | "Only rx/tx_coalesce_usecs_irq are supported\n"); | ||
636 | return -EOPNOTSUPP; | ||
637 | } | ||
638 | |||
639 | rx_usecs = coalesce->rx_coalesce_usecs_irq; | ||
640 | tx_usecs = coalesce->tx_coalesce_usecs_irq; | ||
641 | adaptive = coalesce->use_adaptive_rx_coalesce; | ||
642 | |||
643 | /* If the channel is shared only allow RX parameters to be set */ | ||
644 | efx_for_each_channel(channel, efx) { | ||
645 | if (efx_channel_has_rx_queue(channel) && | ||
646 | efx_channel_has_tx_queues(channel) && | ||
647 | tx_usecs) { | ||
648 | netif_err(efx, drv, efx->net_dev, "Channel is shared. " | ||
649 | "Only RX coalescing may be set\n"); | ||
650 | return -EOPNOTSUPP; | ||
651 | } | ||
652 | } | ||
653 | |||
654 | efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); | ||
655 | efx_for_each_channel(channel, efx) | ||
656 | efx->type->push_irq_moderation(channel); | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | static void efx_ethtool_get_ringparam(struct net_device *net_dev, | ||
662 | struct ethtool_ringparam *ring) | ||
663 | { | ||
664 | struct efx_nic *efx = netdev_priv(net_dev); | ||
665 | |||
666 | ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; | ||
667 | ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; | ||
668 | ring->rx_mini_max_pending = 0; | ||
669 | ring->rx_jumbo_max_pending = 0; | ||
670 | ring->rx_pending = efx->rxq_entries; | ||
671 | ring->tx_pending = efx->txq_entries; | ||
672 | ring->rx_mini_pending = 0; | ||
673 | ring->rx_jumbo_pending = 0; | ||
674 | } | ||
675 | |||
676 | static int efx_ethtool_set_ringparam(struct net_device *net_dev, | ||
677 | struct ethtool_ringparam *ring) | ||
678 | { | ||
679 | struct efx_nic *efx = netdev_priv(net_dev); | ||
680 | |||
681 | if (ring->rx_mini_pending || ring->rx_jumbo_pending || | ||
682 | ring->rx_pending > EFX_MAX_DMAQ_SIZE || | ||
683 | ring->tx_pending > EFX_MAX_DMAQ_SIZE) | ||
684 | return -EINVAL; | ||
685 | |||
686 | if (ring->rx_pending < EFX_MIN_RING_SIZE || | ||
687 | ring->tx_pending < EFX_MIN_RING_SIZE) { | ||
688 | netif_err(efx, drv, efx->net_dev, | ||
689 | "TX and RX queues cannot be smaller than %ld\n", | ||
690 | EFX_MIN_RING_SIZE); | ||
691 | return -EINVAL; | ||
692 | } | ||
693 | |||
694 | return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); | ||
695 | } | ||
696 | |||
/* Set flow control (pause) parameters.  Runs under efx->mac_lock; may
 * reconfigure the PHY (when advertised capabilities change) and always
 * reconfigures the MAC on the success path. */
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	bool reset;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	/* Translate ethtool booleans into the driver's EFX_FC_* mask */
	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	/* tx-on/rx-off is rejected as unsupported */
	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	/* Autonegotiated pause needs link autonegotiation enabled */
	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* TX flow control may automatically turn itself off if the
	 * link partner (intermittently) stops responding to pause
	 * frames. There isn't any indication that this has happened,
	 * so the best we do is leave it up to the user to spot this
	 * and fix it be cycling transmit flow control on this end. */
	reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
	if (EFX_WORKAROUND_11482(efx) && reset) {
		if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
			/* Recover by resetting the EM block */
			falcon_stop_nic_stats(efx);
			falcon_drain_tx_fifo(efx);
			efx->mac_op->reconfigure(efx);
			falcon_start_nic_stats(efx);
		} else {
			/* Schedule a reset to recover */
			efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
		}
	}

	old_adv = efx->link_advertising;
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	/* Only reconfigure the PHY if the advertised capabilities or
	 * the autoneg request actually changed */
	if (efx->link_advertising != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm doing this twice */
	efx->mac_op->reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}
769 | |||
770 | static void efx_ethtool_get_pauseparam(struct net_device *net_dev, | ||
771 | struct ethtool_pauseparam *pause) | ||
772 | { | ||
773 | struct efx_nic *efx = netdev_priv(net_dev); | ||
774 | |||
775 | pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); | ||
776 | pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); | ||
777 | pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); | ||
778 | } | ||
779 | |||
780 | |||
781 | static void efx_ethtool_get_wol(struct net_device *net_dev, | ||
782 | struct ethtool_wolinfo *wol) | ||
783 | { | ||
784 | struct efx_nic *efx = netdev_priv(net_dev); | ||
785 | return efx->type->get_wol(efx, wol); | ||
786 | } | ||
787 | |||
788 | |||
789 | static int efx_ethtool_set_wol(struct net_device *net_dev, | ||
790 | struct ethtool_wolinfo *wol) | ||
791 | { | ||
792 | struct efx_nic *efx = netdev_priv(net_dev); | ||
793 | return efx->type->set_wol(efx, wol->wolopts); | ||
794 | } | ||
795 | |||
796 | static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) | ||
797 | { | ||
798 | struct efx_nic *efx = netdev_priv(net_dev); | ||
799 | int rc; | ||
800 | |||
801 | rc = efx->type->map_reset_flags(flags); | ||
802 | if (rc < 0) | ||
803 | return rc; | ||
804 | |||
805 | return efx_reset(efx, rc); | ||
806 | } | ||
807 | |||
/* Report RX flow classification settings.
 * ETHTOOL_GRXRINGS reports the number of RX channels; ETHTOOL_GRXFH
 * reports which header fields feed the RX flow hash for each flow
 * type, subject to the minimum NIC revision able to hash that type. */
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, void *rules __always_unused)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		unsigned min_revision = 0;

		info->data = 0;
		switch (info->flow_type) {
		/* TCP adds the port fields, then falls through to the
		 * common IP source/destination fields */
		case TCP_V4_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EFX_REV_FALCON_B0;
			break;
		case TCP_V6_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EFX_REV_SIENA_A0;
			break;
		default:
			break;
		}
		/* Revisions older than required cannot hash this type */
		if (efx_nic_rev(efx) < min_revision)
			info->data = 0;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}
856 | |||
/* Insert or remove an RX n-tuple steering filter (legacy
 * ETHTOOL_SRXNTUPLE interface).
 *
 * Mask convention enforced below: a field whose mask is all-zeroes
 * must match exactly; a field whose mask is all-ones is ignored.  Only
 * the combinations the filter hardware supports are accepted; anything
 * else is -EINVAL.
 *
 * The action is a target RX queue index, ACTION_DROP (mapped to the
 * special queue id 0xfff) or ACTION_CLEAR (remove a matching filter).
 */
static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
				     struct ethtool_rx_ntuple *ntuple)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
	struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
	struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
	struct efx_filter_spec filter;
	int rc;

	/* Range-check action */
	if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
	    ntuple->fs.action >= (s32)efx->n_rx_channels)
		return -EINVAL;

	/* Matching on user data is not supported: mask must be all-ones */
	if (~ntuple->fs.data_mask)
		return -EINVAL;

	efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
			   (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
			   0xfff : ntuple->fs.action);

	switch (ntuple->fs.flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
			    IPPROTO_TCP : IPPROTO_UDP);

		/* Must match all of destination, */
		if (ip_mask->ip4dst | ip_mask->pdst)
			return -EINVAL;
		/* all or none of source, */
		if ((ip_mask->ip4src | ip_mask->psrc) &&
		    ((__force u32)~ip_mask->ip4src |
		     (__force u16)~ip_mask->psrc))
			return -EINVAL;
		/* and nothing else */
		if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
			return -EINVAL;

		/* Zero source mask means "match source too": full
		 * 4-tuple filter; otherwise destination-only */
		if (!ip_mask->ip4src)
			rc = efx_filter_set_ipv4_full(&filter, proto,
						      ip_entry->ip4dst,
						      ip_entry->pdst,
						      ip_entry->ip4src,
						      ip_entry->psrc);
		else
			rc = efx_filter_set_ipv4_local(&filter, proto,
						       ip_entry->ip4dst,
						       ip_entry->pdst);
		if (rc)
			return rc;
		break;
	}

	case ETHER_FLOW:
		/* Must match all of destination, */
		if (!is_zero_ether_addr(mac_mask->h_dest))
			return -EINVAL;
		/* all or none of VID, */
		if (ntuple->fs.vlan_tag_mask != 0xf000 &&
		    ntuple->fs.vlan_tag_mask != 0xffff)
			return -EINVAL;
		/* and nothing else */
		if (!is_broadcast_ether_addr(mac_mask->h_source) ||
		    mac_mask->h_proto != htons(0xffff))
			return -EINVAL;

		/* vlan_tag_mask == 0xf000 means the 12 VID bits match */
		rc = efx_filter_set_eth_local(
			&filter,
			(ntuple->fs.vlan_tag_mask == 0xf000) ?
			ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
			mac_entry->h_dest);
		if (rc)
			return rc;
		break;

	default:
		return -EINVAL;
	}

	if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
		return efx_filter_remove_filter(efx, &filter);

	rc = efx_filter_insert_filter(efx, &filter, true);
	return rc < 0 ? rc : 0;
}
945 | |||
946 | static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, | ||
947 | struct ethtool_rxfh_indir *indir) | ||
948 | { | ||
949 | struct efx_nic *efx = netdev_priv(net_dev); | ||
950 | size_t copy_size = | ||
951 | min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); | ||
952 | |||
953 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
954 | return -EOPNOTSUPP; | ||
955 | |||
956 | indir->size = ARRAY_SIZE(efx->rx_indir_table); | ||
957 | memcpy(indir->ring_index, efx->rx_indir_table, | ||
958 | copy_size * sizeof(indir->ring_index[0])); | ||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, | ||
963 | const struct ethtool_rxfh_indir *indir) | ||
964 | { | ||
965 | struct efx_nic *efx = netdev_priv(net_dev); | ||
966 | size_t i; | ||
967 | |||
968 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
969 | return -EOPNOTSUPP; | ||
970 | |||
971 | /* Validate size and indices */ | ||
972 | if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) | ||
973 | return -EINVAL; | ||
974 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) | ||
975 | if (indir->ring_index[i] >= efx->n_rx_channels) | ||
976 | return -EINVAL; | ||
977 | |||
978 | memcpy(efx->rx_indir_table, indir->ring_index, | ||
979 | sizeof(efx->rx_indir_table)); | ||
980 | efx_nic_push_rx_indir_table(efx); | ||
981 | return 0; | ||
982 | } | ||
983 | |||
/* ethtool operations table for sfc network devices.  Handlers not
 * visible in this part of the file (get_settings, get_regs, phys_id,
 * …) are defined earlier in ethtool.c. */
const struct ethtool_ops efx_ethtool_ops = {
	.get_settings = efx_ethtool_get_settings,
	.set_settings = efx_ethtool_set_settings,
	.get_drvinfo = efx_ethtool_get_drvinfo,
	.get_regs_len = efx_ethtool_get_regs_len,
	.get_regs = efx_ethtool_get_regs,
	.get_msglevel = efx_ethtool_get_msglevel,
	.set_msglevel = efx_ethtool_set_msglevel,
	.nway_reset = efx_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_coalesce = efx_ethtool_get_coalesce,
	.set_coalesce = efx_ethtool_set_coalesce,
	.get_ringparam = efx_ethtool_get_ringparam,
	.set_ringparam = efx_ethtool_set_ringparam,
	.get_pauseparam = efx_ethtool_get_pauseparam,
	.set_pauseparam = efx_ethtool_set_pauseparam,
	.get_sset_count = efx_ethtool_get_sset_count,
	.self_test = efx_ethtool_self_test,
	.get_strings = efx_ethtool_get_strings,
	.set_phys_id = efx_ethtool_phys_id,
	.get_ethtool_stats = efx_ethtool_get_stats,
	.get_wol = efx_ethtool_get_wol,
	.set_wol = efx_ethtool_set_wol,
	.reset = efx_ethtool_reset,
	.get_rxnfc = efx_ethtool_get_rxnfc,
	.set_rx_ntuple = efx_ethtool_set_rx_ntuple,
	.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
	.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c new file mode 100644 index 00000000000..94bf4aaf984 --- /dev/null +++ b/drivers/net/sfc/falcon.c | |||
@@ -0,0 +1,1841 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/mii.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include "net_driver.h" | ||
20 | #include "bitfield.h" | ||
21 | #include "efx.h" | ||
22 | #include "mac.h" | ||
23 | #include "spi.h" | ||
24 | #include "nic.h" | ||
25 | #include "regs.h" | ||
26 | #include "io.h" | ||
27 | #include "phy.h" | ||
28 | #include "workarounds.h" | ||
29 | |||
30 | /* Hardware control for SFC4000 (aka Falcon). */ | ||
31 | |||
/* SPI device type descriptors.  Size, write-block and erase-block
 * fields are encoded as log2(bytes) (13 -> 8 KB, 5 -> 32 B,
 * 15 -> 32 KB); the address-length field is in bytes (2 -> 16-bit
 * addressing); the erase-command field holds the raw SPI opcode. */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
45 | |||
46 | /************************************************************************** | ||
47 | * | ||
48 | * I2C bus - this is a bit-bashing interface using GPIO pins | ||
49 | * Note that it uses the output enables to tristate the outputs | ||
50 | * SDA is the data pin and SCL is the clock | ||
51 | * | ||
52 | ************************************************************************** | ||
53 | */ | ||
54 | static void falcon_setsda(void *data, int state) | ||
55 | { | ||
56 | struct efx_nic *efx = (struct efx_nic *)data; | ||
57 | efx_oword_t reg; | ||
58 | |||
59 | efx_reado(efx, ®, FR_AB_GPIO_CTL); | ||
60 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); | ||
61 | efx_writeo(efx, ®, FR_AB_GPIO_CTL); | ||
62 | } | ||
63 | |||
64 | static void falcon_setscl(void *data, int state) | ||
65 | { | ||
66 | struct efx_nic *efx = (struct efx_nic *)data; | ||
67 | efx_oword_t reg; | ||
68 | |||
69 | efx_reado(efx, ®, FR_AB_GPIO_CTL); | ||
70 | EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); | ||
71 | efx_writeo(efx, ®, FR_AB_GPIO_CTL); | ||
72 | } | ||
73 | |||
74 | static int falcon_getsda(void *data) | ||
75 | { | ||
76 | struct efx_nic *efx = (struct efx_nic *)data; | ||
77 | efx_oword_t reg; | ||
78 | |||
79 | efx_reado(efx, ®, FR_AB_GPIO_CTL); | ||
80 | return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); | ||
81 | } | ||
82 | |||
83 | static int falcon_getscl(void *data) | ||
84 | { | ||
85 | struct efx_nic *efx = (struct efx_nic *)data; | ||
86 | efx_oword_t reg; | ||
87 | |||
88 | efx_reado(efx, ®, FR_AB_GPIO_CTL); | ||
89 | return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); | ||
90 | } | ||
91 | |||
/* Bindings for the kernel's bit-banging I2C algorithm; the bus is
 * driven through Falcon GPIO pins (SDA = GPIO3, SCL = GPIO0) */
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda = falcon_setsda,
	.setscl = falcon_setscl,
	.getsda = falcon_getsda,
	.getscl = falcon_getscl,
	.udelay = 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout = DIV_ROUND_UP(HZ, 20),
};
101 | |||
/* Write a channel's interrupt moderation setting to its timer-command
 * register.  Zero moderation disables the hold-off timer. */
static void falcon_push_irq_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		/* Hold-off mode: the programmed timer value is
		 * irq_moderation - 1 */
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	/* The A-rev and B-rev register addresses must coincide for the
	 * single write below to serve both */
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
124 | |||
125 | static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); | ||
126 | |||
/* Prepare to flush queues: take the MAC wrapper out of the data path,
 * then give in-flight traffic time to drain out of the FIFOs */
static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}
136 | |||
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	/* The value written (0xb7eb7e) appears arbitrary; it is the
	 * write itself that performs the acknowledge */
	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	/* Read back to flush the BIU collector (see bug 3706 above) */
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
155 | |||
156 | |||
/* Legacy (INTx) interrupt handler for Falcon A1 silicon.  The
 * interrupt status vector is read from host memory (efx->irq_status);
 * an all-zero vector means the interrupt was not ours. */
irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt. If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of the flagged event queues; only queue
	 * bits 0 and 1 are serviced on this path */
	if (queues & 1)
		efx_schedule_channel(efx_get_channel(efx, 0));
	if (queues & 2)
		efx_schedule_channel(efx_get_channel(efx, 1));
	return IRQ_HANDLED;
}
201 | /************************************************************************** | ||
202 | * | ||
203 | * EEPROM/flash | ||
204 | * | ||
205 | ************************************************************************** | ||
206 | */ | ||
207 | |||
208 | #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) | ||
209 | |||
210 | static int falcon_spi_poll(struct efx_nic *efx) | ||
211 | { | ||
212 | efx_oword_t reg; | ||
213 | efx_reado(efx, ®, FR_AB_EE_SPI_HCMD); | ||
214 | return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; | ||
215 | } | ||
216 | |||
217 | /* Wait for SPI command completion */ | ||
218 | static int falcon_spi_wait(struct efx_nic *efx) | ||
219 | { | ||
220 | /* Most commands will finish quickly, so we start polling at | ||
221 | * very short intervals. Sometimes the command may have to | ||
222 | * wait for VPD or expansion ROM access outside of our | ||
223 | * control, so we allow up to 100 ms. */ | ||
224 | unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10); | ||
225 | int i; | ||
226 | |||
227 | for (i = 0; i < 10; i++) { | ||
228 | if (!falcon_spi_poll(efx)) | ||
229 | return 0; | ||
230 | udelay(10); | ||
231 | } | ||
232 | |||
233 | for (;;) { | ||
234 | if (!falcon_spi_poll(efx)) | ||
235 | return 0; | ||
236 | if (time_after_eq(jiffies, timeout)) { | ||
237 | netif_err(efx, hw, efx->net_dev, | ||
238 | "timed out waiting for SPI\n"); | ||
239 | return -ETIMEDOUT; | ||
240 | } | ||
241 | schedule_timeout_uninterruptible(1); | ||
242 | } | ||
243 | } | ||
244 | |||
/* Issue a single command on the SPI host interface.
 * @command: SPI opcode to issue
 * @address: device address, or negative for unaddressed commands
 * @in: data to write to the device, or NULL
 * @out: buffer for data read back, or NULL (non-NULL makes this a
 *	read command)
 * @len: data length in bytes, at most FALCON_SPI_MAX_LEN
 *
 * Returns 0 on success, -EINVAL for oversize requests, -EBUSY if the
 * previous command has not finished, or -ETIMEDOUT on completion
 * timeout. */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
300 | |||
301 | static size_t | ||
302 | falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start) | ||
303 | { | ||
304 | return min(FALCON_SPI_MAX_LEN, | ||
305 | (spi->block_size - (start & (spi->block_size - 1)))); | ||
306 | } | ||
307 | |||
308 | static inline u8 | ||
309 | efx_spi_munge_command(const struct efx_spi_device *spi, | ||
310 | const u8 command, const unsigned int address) | ||
311 | { | ||
312 | return command | (((address >> 8) & spi->munge_address) << 3); | ||
313 | } | ||
314 | |||
315 | /* Wait up to 10 ms for buffered write completion */ | ||
316 | int | ||
317 | falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) | ||
318 | { | ||
319 | unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); | ||
320 | u8 status; | ||
321 | int rc; | ||
322 | |||
323 | for (;;) { | ||
324 | rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, | ||
325 | &status, sizeof(status)); | ||
326 | if (rc) | ||
327 | return rc; | ||
328 | if (!(status & SPI_STATUS_NRDY)) | ||
329 | return 0; | ||
330 | if (time_after_eq(jiffies, timeout)) { | ||
331 | netif_err(efx, hw, efx->net_dev, | ||
332 | "SPI write timeout on device %d" | ||
333 | " last status=0x%02x\n", | ||
334 | spi->device_id, status); | ||
335 | return -ETIMEDOUT; | ||
336 | } | ||
337 | schedule_timeout_uninterruptible(1); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, | ||
342 | loff_t start, size_t len, size_t *retlen, u8 *buffer) | ||
343 | { | ||
344 | size_t block_len, pos = 0; | ||
345 | unsigned int command; | ||
346 | int rc = 0; | ||
347 | |||
348 | while (pos < len) { | ||
349 | block_len = min(len - pos, FALCON_SPI_MAX_LEN); | ||
350 | |||
351 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); | ||
352 | rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, | ||
353 | buffer + pos, block_len); | ||
354 | if (rc) | ||
355 | break; | ||
356 | pos += block_len; | ||
357 | |||
358 | /* Avoid locking up the system */ | ||
359 | cond_resched(); | ||
360 | if (signal_pending(current)) { | ||
361 | rc = -EINTR; | ||
362 | break; | ||
363 | } | ||
364 | } | ||
365 | |||
366 | if (retlen) | ||
367 | *retlen = pos; | ||
368 | return rc; | ||
369 | } | ||
370 | |||
371 | int | ||
372 | falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, | ||
373 | loff_t start, size_t len, size_t *retlen, const u8 *buffer) | ||
374 | { | ||
375 | u8 verify_buffer[FALCON_SPI_MAX_LEN]; | ||
376 | size_t block_len, pos = 0; | ||
377 | unsigned int command; | ||
378 | int rc = 0; | ||
379 | |||
380 | while (pos < len) { | ||
381 | rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); | ||
382 | if (rc) | ||
383 | break; | ||
384 | |||
385 | block_len = min(len - pos, | ||
386 | falcon_spi_write_limit(spi, start + pos)); | ||
387 | command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); | ||
388 | rc = falcon_spi_cmd(efx, spi, command, start + pos, | ||
389 | buffer + pos, NULL, block_len); | ||
390 | if (rc) | ||
391 | break; | ||
392 | |||
393 | rc = falcon_spi_wait_write(efx, spi); | ||
394 | if (rc) | ||
395 | break; | ||
396 | |||
397 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); | ||
398 | rc = falcon_spi_cmd(efx, spi, command, start + pos, | ||
399 | NULL, verify_buffer, block_len); | ||
400 | if (memcmp(verify_buffer, buffer + pos, block_len)) { | ||
401 | rc = -EIO; | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | pos += block_len; | ||
406 | |||
407 | /* Avoid locking up the system */ | ||
408 | cond_resched(); | ||
409 | if (signal_pending(current)) { | ||
410 | rc = -EINTR; | ||
411 | break; | ||
412 | } | ||
413 | } | ||
414 | |||
415 | if (retlen) | ||
416 | *retlen = pos; | ||
417 | return rc; | ||
418 | } | ||
419 | |||
420 | /************************************************************************** | ||
421 | * | ||
422 | * MAC wrapper | ||
423 | * | ||
424 | ************************************************************************** | ||
425 | */ | ||
426 | |||
427 | static void falcon_push_multicast_hash(struct efx_nic *efx) | ||
428 | { | ||
429 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; | ||
430 | |||
431 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
432 | |||
433 | efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); | ||
434 | efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); | ||
435 | } | ||
436 | |||
/* Reset the MAC blocks.  On pre-B0 silicon only the XMAC core reset is
 * used (GLB_CTL is not safe for this) and we return as soon as it
 * completes; only on timeout do we fall through to the GLB_CTL path.
 * On B0 the TX FIFO is put into drain mode and the XGTX/XGRX/EM blocks
 * are reset through GLB_CTL.  The caller must have statistics disabled
 * (asserted below) and re-enables them afterwards.
 */
static void falcon_reset_macs(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg, mac_ctrl;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Poll up to ~100ms for the core reset bit to clear */
		for (count = 0; count < 10000; count++) {
			efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* MAC stats will fail whilst the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Put the TX FIFO into drain mode before the block resets */
	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Poll (at most ~21 iterations of 10us) for all three reset
	 * bits to self-clear */
	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
500 | |||
501 | void falcon_drain_tx_fifo(struct efx_nic *efx) | ||
502 | { | ||
503 | efx_oword_t reg; | ||
504 | |||
505 | if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) || | ||
506 | (efx->loopback_mode != LOOPBACK_NONE)) | ||
507 | return; | ||
508 | |||
509 | efx_reado(efx, ®, FR_AB_MAC_CTRL); | ||
510 | /* There is no point in draining more than once */ | ||
511 | if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) | ||
512 | return; | ||
513 | |||
514 | falcon_reset_macs(efx); | ||
515 | } | ||
516 | |||
517 | static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | ||
518 | { | ||
519 | efx_oword_t reg; | ||
520 | |||
521 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
522 | return; | ||
523 | |||
524 | /* Isolate the MAC -> RX */ | ||
525 | efx_reado(efx, ®, FR_AZ_RX_CFG); | ||
526 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0); | ||
527 | efx_writeo(efx, ®, FR_AZ_RX_CFG); | ||
528 | |||
529 | /* Isolate TX -> MAC */ | ||
530 | falcon_drain_tx_fifo(efx); | ||
531 | } | ||
532 | |||
/* Reprogram the MAC wrapper registers (FR_AB_MAC_CTRL, FR_AZ_RX_CFG)
 * from efx->link_state, and on B0 (un)isolate the MAC from the
 * datapath depending on whether a reset is pending.
 */
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed, isolate;

	/* Snapshot the pending-reset state once; used for both the TX
	 * drain and RX ingress decisions below */
	isolate = !!ACCESS_ONCE(efx->reset_pending);

	/* Encode the link speed for the FRF_AB_MAC_SPEED field */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000: link_speed = 2; break;
	case 100: link_speed = 1; break;
	default: link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised. Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
578 | |||
/* Start a DMA of the MAC statistics into efx->stats_buffer and arm the
 * timer that harvests the result.  Completion is detected by
 * falcon_stats_complete() via the flag word that stats_dma_done points
 * into the buffer.  Must not be called with a request outstanding or
 * while stats are disabled (both asserted below).
 */
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Harvest the result (or report a timeout) in about half a second */
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
603 | |||
604 | static void falcon_stats_complete(struct efx_nic *efx) | ||
605 | { | ||
606 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
607 | |||
608 | if (!nic_data->stats_pending) | ||
609 | return; | ||
610 | |||
611 | nic_data->stats_pending = 0; | ||
612 | if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { | ||
613 | rmb(); /* read the done flag before the stats */ | ||
614 | efx->mac_op->update_stats(efx); | ||
615 | } else { | ||
616 | netif_err(efx, hw, efx->net_dev, | ||
617 | "timed out waiting for statistics\n"); | ||
618 | } | ||
619 | } | ||
620 | |||
/* Statistics timer callback: under stats_lock, harvest any completed
 * DMA and, unless statistics are currently disabled, kick off the next
 * request.
 * @context: the struct efx_nic, cast to unsigned long for the timer API
 */
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
634 | |||
635 | static bool falcon_loopback_link_poll(struct efx_nic *efx) | ||
636 | { | ||
637 | struct efx_link_state old_state = efx->link_state; | ||
638 | |||
639 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
640 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | ||
641 | |||
642 | efx->link_state.fd = true; | ||
643 | efx->link_state.fc = efx->wanted_fc; | ||
644 | efx->link_state.up = true; | ||
645 | efx->link_state.speed = 10000; | ||
646 | |||
647 | return !efx_link_state_equal(&efx->link_state, &old_state); | ||
648 | } | ||
649 | |||
/* Reconfigure PHY and MAC after a configuration change.  The link is
 * polled first so the MAC is programmed for the correct speed; the MAC
 * is then reset and reconfigured with statistics paused, and finally
 * the kernel is notified of any link state change.  Always returns 0.
 */
static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats DMA would fail during the MAC reset (see falcon_reset_macs) */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = efx->mac_op->reconfigure(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}
681 | |||
682 | /************************************************************************** | ||
683 | * | ||
684 | * PHY access via GMII | ||
685 | * | ||
686 | ************************************************************************** | ||
687 | */ | ||
688 | |||
689 | /* Wait for GMII access to complete */ | ||
690 | static int falcon_gmii_wait(struct efx_nic *efx) | ||
691 | { | ||
692 | efx_oword_t md_stat; | ||
693 | int count; | ||
694 | |||
695 | /* wait up to 50ms - taken max from datasheet */ | ||
696 | for (count = 0; count < 5000; count++) { | ||
697 | efx_reado(efx, &md_stat, FR_AB_MD_STAT); | ||
698 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { | ||
699 | if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || | ||
700 | EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { | ||
701 | netif_err(efx, hw, efx->net_dev, | ||
702 | "error from GMII access " | ||
703 | EFX_OWORD_FMT"\n", | ||
704 | EFX_OWORD_VAL(md_stat)); | ||
705 | return -EIO; | ||
706 | } | ||
707 | return 0; | ||
708 | } | ||
709 | udelay(10); | ||
710 | } | ||
711 | netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); | ||
712 | return -ETIMEDOUT; | ||
713 | } | ||
714 | |||
/* Write an MDIO register of a PHY connected to Falcon.
 * @prtad/@devad select the port and device address; @addr is the
 * register address and @value the data to write.  Returns 0 or a
 * negative error; on timeout the in-flight operation is aborted via
 * the MD_GC bit.
 */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		   prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Setting MD_WRC starts the write */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
767 | |||
/* Read an MDIO register of a PHY connected to Falcon.
 * @prtad/@devad select the port and device address; @addr is the
 * register address.  Returns the (non-negative) register value on
 * success or a negative error; on timeout the in-flight operation is
 * aborted via the MD_GC bit.
 */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
819 | |||
/* This call is responsible for hooking in the MAC and PHY operations:
 * select the PHY operations table for the fitted PHY type, set up MDIO
 * access, probe the PHY, choose default flow control settings, and
 * allocate the DMA buffer used for MAC statistics.
 *
 * Returns 0 on success, -ENODEV for an unknown PHY type, or an error
 * from the PHY probe or buffer allocation.
 *
 * NOTE(review): if efx_nic_alloc_buffer() fails after phy_op->probe()
 * succeeded, the PHY is not torn down here — confirm that callers
 * clean up via falcon_remove_port() (or equivalent) on this path.
 */
static int falcon_probe_port(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EFX_FC_AUTO;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));
	/* Completion flag word within the stats buffer */
	nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;

	return 0;
}
877 | |||
/* Undo falcon_probe_port(): detach the PHY and release the MAC
 * statistics DMA buffer. */
static void falcon_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}
883 | |||
884 | /* Global events are basically PHY events */ | ||
885 | static bool | ||
886 | falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event) | ||
887 | { | ||
888 | struct efx_nic *efx = channel->efx; | ||
889 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
890 | |||
891 | if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || | ||
892 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || | ||
893 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) | ||
894 | /* Ignored */ | ||
895 | return true; | ||
896 | |||
897 | if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) && | ||
898 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { | ||
899 | nic_data->xmac_poll_required = true; | ||
900 | return true; | ||
901 | } | ||
902 | |||
903 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? | ||
904 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : | ||
905 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { | ||
906 | netif_err(efx, rx_err, efx->net_dev, | ||
907 | "channel %d seen global RX_RESET event. Resetting.\n", | ||
908 | channel->channel); | ||
909 | |||
910 | atomic_inc(&efx->rx_reset); | ||
911 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | ||
912 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
913 | return true; | ||
914 | } | ||
915 | |||
916 | return false; | ||
917 | } | ||
918 | |||
919 | /************************************************************************** | ||
920 | * | ||
921 | * Falcon test code | ||
922 | * | ||
923 | **************************************************************************/ | ||
924 | |||
/* Read the NVRAM configuration region from SPI flash (preferred) or
 * EEPROM and validate its magic number, structure version and
 * checksum.
 *
 * @nvconfig_out: destination for the validated struct, or NULL to
 *	only validate (see falcon_test_nvram()).
 *
 * Returns 0 on success, -EINVAL if no SPI device is present or the
 * contents fail validation, -ENOMEM on allocation failure, or -EIO if
 * the SPI read fails.
 *
 * NOTE(review): a falcon_spi_read() failure is reported as -EIO,
 * discarding the underlying error code (e.g. -EINTR) — confirm callers
 * only test for non-zero.
 */
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	if (efx_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (efx_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	/* The nvconfig struct sits at a fixed offset within the region */
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  efx_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* Versions 2-3: checksum covers only the nvconfig struct */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* Version 4+: checksum covers the whole region */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* The low 16 bits of the word sum must all be set */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

out:
	kfree(region);
	return rc;
}
996 | |||
/* Self-test hook: validate the NVRAM contents without keeping them */
static int falcon_test_nvram(struct efx_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
1001 | |||
/* Registers exercised by falcon_b0_test_registers(): each entry pairs
 * a register address with a 128-bit mask, interpreted by
 * efx_nic_test_registers(). */
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1040 | |||
/* Run the generic register self-test over the B0 register table above */
static int falcon_b0_test_registers(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, falcon_b0_register_tests,
				      ARRAY_SIZE(falcon_b0_register_tests));
}
1046 | |||
1047 | /************************************************************************** | ||
1048 | * | ||
1049 | * Device reset | ||
1050 | * | ||
1051 | ************************************************************************** | ||
1052 | */ | ||
1053 | |||
1054 | static enum reset_type falcon_map_reset_reason(enum reset_type reason) | ||
1055 | { | ||
1056 | switch (reason) { | ||
1057 | case RESET_TYPE_RX_RECOVERY: | ||
1058 | case RESET_TYPE_RX_DESC_FETCH: | ||
1059 | case RESET_TYPE_TX_DESC_FETCH: | ||
1060 | case RESET_TYPE_TX_SKIP: | ||
1061 | /* These can occasionally occur due to hardware bugs. | ||
1062 | * We try to reset without disrupting the link. | ||
1063 | */ | ||
1064 | return RESET_TYPE_INVISIBLE; | ||
1065 | default: | ||
1066 | return RESET_TYPE_ALL; | ||
1067 | } | ||
1068 | } | ||
1069 | |||
1070 | static int falcon_map_reset_flags(u32 *flags) | ||
1071 | { | ||
1072 | enum { | ||
1073 | FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | | ||
1074 | ETH_RESET_OFFLOAD | ETH_RESET_MAC), | ||
1075 | FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY, | ||
1076 | FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ, | ||
1077 | }; | ||
1078 | |||
1079 | if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) { | ||
1080 | *flags &= ~FALCON_RESET_WORLD; | ||
1081 | return RESET_TYPE_WORLD; | ||
1082 | } | ||
1083 | |||
1084 | if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) { | ||
1085 | *flags &= ~FALCON_RESET_ALL; | ||
1086 | return RESET_TYPE_ALL; | ||
1087 | } | ||
1088 | |||
1089 | if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) { | ||
1090 | *flags &= ~FALCON_RESET_INVISIBLE; | ||
1091 | return RESET_TYPE_INVISIBLE; | ||
1092 | } | ||
1093 | |||
1094 | return -EINVAL; | ||
1095 | } | ||
1096 | |||
/* Resets NIC to known state. This routine must be called in process
 * context and is allowed to sleep.
 *
 * RESET_TYPE_WORLD also resets the PCIe core, so the PCI config space
 * of both functions is saved before and restored after the reset.
 * Other reset types exclude the PCIe core and the EEPROM/flash
 * interface, and RESET_TYPE_INVISIBLE additionally excludes the
 * external PHY.  Serialised against SPI access by falcon_reset_hw().
 */
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	/* Give the hardware ~50ms to complete the reset */
	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs.
	 * fail2: the secondary save failed, so undo the primary save.
	 * fail1: the primary save failed, nothing to restore.
	 * fail3: the reset timed out; state was already restored above. */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
1179 | |||
1180 | static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | ||
1181 | { | ||
1182 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1183 | int rc; | ||
1184 | |||
1185 | mutex_lock(&nic_data->spi_lock); | ||
1186 | rc = __falcon_reset_hw(efx, method); | ||
1187 | mutex_unlock(&nic_data->spi_lock); | ||
1188 | |||
1189 | return rc; | ||
1190 | } | ||
1191 | |||
/* Periodic hardware monitoring, called with efx->mac_lock held.
 * Checks the board sensors and polls the PHY link state; when the
 * link changes, the MAC is reset and reconfigured and the new state
 * is reported.
 */
static void falcon_monitor(struct efx_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	/* On a board sensor fault or read failure, force the PHY into
	 * low-power mode to protect the hardware. */
	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
	}

	/* In internal loopback modes the PHY's view of the link is not
	 * meaningful; poll the loopback link state instead. */
	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Statistics DMA must be quiesced while the MAC is
		 * deconfigured and reset. */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = efx->mac_op->reconfigure(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
1229 | |||
/* Zeroes out the SRAM contents. This routine must be called in
 * process context and is allowed to sleep.
 * Returns 0 on success or -ETIMEDOUT if the hardware does not report
 * completion within ~0.4s.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete; the hardware clears
	 * FRF_AZ_SRM_INIT_EN when initialisation has finished. */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
1272 | |||
1273 | static void falcon_spi_device_init(struct efx_nic *efx, | ||
1274 | struct efx_spi_device *spi_device, | ||
1275 | unsigned int device_id, u32 device_type) | ||
1276 | { | ||
1277 | if (device_type != 0) { | ||
1278 | spi_device->device_id = device_id; | ||
1279 | spi_device->size = | ||
1280 | 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); | ||
1281 | spi_device->addr_len = | ||
1282 | SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); | ||
1283 | spi_device->munge_address = (spi_device->size == 1 << 9 && | ||
1284 | spi_device->addr_len == 1); | ||
1285 | spi_device->erase_command = | ||
1286 | SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD); | ||
1287 | spi_device->erase_size = | ||
1288 | 1 << SPI_DEV_TYPE_FIELD(device_type, | ||
1289 | SPI_DEV_TYPE_ERASE_SIZE); | ||
1290 | spi_device->block_size = | ||
1291 | 1 << SPI_DEV_TYPE_FIELD(device_type, | ||
1292 | SPI_DEV_TYPE_BLOCK_SIZE); | ||
1293 | } else { | ||
1294 | spi_device->size = 0; | ||
1295 | } | ||
1296 | } | ||
1297 | |||
/* Extract non-volatile configuration.
 * Reads the NVRAM image into a scratch buffer, then fills in the PHY
 * type/address, SPI device geometry (board struct v3+), permanent MAC
 * address and board revision.  Returns 0 or a negative error code.
 */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	/* Scratch buffer for the whole NVRAM config image */
	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	/* PHY type and MDIO address come from the v2 board structure */
	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	/* SPI device descriptors were added in v3 of the board struct */
	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}
1339 | |||
/* Probe all SPI devices on the NIC.
 * Determines which SPI device (if any) the NIC booted from, based on
 * the power-up strap registers, and gives that device a default
 * geometry.  Other devices are described by NVRAM and are set up
 * later by falcon_probe_nvconfig().  Also initialises the SPI lock.
 */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	/* GPIO3 strap indicates boot from an external SPI device;
	 * SF_PRST then distinguishes flash from EEPROM. */
	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	/* Only the boot device gets a default geometry here */
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
1383 | |||
/* Probe the NIC: check that the hardware revision is supported, find
 * the secondary PCI function on dual-function (rev A1) boards, reset
 * the hardware, and set up the interrupt status buffer, SPI devices,
 * non-volatile configuration, I2C adapter and board-specific state.
 * Returns 0 or a negative error code; on failure all resources
 * acquired so far are released via the fall-through error labels.
 */
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* PCI revision 0 or 0xff identifies unsupported A0 silicon */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		/* Only the 10G, PCIe variants of rev A1 are supported */
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 exposes a secondary PCI function at devfn + 1
		 * on the same bus.  pci_get_device() drops the
		 * reference on its 'from' argument, so take an extra
		 * reference on our own device before walking the list. */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER (interrupt status);
	 * the hardware requires 16-byte alignment */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	/* Initialise I2C adapter (bit-banged over GPIO) */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Statistics stay disabled until falcon_start_nic_stats() */
	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
1514 | |||
/* Configure RX DMA splitting and flow-control thresholds in RX_CFG.
 * Register field layouts differ between rev A (5.5K data FIFO) and
 * rev B0 and later (80K data FIFO).
 */
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
	/* Prior to Siena the RX DMA engine will split each frame at
	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
	 * be so large that that never happens. */
	const unsigned huge_buf_size = (3 * 4096) >> 5;
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    huge_buf_size);
		/* MAC XON/XOFF thresholds are written as bytes >> 8 */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    huge_buf_size);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1560 | |||
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 * Returns 0 or a negative error code from the SRAM reset.
 */
static int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* SRAM must be zeroed before first use */
	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit the receive-filter table search depths (workaround 7244) */
	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* Set hash key for IPv4 */
		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

		/* Set destination of both TX and RX Flush events */
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	/* Revision-independent initialisation shared with Siena */
	efx_nic_init_common(efx);

	return 0;
}
1631 | |||
/* Inverse of falcon_probe_nic(): shut down the board, remove the I2C
 * adapter, free the interrupt status buffer, reset the hardware and
 * release the secondary PCI function and the private NIC state.
 */
static void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);
	int rc;

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&board->i2c_adap);
	BUG_ON(rc);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	efx_nic_free_buffer(efx, &efx->irq_status);

	/* Leave the hardware in a quiescent state */
	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
1659 | |||
/* Fold the latest hardware statistics into the driver's counters.
 * Does nothing while statistics are disabled (see
 * falcon_stop_nic_stats()).
 */
static void falcon_update_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t cnt;

	if (nic_data->stats_disable_count)
		return;

	/* Accumulate the RX no-descriptor drop count */
	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

	/* MAC statistics are DMAed into a host buffer; only consume
	 * them once the hardware has written its completion marker. */
	if (nic_data->stats_pending &&
	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	}
}
1679 | |||
1680 | void falcon_start_nic_stats(struct efx_nic *efx) | ||
1681 | { | ||
1682 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1683 | |||
1684 | spin_lock_bh(&efx->stats_lock); | ||
1685 | if (--nic_data->stats_disable_count == 0) | ||
1686 | falcon_stats_request(efx); | ||
1687 | spin_unlock_bh(&efx->stats_lock); | ||
1688 | } | ||
1689 | |||
/* Disable statistics fetches, waiting briefly for any transfer
 * already in flight to complete.  Nests with
 * falcon_start_nic_stats() via stats_disable_count.  Must be called
 * in process context.
 */
void falcon_stop_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	/* No new requests are started once the count is non-zero */
	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
1715 | |||
1716 | static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
1717 | { | ||
1718 | falcon_board(efx)->type->set_id_led(efx, mode); | ||
1719 | } | ||
1720 | |||
1721 | /************************************************************************** | ||
1722 | * | ||
1723 | * Wake on LAN | ||
1724 | * | ||
1725 | ************************************************************************** | ||
1726 | */ | ||
1727 | |||
1728 | static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | ||
1729 | { | ||
1730 | wol->supported = 0; | ||
1731 | wol->wolopts = 0; | ||
1732 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
1733 | } | ||
1734 | |||
1735 | static int falcon_set_wol(struct efx_nic *efx, u32 type) | ||
1736 | { | ||
1737 | if (type != 0) | ||
1738 | return -EINVAL; | ||
1739 | return 0; | ||
1740 | } | ||
1741 | |||
1742 | /************************************************************************** | ||
1743 | * | ||
1744 | * Revision-dependent attributes used by efx.c and nic.c | ||
1745 | * | ||
1746 | ************************************************************************** | ||
1747 | */ | ||
1748 | |||
/* Operations and constants for Falcon rev A1 (SFC4000 A-series),
 * referenced from the generic driver core via struct efx_nic_type. */
const struct efx_nic_type falcon_a1_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.push_multicast_hash = falcon_push_multicast_hash,
	.reconfigure_port = falcon_reconfigure_port,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	.default_mac_ops = &falcon_xmac_operations,

	.revision = EFX_REV_FALCON_A1,
	.mem_map_size = 0x20000,
	/* Rev A1 uses the kernel-access (_KER) register tables */
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,	/* no MSI-X on rev A1 */
	.phys_addr_channels = 4,
	.tx_dc_base = 0x130000,
	.rx_dc_base = 0x100000,
	.offload_features = NETIF_F_IP_CSUM,
};
1790 | |||
/* Operations and constants for Falcon rev B0 (SFC4000 B-series).
 * Compared with rev A1 this adds register self-test, RX hash
 * insertion (RXHASH) and n-tuple filtering. */
const struct efx_nic_type falcon_b0_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.push_multicast_hash = falcon_push_multicast_hash,
	.reconfigure_port = falcon_reconfigure_port,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_registers = falcon_b0_test_registers,
	.test_nvram = falcon_test_nvram,
	.default_mac_ops = &falcon_xmac_operations,

	.revision = EFX_REV_FALCON_B0,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.tx_dc_base = 0x130000,
	.rx_dc_base = 0x100000,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
};
1841 | |||
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c new file mode 100644 index 00000000000..b9cc846811d --- /dev/null +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -0,0 +1,776 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/rtnetlink.h> | ||
11 | |||
12 | #include "net_driver.h" | ||
13 | #include "phy.h" | ||
14 | #include "efx.h" | ||
15 | #include "nic.h" | ||
16 | #include "workarounds.h" | ||
17 | |||
18 | /* Macros for unpacking the board revision */ | ||
19 | /* The revision info is in host byte order. */ | ||
20 | #define FALCON_BOARD_TYPE(_rev) (_rev >> 8) | ||
21 | #define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) | ||
22 | #define FALCON_BOARD_MINOR(_rev) (_rev & 0xf) | ||
23 | |||
24 | /* Board types */ | ||
25 | #define FALCON_BOARD_SFE4001 0x01 | ||
26 | #define FALCON_BOARD_SFE4002 0x02 | ||
27 | #define FALCON_BOARD_SFE4003 0x03 | ||
28 | #define FALCON_BOARD_SFN4112F 0x52 | ||
29 | |||
30 | /* Board temperature is about 15°C above ambient when air flow is | ||
31 | * limited. The maximum acceptable ambient temperature varies | ||
32 | * depending on the PHY specifications but the critical temperature | ||
33 | * above which we should shut down to avoid damage is 80°C. */ | ||
34 | #define FALCON_BOARD_TEMP_BIAS 15 | ||
35 | #define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS) | ||
36 | |||
37 | /* SFC4000 datasheet says: 'The maximum permitted junction temperature | ||
38 | * is 125°C; the thermal design of the environment for the SFC4000 | ||
39 | * should aim to keep this well below 100°C.' */ | ||
40 | #define FALCON_JUNC_TEMP_MIN 0 | ||
41 | #define FALCON_JUNC_TEMP_MAX 90 | ||
42 | #define FALCON_JUNC_TEMP_CRIT 125 | ||
43 | |||
44 | /***************************************************************************** | ||
45 | * Support for LM87 sensor chip used on several boards | ||
46 | */ | ||
47 | #define LM87_REG_TEMP_HW_INT_LOCK 0x13 | ||
48 | #define LM87_REG_TEMP_HW_EXT_LOCK 0x14 | ||
49 | #define LM87_REG_TEMP_HW_INT 0x17 | ||
50 | #define LM87_REG_TEMP_HW_EXT 0x18 | ||
51 | #define LM87_REG_TEMP_EXT1 0x26 | ||
52 | #define LM87_REG_TEMP_INT 0x27 | ||
53 | #define LM87_REG_ALARMS1 0x41 | ||
54 | #define LM87_REG_ALARMS2 0x42 | ||
55 | #define LM87_IN_LIMITS(nr, _min, _max) \ | ||
56 | 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min | ||
57 | #define LM87_AIN_LIMITS(nr, _min, _max) \ | ||
58 | 0x3B + (nr), _max, 0x1A + (nr), _min | ||
59 | #define LM87_TEMP_INT_LIMITS(_min, _max) \ | ||
60 | 0x39, _max, 0x3A, _min | ||
61 | #define LM87_TEMP_EXT1_LIMITS(_min, _max) \ | ||
62 | 0x37, _max, 0x38, _min | ||
63 | |||
64 | #define LM87_ALARM_TEMP_INT 0x10 | ||
65 | #define LM87_ALARM_TEMP_EXT1 0x20 | ||
66 | |||
67 | #if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) | ||
68 | |||
69 | static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values) | ||
70 | { | ||
71 | while (*reg_values) { | ||
72 | u8 reg = *reg_values++; | ||
73 | u8 value = *reg_values++; | ||
74 | int rc = i2c_smbus_write_byte_data(client, reg, value); | ||
75 | if (rc) | ||
76 | return rc; | ||
77 | } | ||
78 | return 0; | ||
79 | } | ||
80 | |||
/* Temperature limits common to every Falcon board with an LM87:
 * board (internal) sensor capped at the critical board temperature,
 * controller junction (external) sensor at its min/max and the
 * hardware-lockable critical limit. (reg, value) pairs, 0-terminated,
 * consumed by efx_poke_lm87(). */
static const u8 falcon_lm87_common_regs[] = {
	LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
	LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
	LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
	LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
	LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
	0
};
89 | |||
/* Register an LM87 hwmon I2C client and program its limit registers.
 *
 * @info:	I2C board info for the LM87 device on this board
 * @reg_values:	board-specific (reg, value) pairs, 0-terminated,
 *		written before the limits common to all Falcon boards
 *
 * On success the client is stored in falcon_board(efx)->hwmon_client
 * for later polling/teardown and 0 is returned; on failure the client
 * is unregistered again and a negative error code is returned.
 */
static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
			 const u8 *reg_values)
{
	struct falcon_board *board = falcon_board(efx);
	struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
	int rc;

	if (!client)
		return -EIO;

	/* Read-to-clear alarm/interrupt status, so that stale alarms
	 * raised before the driver loaded are discarded */
	i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
	i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);

	rc = efx_poke_lm87(client, reg_values);
	if (rc)
		goto err;
	rc = efx_poke_lm87(client, falcon_lm87_common_regs);
	if (rc)
		goto err;

	board->hwmon_client = client;
	return 0;

err:
	i2c_unregister_device(client);
	return rc;
}
118 | |||
119 | static void efx_fini_lm87(struct efx_nic *efx) | ||
120 | { | ||
121 | i2c_unregister_device(falcon_board(efx)->hwmon_client); | ||
122 | } | ||
123 | |||
/* Poll the LM87 alarm registers and decide whether the board is healthy.
 *
 * @mask: alarm bits meaningful on this board (ALARMS1 in the low byte,
 *	  ALARMS2 in the high byte); boards with unconnected sensors
 *	  pass a mask that drops the corresponding bits
 *
 * Returns 0 if OK, -ERANGE on a critical temperature or electrical
 * fault, or a negative I2C error code. The alarm registers are
 * read-to-clear, so each poll sees only alarms raised since the last.
 */
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
	struct i2c_client *client = falcon_board(efx)->hwmon_client;
	bool temp_crit, elec_fault, is_failure;
	u16 alarms;
	s32 reg;

	/* If link is up then do not monitor temperature */
	if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
		return 0;

	/* Gather both alarm bytes into one word, then drop the bits
	 * the caller declared uninteresting */
	reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
	if (reg < 0)
		return reg;
	alarms = reg;
	reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
	if (reg < 0)
		return reg;
	alarms |= reg << 8;
	alarms &= mask;

	/* A temperature alarm is only treated as a failure once the
	 * reading exceeds the critical threshold; below that it is
	 * logged but tolerated */
	temp_crit = false;
	if (alarms & LM87_ALARM_TEMP_INT) {
		reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
		if (reg < 0)
			return reg;
		if (reg > FALCON_BOARD_TEMP_CRIT)
			temp_crit = true;
	}
	if (alarms & LM87_ALARM_TEMP_EXT1) {
		reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
		if (reg < 0)
			return reg;
		if (reg > FALCON_JUNC_TEMP_CRIT)
			temp_crit = true;
	}
	/* Any remaining (non-temperature) alarm bit is an electrical fault */
	elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
	is_failure = temp_crit || elec_fault;

	if (alarms)
		netif_err(efx, hw, efx->net_dev,
			  "LM87 detected a hardware %s (status %02x:%02x)"
			  "%s%s%s%s\n",
			  is_failure ? "failure" : "problem",
			  alarms & 0xff, alarms >> 8,
			  (alarms & LM87_ALARM_TEMP_INT) ?
			  "; board is overheating" : "",
			  (alarms & LM87_ALARM_TEMP_EXT1) ?
			  "; controller is overheating" : "",
			  temp_crit ? "; reached critical temperature" : "",
			  elec_fault ? "; electrical fault" : "");

	return is_failure ? -ERANGE : 0;
}
178 | |||
179 | #else /* !CONFIG_SENSORS_LM87 */ | ||
180 | |||
/* LM87 support not built in: compile out hardware monitoring and
 * report every board as healthy */
static inline int
efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
	      const u8 *reg_values)
{
	return 0;
}
static inline void efx_fini_lm87(struct efx_nic *efx)
{
}
static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
	return 0;
}
194 | |||
195 | #endif /* CONFIG_SENSORS_LM87 */ | ||
196 | |||
197 | /***************************************************************************** | ||
198 | * Support for the SFE4001 NIC. | ||
199 | * | ||
200 | * The SFE4001 does not power-up fully at reset due to its high power | ||
201 | * consumption. We control its power via a PCA9539 I/O expander. | ||
202 | * It also has a MAX6647 temperature monitor which we expose to | ||
203 | * the lm90 driver. | ||
204 | * | ||
205 | * This also provides minimal support for reflashing the PHY, which is | ||
206 | * initiated by resetting it with the FLASH_CFG_1 pin pulled down. | ||
207 | * On SFE4001 rev A2 and later this is connected to the 3V3X output of | ||
208 | * the IO-expander. | ||
209 | * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually | ||
210 | * exclusive with the network device being open. | ||
211 | */ | ||
212 | |||
213 | /************************************************************************** | ||
214 | * Support for I2C IO Expander device on SFE4001 | ||
215 | */ | ||
216 | #define PCA9539 0x74 | ||
217 | |||
218 | #define P0_IN 0x00 | ||
219 | #define P0_OUT 0x02 | ||
220 | #define P0_INVERT 0x04 | ||
221 | #define P0_CONFIG 0x06 | ||
222 | |||
223 | #define P0_EN_1V0X_LBN 0 | ||
224 | #define P0_EN_1V0X_WIDTH 1 | ||
225 | #define P0_EN_1V2_LBN 1 | ||
226 | #define P0_EN_1V2_WIDTH 1 | ||
227 | #define P0_EN_2V5_LBN 2 | ||
228 | #define P0_EN_2V5_WIDTH 1 | ||
229 | #define P0_EN_3V3X_LBN 3 | ||
230 | #define P0_EN_3V3X_WIDTH 1 | ||
231 | #define P0_EN_5V_LBN 4 | ||
232 | #define P0_EN_5V_WIDTH 1 | ||
233 | #define P0_SHORTEN_JTAG_LBN 5 | ||
234 | #define P0_SHORTEN_JTAG_WIDTH 1 | ||
235 | #define P0_X_TRST_LBN 6 | ||
236 | #define P0_X_TRST_WIDTH 1 | ||
237 | #define P0_DSP_RESET_LBN 7 | ||
238 | #define P0_DSP_RESET_WIDTH 1 | ||
239 | |||
240 | #define P1_IN 0x01 | ||
241 | #define P1_OUT 0x03 | ||
242 | #define P1_INVERT 0x05 | ||
243 | #define P1_CONFIG 0x07 | ||
244 | |||
245 | #define P1_AFE_PWD_LBN 0 | ||
246 | #define P1_AFE_PWD_WIDTH 1 | ||
247 | #define P1_DSP_PWD25_LBN 1 | ||
248 | #define P1_DSP_PWD25_WIDTH 1 | ||
249 | #define P1_RESERVED_LBN 2 | ||
250 | #define P1_RESERVED_WIDTH 2 | ||
251 | #define P1_SPARE_LBN 4 | ||
252 | #define P1_SPARE_WIDTH 4 | ||
253 | |||
254 | /* Temperature Sensor */ | ||
255 | #define MAX664X_REG_RSL 0x02 | ||
256 | #define MAX664X_REG_WLHO 0x0B | ||
257 | |||
/* Remove power from the SFE4001 PHY via the PCA9539 IO expander.
 * Drives all port-0 outputs high first (which deasserts the rail
 * enables — see the inverted bit usage in sfe4001_poweron()), then
 * reverts both ports to inputs, and finally clears any latched
 * MAX664x over-temperature alert so the next power-on starts clean.
 * Errors are deliberately ignored: this is best-effort teardown. */
static void sfe4001_poweroff(struct efx_nic *efx)
{
	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;

	/* Turn off all power rails and disable outputs */
	i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
	i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
	i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);

	/* Clear any over-temperature alert (register is read-to-clear) */
	i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
}
271 | |||
/* Power up the SFE4001 PHY via the PCA9539 IO expander.
 *
 * Sequence: clear any latched over-temperature alert, enable the
 * expander outputs, power-cycle the PHY if it was already (partly)
 * on, then repeatedly (up to 20 attempts) bring up the supply rails
 * and wait for the PHY's DSP to boot, signalled by it asserting the
 * AFE power line on port 1.
 *
 * In reflash mode (PHY_MODE_SPECIAL) the 3V3X rail is held off so
 * that FLASH_CFG_1 stays pulled down through reset, and the DSP never
 * asserts AFE power, so we simply wait one second and report success.
 *
 * Returns 0 on success or a negative error code; on any failure the
 * board is powered off again before returning.
 */
static int sfe4001_poweron(struct efx_nic *efx)
{
	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
	unsigned int i, j;
	int rc;
	u8 out;

	/* Clear any previous over-temperature alert (read-to-clear) */
	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
	if (rc < 0)
		return rc;

	/* Enable port 0 and port 1 outputs on IO expander */
	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
	if (rc)
		return rc;
	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
				       0xff & ~(1 << P1_SPARE_LBN));
	if (rc)
		goto fail_on;

	/* If PHY power is on, turn it all off and wait 1 second to
	 * ensure a full reset.
	 */
	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
	if (rc < 0)
		goto fail_on;
	/* All enable bits deasserted; the 0 << shifts are kept purely
	 * as documentation of which bits this value controls (it is
	 * simply 0xff) */
	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
		       (0 << P0_EN_1V0X_LBN));
	if (rc != out) {
		netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;
		schedule_timeout_uninterruptible(HZ);
	}

	for (i = 0; i < 20; ++i) {
		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
			       (1 << P0_X_TRST_LBN));
		/* In reflash mode keep 3V3X disabled so FLASH_CFG_1
		 * remains pulled down through the PHY reset */
		if (efx->phy_mode & PHY_MODE_SPECIAL)
			out |= 1 << P0_EN_3V3X_LBN;

		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;
		msleep(10);

		/* Turn on 1V power rail */
		out &= ~(1 << P0_EN_1V0X_LBN);
		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;

		netif_info(efx, hw, efx->net_dev,
			   "waiting for DSP boot (attempt %d)...\n", i);

		/* In flash config mode, DSP does not turn on AFE, so
		 * just wait 1 second.
		 */
		if (efx->phy_mode & PHY_MODE_SPECIAL) {
			schedule_timeout_uninterruptible(HZ);
			return 0;
		}

		/* Poll for up to 1 second for the DSP boot signal */
		for (j = 0; j < 10; ++j) {
			msleep(100);

			/* Check DSP has asserted AFE power line */
			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
			if (rc < 0)
				goto fail_on;
			if (rc & (1 << P1_AFE_PWD_LBN))
				return 0;
		}
	}

	netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
	rc = -ETIMEDOUT;
fail_on:
	sfe4001_poweroff(efx);
	return rc;
}
359 | |||
360 | static ssize_t show_phy_flash_cfg(struct device *dev, | ||
361 | struct device_attribute *attr, char *buf) | ||
362 | { | ||
363 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
364 | return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); | ||
365 | } | ||
366 | |||
/* sysfs write of phy_flash_cfg: switch the PHY in or out of reflash
 * mode (PHY_MODE_SPECIAL). An empty write or one starting with '0'
 * selects normal mode; anything else selects reflash mode.
 *
 * Refused with -EBUSY unless the driver is running and the network
 * device is closed, since reflash mode is mutually exclusive with
 * normal operation. Returns @count on success or a negative error.
 */
static ssize_t set_phy_flash_cfg(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	enum efx_phy_mode old_mode, new_mode;
	int err;

	/* rtnl lock serialises against open/close and other reconfiguration */
	rtnl_lock();
	old_mode = efx->phy_mode;
	if (count == 0 || *buf == '0')
		new_mode = old_mode & ~PHY_MODE_SPECIAL;
	else
		new_mode = PHY_MODE_SPECIAL;
	if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
		/* No change in reflash mode: nothing to do */
		err = 0;
	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
		err = -EBUSY;
	} else {
		/* Reset the PHY, reconfigure the MAC and enable/disable
		 * MAC stats accordingly. Stats are stopped before
		 * entering reflash mode and restarted after leaving it
		 * (see the matching calls in sfe4001_init()). */
		efx->phy_mode = new_mode;
		if (new_mode & PHY_MODE_SPECIAL)
			falcon_stop_nic_stats(efx);
		err = sfe4001_poweron(efx);
		if (!err)
			err = efx_reconfigure_port(efx);
		if (!(new_mode & PHY_MODE_SPECIAL))
			falcon_start_nic_stats(efx);
	}
	rtnl_unlock();

	return err ? err : count;
}
401 | |||
402 | static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg); | ||
403 | |||
/* Tear down the SFE4001: remove the reflash-mode sysfs attribute,
 * power the PHY off, then unregister both I2C clients. The clients
 * are unregistered last because sfe4001_poweroff() still uses them. */
static void sfe4001_fini(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);

	netif_info(efx, drv, efx->net_dev, "%s\n", __func__);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	sfe4001_poweroff(efx);
	i2c_unregister_device(board->ioexp_client);
	i2c_unregister_device(board->hwmon_client);
}
415 | |||
/* Health check for the SFE4001. Returns 0 if OK, -EIO if the IO
 * expander could not be read, or -ERANGE if the PHY appears to have
 * lost power (taken to mean the MAX6647 cut it due to an
 * over-temperature alarm). On failure the board is powered off and
 * the PHY marked PHY_MODE_OFF. */
static int sfe4001_check_hw(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	s32 status;

	/* If XAUI link is up then do not monitor */
	if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
		return 0;

	/* Check the powered status of the PHY. Lack of power implies that
	 * the MAX6647 has shut down power to it, probably due to a temp.
	 * alarm. Reading the power status rather than the MAX6647 status
	 * directly because the later is read-to-clear and would thus
	 * start to power up the PHY again when polled, causing us to blip
	 * the power undesirably.
	 * We know we can read from the IO expander because we did
	 * it during power-on. Assume failure now is bad news. */
	status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
	if (status >= 0 &&
	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
		return 0;

	/* Use board power control, not PHY power control */
	sfe4001_poweroff(efx);
	efx->phy_mode = PHY_MODE_OFF;

	return (status < 0) ? -EIO : -ERANGE;
}
444 | |||
445 | static struct i2c_board_info sfe4001_hwmon_info = { | ||
446 | I2C_BOARD_INFO("max6647", 0x4e), | ||
447 | }; | ||
448 | |||
/* This board uses an I2C expander to provide power to the PHY, which needs
 * to be turned on before the PHY can be used.
 * Context: Process context, rtnl lock held
 *
 * Registers the MAX6647 temperature monitor (handed to the lm90
 * driver when available, otherwise a dummy client), registers the
 * PCA9539 IO expander, powers the PHY on, and creates the
 * phy_flash_cfg sysfs attribute. Errors unwind in reverse order via
 * the goto chain. Returns 0 or a negative error code.
 */
static int sfe4001_init(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);
	int rc;

#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
	board->hwmon_client =
		i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
	/* No lm90 driver: bind a dummy client so we can still poke the
	 * chip directly over SMBus */
	board->hwmon_client =
		i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
	if (!board->hwmon_client)
		return -EIO;

	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
	rc = i2c_smbus_write_byte_data(board->hwmon_client,
				       MAX664X_REG_WLHO, 90);
	if (rc)
		goto fail_hwmon;

	board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
	if (!board->ioexp_client) {
		rc = -EIO;
		goto fail_hwmon;
	}

	if (efx->phy_mode & PHY_MODE_SPECIAL) {
		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
		 * will fail. */
		falcon_stop_nic_stats(efx);
	}
	rc = sfe4001_poweron(efx);
	if (rc)
		goto fail_ioexp;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	if (rc)
		goto fail_on;

	netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
	return 0;

fail_on:
	sfe4001_poweroff(efx);
fail_ioexp:
	i2c_unregister_device(board->ioexp_client);
fail_hwmon:
	i2c_unregister_device(board->hwmon_client);
	return rc;
}
504 | |||
505 | /***************************************************************************** | ||
506 | * Support for the SFE4002 | ||
507 | * | ||
508 | */ | ||
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */

/* SFE4002 voltage/temperature limits as (reg, value) pairs,
 * 0-terminated; programmed into the LM87 by efx_init_lm87() */
static const u8 sfe4002_lm87_regs[] = {
	LM87_IN_LIMITS(0, 0x7c, 0x99),		/* 2.5V:  1.8V +/- 10% */
	LM87_IN_LIMITS(1, 0x4c, 0x5e),		/* Vccp1: 1.2V +/- 10% */
	LM87_IN_LIMITS(2, 0xac, 0xd4),		/* 3.3V:  3.3V +/- 10% */
	LM87_IN_LIMITS(3, 0xac, 0xd4),		/* 5V:    5.0V +/- 10% */
	LM87_IN_LIMITS(4, 0xac, 0xe0),		/* 12V:   10.8-14V */
	LM87_IN_LIMITS(5, 0x3f, 0x4f),		/* Vccp2: 1.0V +/- 10% */
	LM87_AIN_LIMITS(0, 0x98, 0xbb),		/* AIN1:  1.66V +/- 10% */
	LM87_AIN_LIMITS(1, 0x8a, 0xa9),		/* AIN2:  1.5V +/- 10% */
	LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
	LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
	0
};

/* LM87 hwmon chip at SMBus address 0x2e on the board's I2C bus */
static struct i2c_board_info sfe4002_hwmon_info = {
	I2C_BOARD_INFO("lm87", 0x2e),
	.platform_data	= &sfe4002_lm87_channel,
};
529 | |||
530 | /****************************************************************************/ | ||
531 | /* LED allocations. Note that on rev A0 boards the schematic and the reality | ||
532 | * differ: red and green are swapped. Below is the fixed (A1) layout (there | ||
533 | * are only 3 A0 boards in existence, so no real reason to make this | ||
534 | * conditional). | ||
535 | */ | ||
536 | #define SFE4002_FAULT_LED (2) /* Red */ | ||
537 | #define SFE4002_RX_LED (0) /* Green */ | ||
538 | #define SFE4002_TX_LED (1) /* Amber */ | ||
539 | |||
/* Set the SFE4002's default LED behaviour: TX and RX LEDs show link
 * status plus activity, fault LED is off until efx_check_lm87()
 * or identify requests otherwise. */
static void sfe4002_init_phy(struct efx_nic *efx)
{
	/* Set the TX and RX LEDs to reflect status and activity, and the
	 * fault LED off */
	falcon_qt202x_set_led(efx, SFE4002_TX_LED,
			      QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
	falcon_qt202x_set_led(efx, SFE4002_RX_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
	falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
550 | |||
551 | static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
552 | { | ||
553 | falcon_qt202x_set_led( | ||
554 | efx, SFE4002_FAULT_LED, | ||
555 | (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF); | ||
556 | } | ||
557 | |||
558 | static int sfe4002_check_hw(struct efx_nic *efx) | ||
559 | { | ||
560 | struct falcon_board *board = falcon_board(efx); | ||
561 | |||
562 | /* A0 board rev. 4002s report a temperature fault the whole time | ||
563 | * (bad sensor) so we mask it out. */ | ||
564 | unsigned alarm_mask = | ||
565 | (board->major == 0 && board->minor == 0) ? | ||
566 | ~LM87_ALARM_TEMP_EXT1 : ~0; | ||
567 | |||
568 | return efx_check_lm87(efx, alarm_mask); | ||
569 | } | ||
570 | |||
/* Board init for the SFE4002: register and program its LM87 monitor */
static int sfe4002_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
575 | |||
576 | /***************************************************************************** | ||
577 | * Support for the SFN4112F | ||
578 | * | ||
579 | */ | ||
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */

/* SFN4112F voltage/temperature limits as (reg, value) pairs,
 * 0-terminated; programmed into the LM87 by efx_init_lm87() */
static const u8 sfn4112f_lm87_regs[] = {
	LM87_IN_LIMITS(0, 0x7c, 0x99),		/* 2.5V:  1.8V +/- 10% */
	LM87_IN_LIMITS(1, 0x4c, 0x5e),		/* Vccp1: 1.2V +/- 10% */
	LM87_IN_LIMITS(2, 0xac, 0xd4),		/* 3.3V:  3.3V +/- 10% */
	LM87_IN_LIMITS(4, 0xac, 0xe0),		/* 12V:   10.8-14V */
	LM87_IN_LIMITS(5, 0x3f, 0x4f),		/* Vccp2: 1.0V +/- 10% */
	LM87_AIN_LIMITS(1, 0x8a, 0xa9),		/* AIN2:  1.5V +/- 10% */
	LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
	LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
	0
};

/* LM87 hwmon chip at SMBus address 0x2e on the board's I2C bus */
static struct i2c_board_info sfn4112f_hwmon_info = {
	I2C_BOARD_INFO("lm87", 0x2e),
	.platform_data	= &sfn4112f_lm87_channel,
};
598 | |||
599 | #define SFN4112F_ACT_LED 0 | ||
600 | #define SFN4112F_LINK_LED 1 | ||
601 | |||
/* Set the SFN4112F's default LED behaviour: activity LED follows RX
 * link activity, link LED follows RX link status */
static void sfn4112f_init_phy(struct efx_nic *efx)
{
	falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
609 | |||
610 | static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
611 | { | ||
612 | int reg; | ||
613 | |||
614 | switch (mode) { | ||
615 | case EFX_LED_OFF: | ||
616 | reg = QUAKE_LED_OFF; | ||
617 | break; | ||
618 | case EFX_LED_ON: | ||
619 | reg = QUAKE_LED_ON; | ||
620 | break; | ||
621 | default: | ||
622 | reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT; | ||
623 | break; | ||
624 | } | ||
625 | |||
626 | falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg); | ||
627 | } | ||
628 | |||
/* Health check for the SFN4112F, via its LM87. The mask drops alarm
 * bits 3 and 6 (0x48) for sensors not connected on this board —
 * NOTE(review): which LM87 inputs those correspond to is not shown
 * here; confirm against the board schematic / LM87 datasheet. */
static int sfn4112f_check_hw(struct efx_nic *efx)
{
	/* Mask out unused sensors */
	return efx_check_lm87(efx, ~0x48);
}
634 | |||
/* Board init for the SFN4112F: register and program its LM87 monitor */
static int sfn4112f_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
639 | |||
640 | /***************************************************************************** | ||
641 | * Support for the SFE4003 | ||
642 | * | ||
643 | */ | ||
static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */

/* SFE4003 voltage/temperature limits as (reg, value) pairs,
 * 0-terminated; programmed into the LM87 by efx_init_lm87() */
static const u8 sfe4003_lm87_regs[] = {
	LM87_IN_LIMITS(0, 0x67, 0x7f),		/* 2.5V:  1.5V +/- 10% */
	LM87_IN_LIMITS(1, 0x4c, 0x5e),		/* Vccp1: 1.2V +/- 10% */
	LM87_IN_LIMITS(2, 0xac, 0xd4),		/* 3.3V:  3.3V +/- 10% */
	LM87_IN_LIMITS(4, 0xac, 0xe0),		/* 12V:   10.8-14V */
	LM87_IN_LIMITS(5, 0x3f, 0x4f),		/* Vccp2: 1.0V +/- 10% */
	LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
	0
};

/* LM87 hwmon chip at SMBus address 0x2e on the board's I2C bus */
static struct i2c_board_info sfe4003_hwmon_info = {
	I2C_BOARD_INFO("lm87", 0x2e),
	.platform_data	= &sfe4003_lm87_channel,
};
660 | |||
661 | /* Board-specific LED info. */ | ||
662 | #define SFE4003_RED_LED_GPIO 11 | ||
663 | #define SFE4003_LED_ON 1 | ||
664 | #define SFE4003_LED_OFF 0 | ||
665 | |||
666 | static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
667 | { | ||
668 | struct falcon_board *board = falcon_board(efx); | ||
669 | |||
670 | /* The LEDs were not wired to GPIOs before A3 */ | ||
671 | if (board->minor < 3 && board->major == 0) | ||
672 | return; | ||
673 | |||
674 | falcon_txc_set_gpio_val( | ||
675 | efx, SFE4003_RED_LED_GPIO, | ||
676 | (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF); | ||
677 | } | ||
678 | |||
/* Configure the SFE4003's red LED GPIO as an output and turn it off */
static void sfe4003_init_phy(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);

	/* The LEDs were not wired to GPIOs before A3 */
	if (board->minor < 3 && board->major == 0)
		return;

	falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
	falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
}
690 | |||
691 | static int sfe4003_check_hw(struct efx_nic *efx) | ||
692 | { | ||
693 | struct falcon_board *board = falcon_board(efx); | ||
694 | |||
695 | /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time | ||
696 | * (bad sensor) so we mask it out. */ | ||
697 | unsigned alarm_mask = | ||
698 | (board->major == 0 && board->minor <= 2) ? | ||
699 | ~LM87_ALARM_TEMP_EXT1 : ~0; | ||
700 | |||
701 | return efx_check_lm87(efx, alarm_mask); | ||
702 | } | ||
703 | |||
/* Board init for the SFE4003: register and program its LM87 monitor */
static int sfe4003_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
}
708 | |||
/* Table of supported Falcon board variants; matched against the type
 * id from the NVRAM revision word in falcon_probe_board(). Boards
 * without board-specific hooks use efx_port_dummy_op_void /
 * efx_fini_lm87 as appropriate. */
static const struct falcon_board_type board_types[] = {
	{
		.id		= FALCON_BOARD_SFE4001,
		.ref_model	= "SFE4001",
		.gen_type	= "10GBASE-T adapter",
		.init		= sfe4001_init,
		.init_phy	= efx_port_dummy_op_void,
		.fini		= sfe4001_fini,
		.set_id_led	= tenxpress_set_id_led,
		.monitor	= sfe4001_check_hw,
	},
	{
		.id		= FALCON_BOARD_SFE4002,
		.ref_model	= "SFE4002",
		.gen_type	= "XFP adapter",
		.init		= sfe4002_init,
		.init_phy	= sfe4002_init_phy,
		.fini		= efx_fini_lm87,
		.set_id_led	= sfe4002_set_id_led,
		.monitor	= sfe4002_check_hw,
	},
	{
		.id		= FALCON_BOARD_SFE4003,
		.ref_model	= "SFE4003",
		.gen_type	= "10GBASE-CX4 adapter",
		.init		= sfe4003_init,
		.init_phy	= sfe4003_init_phy,
		.fini		= efx_fini_lm87,
		.set_id_led	= sfe4003_set_id_led,
		.monitor	= sfe4003_check_hw,
	},
	{
		.id		= FALCON_BOARD_SFN4112F,
		.ref_model	= "SFN4112F",
		.gen_type	= "SFP+ adapter",
		.init		= sfn4112f_init,
		.init_phy	= sfn4112f_init_phy,
		.fini		= efx_fini_lm87,
		.set_id_led	= sfn4112f_set_id_led,
		.monitor	= sfn4112f_check_hw,
	},
};
751 | |||
752 | int falcon_probe_board(struct efx_nic *efx, u16 revision_info) | ||
753 | { | ||
754 | struct falcon_board *board = falcon_board(efx); | ||
755 | u8 type_id = FALCON_BOARD_TYPE(revision_info); | ||
756 | int i; | ||
757 | |||
758 | board->major = FALCON_BOARD_MAJOR(revision_info); | ||
759 | board->minor = FALCON_BOARD_MINOR(revision_info); | ||
760 | |||
761 | for (i = 0; i < ARRAY_SIZE(board_types); i++) | ||
762 | if (board_types[i].id == type_id) | ||
763 | board->type = &board_types[i]; | ||
764 | |||
765 | if (board->type) { | ||
766 | netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n", | ||
767 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | ||
768 | ? board->type->ref_model : board->type->gen_type, | ||
769 | 'A' + board->major, board->minor); | ||
770 | return 0; | ||
771 | } else { | ||
772 | netif_err(efx, probe, efx->net_dev, "unknown board type %d\n", | ||
773 | type_id); | ||
774 | return -ENODEV; | ||
775 | } | ||
776 | } | ||
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c new file mode 100644 index 00000000000..9516452c079 --- /dev/null +++ b/drivers/net/sfc/falcon_xmac.c | |||
@@ -0,0 +1,369 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/delay.h> | ||
12 | #include "net_driver.h" | ||
13 | #include "efx.h" | ||
14 | #include "nic.h" | ||
15 | #include "regs.h" | ||
16 | #include "io.h" | ||
17 | #include "mac.h" | ||
18 | #include "mdio_10g.h" | ||
19 | #include "workarounds.h" | ||
20 | |||
21 | /************************************************************************** | ||
22 | * | ||
23 | * MAC operations | ||
24 | * | ||
25 | *************************************************************************/ | ||
26 | |||
/* Configure the XAUI driver that is an output from Falcon */
void falcon_setup_xaui(struct efx_nic *efx)
{
	efx_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Set the high and low drive strength fields for all four
	 * XAUI lanes (A-D) to the default value */
	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Program the per-lane DEQ and DTX transmit driver settings to
	 * their defaults (presumably de-emphasis and drive level, going
	 * by the field names — confirm against the Falcon datasheet) */
	EFX_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
59 | |||
/* Reset the XAUI/XGXS block and, once the reset completes, reinitialise
 * it via falcon_setup_xaui().  Returns 0 on success or -ETIMEDOUT if the
 * reset has not completed within ~10ms.  The caller must already have
 * statistics fetching disabled (see the WARN_ON below). */
int falcon_reset_xaui(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
		/* Done once both the reset-enable bit and the SerDes
		 * reset-active bit have cleared */
		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
87 | |||
/* Acknowledge the XM management interrupt, but only in the states where
 * it is actually usable: Falcon B0, not in internal loopback, wire-side
 * link up, and not currently relying on polling (falcon_poll_xmac). */
static void falcon_ack_status_intr(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	/* Only Falcon B0 has this interrupt; it is meaningless in
	 * internal loopback modes */
	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	/* NOTE(review): the read itself appears to be the acknowledge —
	 * presumably FR_AB_XM_MGT_INT_MSK is read-to-clear; confirm
	 * against the Falcon register definitions. */
	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
107 | |||
108 | static bool falcon_xgxs_link_ok(struct efx_nic *efx) | ||
109 | { | ||
110 | efx_oword_t reg; | ||
111 | bool align_done, link_ok = false; | ||
112 | int sync_status; | ||
113 | |||
114 | /* Read link status */ | ||
115 | efx_reado(efx, ®, FR_AB_XX_CORE_STAT); | ||
116 | |||
117 | align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); | ||
118 | sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); | ||
119 | if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) | ||
120 | link_ok = true; | ||
121 | |||
122 | /* Clear link status ready for next read */ | ||
123 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); | ||
124 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); | ||
125 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); | ||
126 | efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); | ||
127 | |||
128 | return link_ok; | ||
129 | } | ||
130 | |||
131 | static bool falcon_xmac_link_ok(struct efx_nic *efx) | ||
132 | { | ||
133 | /* | ||
134 | * Check MAC's XGXS link status except when using XGMII loopback | ||
135 | * which bypasses the XGXS block. | ||
136 | * If possible, check PHY's XGXS link status except when using | ||
137 | * MAC loopback. | ||
138 | */ | ||
139 | return (efx->loopback_mode == LOOPBACK_XGMII || | ||
140 | falcon_xgxs_link_ok(efx)) && | ||
141 | (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || | ||
142 | LOOPBACK_INTERNAL(efx) || | ||
143 | efx_mdio_phyxgxs_lane_sync(efx)); | ||
144 | } | ||
145 | |||
/* Program the XMAC core: global config, TX/RX paths, flow control,
 * maximum frame sizes and the station MAC address, all derived from the
 * current link state and net_dev settings. */
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
	unsigned int max_frame_len;
	efx_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);

	/* Configure MAC - cut-thru mode is hard wired on */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX: accept all multicast; unicast promiscuity
	 * follows efx->promiscuous */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	/* Honouring of received pause frames is disabled when we are
	 * not advertising RX flow control */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	efx_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address.  Only the copied bytes of each oword are
	 * meaningful — NOTE(review): the remainder of 'reg' is left
	 * uninitialised before the write; presumably the hardware
	 * ignores the unused bits.  Confirm against the register
	 * definitions. */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
199 | |||
/* Configure the XGXS block for the current loopback mode, resetting it
 * first when the loopback configuration changes (workaround 5147). */
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);

	/* XGXS block is flaky and will need to be reset if moving
	 * in to or out of XGMII, XGXS or XAUI loopbacks. */
	if (EFX_WORKAROUND_5147(efx)) {
		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
		bool reset_xgxs;

		/* Read back the currently-programmed loopback state so
		 * we can detect a change */
		efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
		old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
		old_xgmii_loopback =
			EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

		efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
		old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

		/* The PHY driver may have turned XAUI off */
		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
			      (xaui_loopback != old_xaui_loopback) ||
			      (xgmii_loopback != old_xgmii_loopback));

		if (reset_xgxs)
			falcon_reset_xaui(efx);
	}

	/* Force the signal-detect indication in XGXS/XAUI loopback,
	 * where there is no real incoming signal */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* Apply the XAUI loopback setting to all four lanes */
	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
245 | |||
246 | |||
247 | /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ | ||
248 | static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) | ||
249 | { | ||
250 | bool mac_up = falcon_xmac_link_ok(efx); | ||
251 | |||
252 | if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || | ||
253 | efx_phy_mode_disabled(efx->phy_mode)) | ||
254 | /* XAUI link is expected to be down */ | ||
255 | return mac_up; | ||
256 | |||
257 | falcon_stop_nic_stats(efx); | ||
258 | |||
259 | while (!mac_up && tries) { | ||
260 | netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); | ||
261 | falcon_reset_xaui(efx); | ||
262 | udelay(200); | ||
263 | |||
264 | mac_up = falcon_xmac_link_ok(efx); | ||
265 | --tries; | ||
266 | } | ||
267 | |||
268 | falcon_start_nic_stats(efx); | ||
269 | |||
270 | return mac_up; | ||
271 | } | ||
272 | |||
273 | static bool falcon_xmac_check_fault(struct efx_nic *efx) | ||
274 | { | ||
275 | return !falcon_xmac_link_ok_retry(efx, 5); | ||
276 | } | ||
277 | |||
/* Reconfigure the whole XMAC datapath: XGXS core, then XMAC core, then
 * the MAC wrapper, and finally recheck the XAUI link.  Always returns
 * 0 (the mac_operations .reconfigure signature requires an int). */
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* If the link still isn't up after 5 retries, fall back to
	 * polling it from falcon_poll_xmac() */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}
292 | |||
/* Refresh efx->mac_stats from the DMAed XMAC statistics block and
 * recompute the derived byte counters.  FALCON_STAT() is defined
 * elsewhere (nic.h) — presumably it copies the named DMAed hardware
 * counter into the corresponding mac_stats field. */
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	/* Update MAC stats from DMAed values */
	FALCON_STAT(efx, XgRxOctets, rx_bytes);
	FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
	FALCON_STAT(efx, XgRxPkts, rx_packets);
	FALCON_STAT(efx, XgRxPktsOK, rx_good);
	FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
	FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
	FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
	FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
	FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
	FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
	FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
	FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
	FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
	FALCON_STAT(efx, XgRxAlignError, rx_align_error);
	FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
	FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
	FALCON_STAT(efx, XgRxControlPkts, rx_control);
	FALCON_STAT(efx, XgRxPausePkts, rx_pause);
	FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
	FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
	FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
	FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
	FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
	FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
	FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
	FALCON_STAT(efx, XgRxLengthError, rx_length_error);
	FALCON_STAT(efx, XgTxPkts, tx_packets);
	FALCON_STAT(efx, XgTxOctets, tx_bytes);
	FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
	FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
	FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
	FALCON_STAT(efx, XgTxControlPkts, tx_control);
	FALCON_STAT(efx, XgTxPausePkts, tx_pause);
	FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
	FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
	FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
	FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
	FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
	FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
	FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
	FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
	FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
	FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
	FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
	FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);

	/* Update derived statistics: good TX bytes exclude bad bytes
	 * and 64-byte control frames; bad RX bytes are everything that
	 * wasn't good or control */
	mac_stats->tx_good_bytes =
		(mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
		 mac_stats->tx_control * 64);
	mac_stats->rx_bad_bytes =
		(mac_stats->rx_bytes - mac_stats->rx_good_bytes -
		 mac_stats->rx_control * 64);
}
352 | |||
353 | void falcon_poll_xmac(struct efx_nic *efx) | ||
354 | { | ||
355 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
356 | |||
357 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || | ||
358 | !nic_data->xmac_poll_required) | ||
359 | return; | ||
360 | |||
361 | nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); | ||
362 | falcon_ack_status_intr(efx); | ||
363 | } | ||
364 | |||
/* MAC operations for the Falcon 10G (X)MAC */
const struct efx_mac_operations falcon_xmac_operations = {
	.reconfigure	= falcon_reconfigure_xmac,
	.update_stats	= falcon_update_stats_xmac,
	.check_fault	= falcon_xmac_check_fault,
};
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c new file mode 100644 index 00000000000..2b9636f96e0 --- /dev/null +++ b/drivers/net/sfc/filter.c | |||
@@ -0,0 +1,727 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/in.h> | ||
11 | #include <net/ip.h> | ||
12 | #include "efx.h" | ||
13 | #include "filter.h" | ||
14 | #include "io.h" | ||
15 | #include "nic.h" | ||
16 | #include "regs.h" | ||
17 | |||
/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define FILTER_CTL_SRCH_FUDGE_WILD 3
#define FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_filter_search() when the
 * table is full.
 */
#define FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define FILTER_CTL_SRCH_HINT_MAX 5

/* Identifies which hardware filter table a specification belongs to */
enum efx_filter_table_id {
	EFX_FILTER_TABLE_RX_IP = 0,	/* IPv4 TCP/UDP n-tuple filters */
	EFX_FILTER_TABLE_RX_MAC,	/* Ethernet MAC (+ optional VLAN) filters */
	EFX_FILTER_TABLE_COUNT,
};

/* Software state for one hardware filter table */
struct efx_filter_table {
	enum efx_filter_table_id id;
	u32 offset;		/* address of table relative to BAR */
	unsigned size;		/* number of entries */
	unsigned step;		/* step between entries */
	unsigned used;		/* number currently used */
	unsigned long *used_bitmap;	/* one bit per hardware entry */
	struct efx_filter_spec *spec;	/* shadow copy of each programmed spec */
	unsigned search_depth[EFX_FILTER_TYPE_COUNT];
					/* deepest probe chain seen per type */
};

/* Per-NIC filter state (hung off efx->filter_state) */
struct efx_filter_state {
	spinlock_t lock;	/* serialises all filter table updates */
	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
#ifdef CONFIG_RFS_ACCEL
	/* NOTE(review): presumably one flow ID per IP-table entry, for
	 * accelerated RFS expiry — confirm against the RFS callers */
	u32 *rps_flow_id;
	unsigned rps_expire_index;
#endif
};
60 | |||
61 | /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit | ||
62 | * key derived from the n-tuple. The initial LFSR state is 0xffff. */ | ||
63 | static u16 efx_filter_hash(u32 key) | ||
64 | { | ||
65 | u16 tmp; | ||
66 | |||
67 | /* First 16 rounds */ | ||
68 | tmp = 0x1fff ^ key >> 16; | ||
69 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; | ||
70 | tmp = tmp ^ tmp >> 9; | ||
71 | /* Last 16 rounds */ | ||
72 | tmp = tmp ^ tmp << 13 ^ key; | ||
73 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; | ||
74 | return tmp ^ tmp >> 9; | ||
75 | } | ||
76 | |||
77 | /* To allow for hash collisions, filter search continues at these | ||
78 | * increments from the first possible entry selected by the hash. */ | ||
79 | static u16 efx_filter_increment(u32 key) | ||
80 | { | ||
81 | return key * 2 - 1; | ||
82 | } | ||
83 | |||
/* Map a filter type to the table that holds it.  The filter type enum
 * is laid out so that the table ID is simply the type shifted right by
 * two; the BUILD_BUG_ONs verify that layout at compile time. */
static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
	EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
	return spec->type >> 2;
}
96 | |||
97 | static struct efx_filter_table * | ||
98 | efx_filter_spec_table(struct efx_filter_state *state, | ||
99 | const struct efx_filter_spec *spec) | ||
100 | { | ||
101 | if (spec->type == EFX_FILTER_UNSPEC) | ||
102 | return NULL; | ||
103 | else | ||
104 | return &state->table[efx_filter_spec_table_id(spec)]; | ||
105 | } | ||
106 | |||
/* Reset the recorded per-type maximum search depths of @table to zero */
static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
	memset(table->search_depth, 0, sizeof(table->search_depth));
}
111 | |||
/* Push the current per-type search limits to the RX_FILTER_CTL
 * register.  Each hardware limit is the deepest probe chain recorded
 * for that type plus a pipeline fudge factor (FILTER_CTL_SRCH_FUDGE_*).
 * The MAC-table limits only exist on hardware where that table has a
 * non-zero size. */
static void efx_filter_push_rx_limits(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_FULL] +
			FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_WILD] +
			FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
148 | |||
/* Pack a source/destination IPv4 address+port pair into the three data
 * words of @spec, converted to host byte order, in the layout:
 *   data[0] = { host1[15:0], port1 }
 *   data[1] = { port2, host1[31:16] }
 *   data[2] = host2
 */
static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
					 __be32 host1, __be16 port1,
					 __be32 host2, __be16 port2)
{
	spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
	spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
	spec->data[2] = ntohl(host2);
}
157 | |||
158 | /** | ||
159 | * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port | ||
160 | * @spec: Specification to initialise | ||
161 | * @proto: Transport layer protocol number | ||
162 | * @host: Local host address (network byte order) | ||
163 | * @port: Local port (network byte order) | ||
164 | */ | ||
165 | int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, | ||
166 | __be32 host, __be16 port) | ||
167 | { | ||
168 | __be32 host1; | ||
169 | __be16 port1; | ||
170 | |||
171 | EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); | ||
172 | |||
173 | /* This cannot currently be combined with other filtering */ | ||
174 | if (spec->type != EFX_FILTER_UNSPEC) | ||
175 | return -EPROTONOSUPPORT; | ||
176 | |||
177 | if (port == 0) | ||
178 | return -EINVAL; | ||
179 | |||
180 | switch (proto) { | ||
181 | case IPPROTO_TCP: | ||
182 | spec->type = EFX_FILTER_TCP_WILD; | ||
183 | break; | ||
184 | case IPPROTO_UDP: | ||
185 | spec->type = EFX_FILTER_UDP_WILD; | ||
186 | break; | ||
187 | default: | ||
188 | return -EPROTONOSUPPORT; | ||
189 | } | ||
190 | |||
191 | /* Filter is constructed in terms of source and destination, | ||
192 | * with the odd wrinkle that the ports are swapped in a UDP | ||
193 | * wildcard filter. We need to convert from local and remote | ||
194 | * (= zero for wildcard) addresses. | ||
195 | */ | ||
196 | host1 = 0; | ||
197 | if (proto != IPPROTO_UDP) { | ||
198 | port1 = 0; | ||
199 | } else { | ||
200 | port1 = port; | ||
201 | port = 0; | ||
202 | } | ||
203 | |||
204 | __efx_filter_set_ipv4(spec, host1, port1, host, port); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports | ||
210 | * @spec: Specification to initialise | ||
211 | * @proto: Transport layer protocol number | ||
212 | * @host: Local host address (network byte order) | ||
213 | * @port: Local port (network byte order) | ||
214 | * @rhost: Remote host address (network byte order) | ||
215 | * @rport: Remote port (network byte order) | ||
216 | */ | ||
217 | int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, | ||
218 | __be32 host, __be16 port, | ||
219 | __be32 rhost, __be16 rport) | ||
220 | { | ||
221 | EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); | ||
222 | |||
223 | /* This cannot currently be combined with other filtering */ | ||
224 | if (spec->type != EFX_FILTER_UNSPEC) | ||
225 | return -EPROTONOSUPPORT; | ||
226 | |||
227 | if (port == 0 || rport == 0) | ||
228 | return -EINVAL; | ||
229 | |||
230 | switch (proto) { | ||
231 | case IPPROTO_TCP: | ||
232 | spec->type = EFX_FILTER_TCP_FULL; | ||
233 | break; | ||
234 | case IPPROTO_UDP: | ||
235 | spec->type = EFX_FILTER_UDP_FULL; | ||
236 | break; | ||
237 | default: | ||
238 | return -EPROTONOSUPPORT; | ||
239 | } | ||
240 | |||
241 | __efx_filter_set_ipv4(spec, rhost, rport, host, port); | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | /** | ||
246 | * efx_filter_set_eth_local - specify local Ethernet address and optional VID | ||
247 | * @spec: Specification to initialise | ||
248 | * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC | ||
249 | * @addr: Local Ethernet MAC address | ||
250 | */ | ||
251 | int efx_filter_set_eth_local(struct efx_filter_spec *spec, | ||
252 | u16 vid, const u8 *addr) | ||
253 | { | ||
254 | EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); | ||
255 | |||
256 | /* This cannot currently be combined with other filtering */ | ||
257 | if (spec->type != EFX_FILTER_UNSPEC) | ||
258 | return -EPROTONOSUPPORT; | ||
259 | |||
260 | if (vid == EFX_FILTER_VID_UNSPEC) { | ||
261 | spec->type = EFX_FILTER_MAC_WILD; | ||
262 | spec->data[0] = 0; | ||
263 | } else { | ||
264 | spec->type = EFX_FILTER_MAC_FULL; | ||
265 | spec->data[0] = vid; | ||
266 | } | ||
267 | |||
268 | spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]; | ||
269 | spec->data[2] = addr[0] << 8 | addr[1]; | ||
270 | return 0; | ||
271 | } | ||
272 | |||
/* Build a filter entry and return its n-tuple key.  The key is the XOR
 * of the three data words plus a per-table discriminator bit (TCP/UDP
 * for IP filters, full/wildcard for MAC filters). */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
	u32 data3;

	switch (efx_filter_spec_table_id(spec)) {
	case EFX_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
			       spec->type == EFX_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_8(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_IP_OVERRIDE,
			!!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	default:
		/* efx_filter_spec_table_id() only returns the two RX
		 * table IDs for a valid spec */
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
322 | |||
323 | static bool efx_filter_equal(const struct efx_filter_spec *left, | ||
324 | const struct efx_filter_spec *right) | ||
325 | { | ||
326 | if (left->type != right->type || | ||
327 | memcmp(left->data, right->data, sizeof(left->data))) | ||
328 | return false; | ||
329 | |||
330 | return true; | ||
331 | } | ||
332 | |||
/* Probe the open-hashed table for @spec.
 *
 * The probe sequence starts at hash(key) and advances by a key-derived
 * odd increment (so, with the power-of-two table size, every entry can
 * be reached).  For insertion the first unused slot is also acceptable,
 * and up to FILTER_CTL_SRCH_MAX entries (FILTER_CTL_SRCH_HINT_MAX for
 * hint-priority filters) are probed; for lookup only the recorded
 * maximum depth for this filter type needs to be searched.
 *
 * Returns the entry index and sets *depth_required to the 1-based probe
 * depth at which it was found, or -EBUSY/-ENOENT when the depth limit
 * is reached while inserting/looking up respectively.
 */
static int efx_filter_search(struct efx_filter_table *table,
			     struct efx_filter_spec *spec, u32 key,
			     bool for_insert, int *depth_required)
{
	unsigned hash, incr, filter_idx, depth, depth_max;

	hash = efx_filter_hash(key);
	incr = efx_filter_increment(key);

	filter_idx = hash & (table->size - 1);
	depth = 1;
	depth_max = (for_insert ?
		     (spec->priority <= EFX_FILTER_PRI_HINT ?
		      FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
		     table->search_depth[spec->type]);

	for (;;) {
		/* Return success if entry is used and matches this spec
		 * or entry is unused and we are trying to insert.
		 */
		if (test_bit(filter_idx, table->used_bitmap) ?
		    efx_filter_equal(spec, &table->spec[filter_idx]) :
		    for_insert) {
			*depth_required = depth;
			return filter_idx;
		}

		/* Return failure if we reached the maximum search depth */
		if (depth == depth_max)
			return for_insert ? -EBUSY : -ENOENT;

		filter_idx = (filter_idx + incr) & (table->size - 1);
		++depth;
	}
}
368 | |||
369 | /* Construct/deconstruct external filter IDs */ | ||
370 | |||
371 | static inline int | ||
372 | efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index) | ||
373 | { | ||
374 | return table_id << 16 | index; | ||
375 | } | ||
376 | |||
/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace: Flag for whether the specified filter may replace a filter
 *	with an identical match expression and equal or lower priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code (-EINVAL for an unusable
 * spec/table, -EBUSY if the table is too full, -EEXIST if an identical
 * filter exists and @replace is not set, -EPERM if the existing filter
 * has higher priority).
 */
int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
			     bool replace)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
	struct efx_filter_spec *saved_spec;
	efx_oword_t filter;
	int filter_idx, depth;
	u32 key;
	int rc;

	if (!table || table->size == 0)
		return -EINVAL;

	/* Build the hardware entry and its hash key outside the lock */
	key = efx_filter_build(&filter, spec);

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_depth=%d", __func__, spec->type,
		   table->search_depth[spec->type]);

	spin_lock_bh(&state->lock);

	/* Find a free slot, or an existing entry with the same match */
	rc = efx_filter_search(table, spec, key, true, &depth);
	if (rc < 0)
		goto out;
	filter_idx = rc;
	BUG_ON(filter_idx >= table->size);
	saved_spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap)) {
		/* Should we replace the existing filter? */
		if (!replace) {
			rc = -EEXIST;
			goto out;
		}
		/* Never let a lower-priority filter displace a
		 * higher-priority one */
		if (spec->priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
	} else {
		__set_bit(filter_idx, table->used_bitmap);
		++table->used;
	}
	*saved_spec = *spec;

	/* If this insert extended the longest probe chain for its type,
	 * the hardware search limit must be raised to match */
	if (table->search_depth[spec->type] < depth) {
		table->search_depth[spec->type] = depth;
		efx_filter_push_rx_limits(efx);
	}

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec->type, filter_idx, spec->dmaq_id);
	rc = efx_filter_make_id(table->id, filter_idx);

out:
	spin_unlock_bh(&state->lock);
	return rc;
}
448 | |||
/* Disable one hardware filter entry and forget its software state.
 * Calling this on an unused entry is a no-op.  Caller holds state->lock.
 */
static void efx_filter_table_clear_entry(struct efx_nic *efx,
					 struct efx_filter_table *table,
					 int filter_idx)
{
	/* static => zero-initialised and never modified; writing the
	 * all-zeroes filter word clears the hardware entry.
	 */
	static efx_oword_t filter;

	if (test_bit(filter_idx, table->used_bitmap)) {
		__clear_bit(filter_idx, table->used_bitmap);
		--table->used;
		memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}
}
464 | |||
/**
 * efx_filter_remove_filter - remove a filter by specification
 * @efx: NIC from which to remove the filter
 * @spec: Specification for the filter
 *
 * On success, return zero.
 * On failure, return a negative error code.
 */
int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
	struct efx_filter_spec *saved_spec;
	efx_oword_t filter;
	int filter_idx, depth;
	u32 key;
	int rc;

	if (!table)
		return -EINVAL;

	/* Rebuild the key so we can locate the matching entry */
	key = efx_filter_build(&filter, spec);

	spin_lock_bh(&state->lock);

	/* Look up the existing entry only (for_insert = false) */
	rc = efx_filter_search(table, spec, key, false, &depth);
	if (rc < 0)
		goto out;
	filter_idx = rc;
	saved_spec = &table->spec[filter_idx];

	/* A lower-priority caller may not remove a higher-priority filter */
	if (spec->priority < saved_spec->priority) {
		rc = -EPERM;
		goto out;
	}

	efx_filter_table_clear_entry(efx, table, filter_idx);
	/* Once the table is empty the search depth can safely shrink */
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);
	rc = 0;

out:
	spin_unlock_bh(&state->lock);
	return rc;
}
510 | |||
511 | static void efx_filter_table_clear(struct efx_nic *efx, | ||
512 | enum efx_filter_table_id table_id, | ||
513 | enum efx_filter_priority priority) | ||
514 | { | ||
515 | struct efx_filter_state *state = efx->filter_state; | ||
516 | struct efx_filter_table *table = &state->table[table_id]; | ||
517 | int filter_idx; | ||
518 | |||
519 | spin_lock_bh(&state->lock); | ||
520 | |||
521 | for (filter_idx = 0; filter_idx < table->size; ++filter_idx) | ||
522 | if (table->spec[filter_idx].priority <= priority) | ||
523 | efx_filter_table_clear_entry(efx, table, filter_idx); | ||
524 | if (table->used == 0) | ||
525 | efx_filter_table_reset_search_depth(table); | ||
526 | |||
527 | spin_unlock_bh(&state->lock); | ||
528 | } | ||
529 | |||
530 | /** | ||
531 | * efx_filter_clear_rx - remove RX filters by priority | ||
532 | * @efx: NIC from which to remove the filters | ||
533 | * @priority: Maximum priority to remove | ||
534 | */ | ||
535 | void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) | ||
536 | { | ||
537 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority); | ||
538 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority); | ||
539 | } | ||
540 | |||
/* Restore filter state after reset: re-write every software-tracked
 * filter entry to the hardware tables, then re-push the RX search
 * depth limits.
 */
void efx_restore_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	efx_oword_t filter;
	int filter_idx;

	spin_lock_bh(&state->lock);

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			/* Only entries marked used have valid specs */
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_filter_push_rx_limits(efx);

	spin_unlock_bh(&state->lock);
}
567 | |||
568 | int efx_probe_filters(struct efx_nic *efx) | ||
569 | { | ||
570 | struct efx_filter_state *state; | ||
571 | struct efx_filter_table *table; | ||
572 | unsigned table_id; | ||
573 | |||
574 | state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL); | ||
575 | if (!state) | ||
576 | return -ENOMEM; | ||
577 | efx->filter_state = state; | ||
578 | |||
579 | spin_lock_init(&state->lock); | ||
580 | |||
581 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | ||
582 | #ifdef CONFIG_RFS_ACCEL | ||
583 | state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS, | ||
584 | sizeof(*state->rps_flow_id), | ||
585 | GFP_KERNEL); | ||
586 | if (!state->rps_flow_id) | ||
587 | goto fail; | ||
588 | #endif | ||
589 | table = &state->table[EFX_FILTER_TABLE_RX_IP]; | ||
590 | table->id = EFX_FILTER_TABLE_RX_IP; | ||
591 | table->offset = FR_BZ_RX_FILTER_TBL0; | ||
592 | table->size = FR_BZ_RX_FILTER_TBL0_ROWS; | ||
593 | table->step = FR_BZ_RX_FILTER_TBL0_STEP; | ||
594 | } | ||
595 | |||
596 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | ||
597 | table = &state->table[EFX_FILTER_TABLE_RX_MAC]; | ||
598 | table->id = EFX_FILTER_TABLE_RX_MAC; | ||
599 | table->offset = FR_CZ_RX_MAC_FILTER_TBL0; | ||
600 | table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; | ||
601 | table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; | ||
602 | } | ||
603 | |||
604 | for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { | ||
605 | table = &state->table[table_id]; | ||
606 | if (table->size == 0) | ||
607 | continue; | ||
608 | table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), | ||
609 | sizeof(unsigned long), | ||
610 | GFP_KERNEL); | ||
611 | if (!table->used_bitmap) | ||
612 | goto fail; | ||
613 | table->spec = vzalloc(table->size * sizeof(*table->spec)); | ||
614 | if (!table->spec) | ||
615 | goto fail; | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | |||
620 | fail: | ||
621 | efx_remove_filters(efx); | ||
622 | return -ENOMEM; | ||
623 | } | ||
624 | |||
625 | void efx_remove_filters(struct efx_nic *efx) | ||
626 | { | ||
627 | struct efx_filter_state *state = efx->filter_state; | ||
628 | enum efx_filter_table_id table_id; | ||
629 | |||
630 | for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { | ||
631 | kfree(state->table[table_id].used_bitmap); | ||
632 | vfree(state->table[table_id].spec); | ||
633 | } | ||
634 | #ifdef CONFIG_RFS_ACCEL | ||
635 | kfree(state->rps_flow_id); | ||
636 | #endif | ||
637 | kfree(state); | ||
638 | } | ||
639 | |||
640 | #ifdef CONFIG_RFS_ACCEL | ||
641 | |||
/* Accelerated RFS callback: steer an IPv4 TCP/UDP flow to the RX queue
 * of the CPU that is consuming it, by inserting a PRI_HINT filter.
 * Returns the filter ID (>= 0) on success or a negative error code.
 */
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	/* Only IPv4 is supported by these filter tables */
	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	/* Fragments lack the L4 ports needed for a full-match filter */
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	/* ports[0] = source port, ports[1] = destination port */
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	/* replace=true: an existing hint for the same flow may be updated */
	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later.
	 * rc doubles as the table index here — assumes the returned filter
	 * ID for the RX IP table equals the row index; TODO confirm against
	 * efx_filter_make_id().
	 */
	state->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}
690 | |||
/* Scan up to @quota RX IP filter entries, expiring RFS hint filters
 * whose flows the stack says may be retired.  Returns true if a scan
 * was performed, false if the lock was contended (caller retries later).
 */
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
	unsigned mask = table->size - 1;	/* table->size is a power of 2 */
	unsigned index;
	unsigned stop;

	/* trylock: never block the caller on the filter lock */
	if (!spin_trylock_bh(&state->lock))
		return false;

	/* Resume the circular scan where the previous call stopped.
	 * NOTE(review): if quota is a multiple of table->size the masked
	 * stop equals index and nothing is scanned — assumes callers pass
	 * quota < table->size; confirm at call sites.
	 */
	index = state->rps_expire_index;
	stop = (index + quota) & mask;

	while (index != stop) {
		if (test_bit(index, table->used_bitmap) &&
		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
		    rps_may_expire_flow(efx->net_dev,
					table->spec[index].dmaq_id,
					state->rps_flow_id[index], index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expiring filter %d [flow %u]\n",
				   index, state->rps_flow_id[index]);
			efx_filter_table_clear_entry(efx, table, index);
		}
		index = (index + 1) & mask;
	}

	state->rps_expire_index = stop;
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
	return true;
}
726 | |||
727 | #endif /* CONFIG_RFS_ACCEL */ | ||
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h new file mode 100644 index 00000000000..872f2132a49 --- /dev/null +++ b/drivers/net/sfc/filter.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_FILTER_H | ||
11 | #define EFX_FILTER_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | |||
/**
 * enum efx_filter_type - type of hardware filter
 * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
 * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
 * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
 * @EFX_FILTER_UNSPEC: Match type is unspecified
 *
 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
 */
enum efx_filter_type {
	EFX_FILTER_TCP_FULL = 0,
	EFX_FILTER_TCP_WILD,
	EFX_FILTER_UDP_FULL,
	EFX_FILTER_UDP_WILD,
	EFX_FILTER_MAC_FULL = 4,	/* explicit: MAC types start at 4 */
	EFX_FILTER_MAC_WILD,
	EFX_FILTER_TYPE_COUNT,		/* number of specific types */
	EFX_FILTER_UNSPEC = 0xf,	/* fits the 4-bit type field below */
};
37 | |||
/**
 * enum efx_filter_priority - priority of a hardware filter specification
 * @EFX_FILTER_PRI_HINT: Performance hint
 * @EFX_FILTER_PRI_MANUAL: Manually configured filter
 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
 *
 * Ordering is meaningful: a larger value is a higher priority, and
 * insert/remove operations compare these values directly.
 */
enum efx_filter_priority {
	EFX_FILTER_PRI_HINT = 0,
	EFX_FILTER_PRI_MANUAL,
	EFX_FILTER_PRI_REQUIRED,
};
49 | |||
/**
 * enum efx_filter_flags - flags for hardware filter specifications
 * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
 *	By default, matching packets will be delivered only to the
 *	specified queue. If this flag is set, they will be delivered
 *	to a range of queues offset from the specified queue number
 *	according to the indirection table.
 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
 *	queue.
 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
 *	any IP filter that matches the same packet. By default, IP
 *	filters take precedence.
 * @EFX_FILTER_FLAG_RX: Filter is for RX
 *
 * These are bit flags and may be combined by OR-ing.
 */
enum efx_filter_flags {
	EFX_FILTER_FLAG_RX_RSS = 0x01,
	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
	EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
	EFX_FILTER_FLAG_RX = 0x08,
};
70 | |||
/**
 * struct efx_filter_spec - specification for a hardware filter
 * @type: Type of match to be performed, from &enum efx_filter_type
 * @priority: Priority of the filter, from &enum efx_filter_priority
 * @flags: Miscellaneous flags, from &enum efx_filter_flags
 * @dmaq_id: Source/target queue index
 * @data: Match data (type-dependent)
 *
 * Use the efx_filter_set_*() functions to initialise the @type and
 * @data fields.
 */
struct efx_filter_spec {
	u8	type:4;		/* 4-bit field; EFX_FILTER_UNSPEC = 0xf fits */
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};
89 | |||
90 | static inline void efx_filter_init_rx(struct efx_filter_spec *spec, | ||
91 | enum efx_filter_priority priority, | ||
92 | enum efx_filter_flags flags, | ||
93 | unsigned rxq_id) | ||
94 | { | ||
95 | spec->type = EFX_FILTER_UNSPEC; | ||
96 | spec->priority = priority; | ||
97 | spec->flags = EFX_FILTER_FLAG_RX | flags; | ||
98 | spec->dmaq_id = rxq_id; | ||
99 | } | ||
100 | |||
/* Initialisers for the match data (@type/@data) of a filter spec */
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
				     __be32 host, __be16 port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
				    __be32 host, __be16 port,
				    __be32 rhost, __be16 rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
				    u16 vid, const u8 *addr);
enum {
	/* VID value for efx_filter_set_eth_local() — presumably means
	 * "no VLAN match"; verify against its implementation.
	 */
	EFX_FILTER_VID_UNSPEC = 0xffff,
};
111 | |||
112 | #endif /* EFX_FILTER_H */ | ||
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h new file mode 100644 index 00000000000..751d1ec112c --- /dev/null +++ b/drivers/net/sfc/io.h | |||
@@ -0,0 +1,293 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_IO_H | ||
12 | #define EFX_IO_H | ||
13 | |||
14 | #include <linux/io.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | |||
17 | /************************************************************************** | ||
18 | * | ||
19 | * NIC register I/O | ||
20 | * | ||
21 | ************************************************************************** | ||
22 | * | ||
23 | * Notes on locking strategy: | ||
24 | * | ||
25 | * Most CSRs are 128-bit (oword) and therefore cannot be read or | ||
26 | * written atomically. Access from the host is buffered by the Bus | ||
27 | * Interface Unit (BIU). Whenever the host reads from the lowest | ||
28 | * address of such a register, or from the address of a different such | ||
29 | * register, the BIU latches the register's value. Subsequent reads | ||
30 | * from higher addresses of the same register will read the latched | ||
31 | * value. Whenever the host writes part of such a register, the BIU | ||
32 | * collects the written value and does not write to the underlying | ||
33 | * register until all 4 dwords have been written. A similar buffering | ||
34 | * scheme applies to host access to the NIC's 64-bit SRAM. | ||
35 | * | ||
36 | * Access to different CSRs and 64-bit SRAM words must be serialised, | ||
37 | * since interleaved access can result in lost writes or lost | ||
38 | * information from read-to-clear fields. We use efx_nic::biu_lock | ||
39 | * for this. (We could use separate locks for read and write, but | ||
40 | * this is not normally a performance bottleneck.) | ||
41 | * | ||
42 | * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are | ||
43 | * 128-bit but are special-cased in the BIU to avoid the need for | ||
44 | * locking in the host: | ||
45 | * | ||
46 | * - They are write-only. | ||
47 | * - The semantics of writing to these registers are such that | ||
48 | * replacing the low 96 bits with zero does not affect functionality. | ||
49 | * - If the host writes to the last dword address of such a register | ||
50 | * (i.e. the high 32 bits) the underlying register will always be | ||
51 | * written. If the collector and the current write together do not | ||
52 | * provide values for all 128 bits of the register, the low 96 bits | ||
53 | * will be written as zero. | ||
54 | * - If the host writes to the address of any other part of such a | ||
55 | * register while the collector already holds values for some other | ||
56 | * register, the write is discarded and the collector maintains its | ||
57 | * current state. | ||
58 | */ | ||
59 | |||
60 | #if BITS_PER_LONG == 64 | ||
61 | #define EFX_USE_QWORD_IO 1 | ||
62 | #endif | ||
63 | |||
#ifdef EFX_USE_QWORD_IO
/* Raw 64-bit MMIO accessors; callers are responsible for locking.
 * __raw_* variants perform no byte-swapping, hence the __le64 type.
 */
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
				  unsigned int reg)
{
	__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
75 | |||
/* Raw 32-bit MMIO accessors; callers are responsible for locking.
 * __raw_* variants perform no byte-swapping, hence the __le32 type.
 */
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
			       unsigned int reg)
{
	__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le32)__raw_readl(efx->membase + reg);
}
85 | |||
/* Write a normal 128-bit CSR, locking as appropriate.
 *
 * The BIU only commits the register once all four dwords have been
 * written (see the notes at the top of this file), so the whole
 * sequence must be protected from interleaving by biu_lock.
 */
static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
			      unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
	/* Order the MMIO writes with the lock release on architectures
	 * where spin_unlock alone does not guarantee this.
	 */
	mmiowb();
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}
109 | |||
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate.
 * Host access to the NIC's 64-bit SRAM is buffered like oword CSRs (see
 * notes above), so both dword writes must happen under biu_lock.
 */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
				   efx_qword_t *value, unsigned int index)
{
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing SRAM address %x with " EFX_QWORD_FMT "\n",
		   addr, EFX_QWORD_VAL(*value));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
	__raw_writel((__force u32)value->u32[0], membase + addr);
	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
	mmiowb();
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}
131 | |||
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR.
 * A single dword write needs no BIU buffering, so no lock is taken.
 */
static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
			      unsigned int reg)
{
	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with "EFX_DWORD_FMT"\n",
		   reg, EFX_DWORD_VAL(*value));

	/* No lock required */
	_efx_writed(efx, value->u32[0], reg);
}
143 | |||
/* Read a 128-bit CSR, locking as appropriate.
 * Reading the lowest address latches the whole register in the BIU
 * (see notes above), so dword 0 must be read first and the sequence
 * must not interleave with other CSR access — hence biu_lock.
 */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
			     unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	spin_lock_irqsave(&efx->biu_lock, flags);
	value->u32[0] = _efx_readd(efx, reg + 0);
	value->u32[1] = _efx_readd(efx, reg + 4);
	value->u32[2] = _efx_readd(efx, reg + 8);
	value->u32[3] = _efx_readd(efx, reg + 12);
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));
}
161 | |||
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate.
 * As with efx_reado(), the latched read must not interleave with other
 * CSR/SRAM access, hence biu_lock.
 */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
				  efx_qword_t *value, unsigned int index)
{
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
		   addr, EFX_QWORD_VAL(*value));
}
182 | |||
/* Read a 32-bit CSR or SRAM.  Single dword access; no lock required. */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
			     unsigned int reg)
{
	value->u32[0] = _efx_readd(efx, reg);
	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got "EFX_DWORD_FMT"\n",
		   reg, EFX_DWORD_VAL(*value));
}
192 | |||
193 | /* Write a 128-bit CSR forming part of a table */ | ||
194 | static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, | ||
195 | unsigned int reg, unsigned int index) | ||
196 | { | ||
197 | efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); | ||
198 | } | ||
199 | |||
200 | /* Read a 128-bit CSR forming part of a table */ | ||
201 | static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, | ||
202 | unsigned int reg, unsigned int index) | ||
203 | { | ||
204 | efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); | ||
205 | } | ||
206 | |||
/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM.
 * NOTE(review): the index stride here is sizeof(efx_oword_t), unlike
 * efx_readd_table() below which strides by dwords — this looks
 * intentional (dword registers laid out in oword-strided tables) but
 * confirm against the callers before changing either.
 */
static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
				     unsigned int reg, unsigned int index)
{
	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}
213 | |||
/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM.
 * Strides by dwords, unlike efx_writed_table() above — see the note there.
 */
static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
				   unsigned int reg, unsigned int index)
{
	efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
}
220 | |||
/* Page-mapped register block size (8KB per page) */
#define EFX_PAGE_BLOCK_SIZE 0x2000

/* Calculate offset to page-mapped register block */
#define EFX_PAGED_REG(page, reg) \
	((page) * EFX_PAGE_BLOCK_SIZE + (reg))
227 | |||
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD.
 * No biu_lock is taken: these registers are special-cased in the BIU
 * (see the notes at the top of this file) so interleaved partial
 * writes are discarded rather than corrupted.
 */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
				    unsigned int reg, unsigned int page)
{
	reg = EFX_PAGED_REG(page, reg);

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
}
/* Compile-time restriction to the two registers this is safe for */
#define efx_writeo_page(efx, value, reg, page)				\
	_efx_writeo_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
			 page)
253 | |||
/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
 * RX_DESC_UPD or TX_DESC_UPD)
 */
static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
				    unsigned int reg, unsigned int page)
{
	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
/* Compile-time restriction to the registers this is safe for */
#define efx_writed_page(efx, value, reg, page)				\
	_efx_writed_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
					   && (reg) != 0xa1c),		\
			 page)
268 | |||
/* Write TIMER_COMMAND.  This is a page-mapped 32-bit CSR, but a bug
 * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
 * collector register.  Hence only page 0 needs the biu_lock; other
 * pages can be written lock-free.
 */
static inline void _efx_writed_page_locked(struct efx_nic *efx,
					   efx_dword_t *value,
					   unsigned int reg,
					   unsigned int page)
{
	unsigned long flags __attribute__ ((unused));

	if (page == 0) {
		spin_lock_irqsave(&efx->biu_lock, flags);
		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
		spin_unlock_irqrestore(&efx->biu_lock, flags);
	} else {
		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
	}
}
/* Compile-time restriction to the TIMER_COMMAND register offset */
#define efx_writed_page_locked(efx, value, reg, page)			\
	_efx_writed_page_locked(efx, value,				\
				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
				page)
292 | |||
293 | #endif /* EFX_IO_H */ | ||
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h new file mode 100644 index 00000000000..d6a255d0856 --- /dev/null +++ b/drivers/net/sfc/mac.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2009 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_MAC_H | ||
12 | #define EFX_MAC_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | |||
16 | extern const struct efx_mac_operations falcon_xmac_operations; | ||
17 | extern const struct efx_mac_operations efx_mcdi_mac_operations; | ||
18 | extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, | ||
19 | u32 dma_len, int enable, int clear); | ||
20 | |||
21 | #endif | ||
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c new file mode 100644 index 00000000000..81a42539746 --- /dev/null +++ b/drivers/net/sfc/mcdi.c | |||
@@ -0,0 +1,1191 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2008-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include "net_driver.h" | ||
12 | #include "nic.h" | ||
13 | #include "io.h" | ||
14 | #include "regs.h" | ||
15 | #include "mcdi_pcol.h" | ||
16 | #include "phy.h" | ||
17 | |||
18 | /************************************************************************** | ||
19 | * | ||
20 | * Management-Controller-to-Driver Interface | ||
21 | * | ||
22 | ************************************************************************** | ||
23 | */ | ||
24 | |||
/* Software-defined structure to the shared-memory: byte offsets of the
 * doorbell ("notify"), request/response PDU, and MC-reboot flag for each
 * of the two ports. */
#define CMD_NOTIFY_PORT0 0
#define CMD_NOTIFY_PORT1 4
#define CMD_PDU_PORT0   0x008
#define CMD_PDU_PORT1   0x108
#define REBOOT_FLAG_PORT0 0x3f8
#define REBOOT_FLAG_PORT1 0x3fc

/* Maximum time to wait for any MCDI request to complete */
#define MCDI_RPC_TIMEOUT 10 /* seconds */

/* Select the shared-memory offsets appropriate for this NIC's port */
#define MCDI_PDU(efx)						\
	(efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
#define MCDI_DOORBELL(efx)						\
	(efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
#define MCDI_REBOOT_FLAG(efx)						\
	(efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)

/* Mask for the sequence-number field of the MCDI header */
#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
44 | |||
45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | ||
46 | { | ||
47 | struct siena_nic_data *nic_data; | ||
48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
49 | nic_data = efx->nic_data; | ||
50 | return &nic_data->mcdi; | ||
51 | } | ||
52 | |||
/* Initialise the MCDI interface state for this NIC: completion wait queue,
 * serialising lock, QUIESCENT state and polled completion mode.  Also
 * consumes any stale MC-reboot flag left over from before the driver
 * attached.  No-op on pre-Siena hardware, which has no MC. */
void efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	/* Event queues are not yet set up, so start in polled mode */
	mcdi->mode = MCDI_MODE_POLL;

	/* Clear any pre-existing reboot flag so it cannot be misattributed
	 * to a later command; the result is deliberately ignored */
	(void) efx_mcdi_poll_reboot(efx);
}
68 | |||
/* Copy an MCDI request into the shared-memory PDU and ring the doorbell.
 * The caller must already hold the interface (state != QUIESCENT).
 * @inlen must be a whole number of dwords and fit in the 0x100-byte PDU. */
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const u8 *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
	unsigned int i;
	efx_dword_t hdr;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
	BUG_ON(inlen & 3 || inlen >= 0x100);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		/* Ask the MC to signal completion with a CMDDONE event */
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	EFX_POPULATE_DWORD_6(hdr,
			     MCDI_HEADER_RESPONSE, 0,
			     MCDI_HEADER_RESYNC, 1,
			     MCDI_HEADER_CODE, cmd,
			     MCDI_HEADER_DATALEN, inlen,
			     MCDI_HEADER_SEQ, seqno,
			     MCDI_HEADER_XFLAGS, xflags);

	efx_writed(efx, &hdr, pdu);

	/* Payload follows the header dword-by-dword in shared memory */
	for (i = 0; i < inlen; i += 4)
		_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);

	/* Ensure the payload is written out before the header */
	wmb();

	/* ring the doorbell with a distinctive value */
	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
106 | |||
107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | ||
108 | { | ||
109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
111 | int i; | ||
112 | |||
113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
114 | BUG_ON(outlen & 3 || outlen >= 0x100); | ||
115 | |||
116 | for (i = 0; i < outlen; i += 4) | ||
117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); | ||
118 | } | ||
119 | |||
/* Poll shared memory until the current MCDI request completes, then decode
 * the response header into mcdi->resprc/resplen.  Always returns 0 once a
 * response (or reboot) has been recorded, mirroring a successful
 * wait_event_timeout(); returns -ETIMEDOUT only if no response appears
 * within MCDI_RPC_TIMEOUT seconds. */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int time, finish;
	unsigned int respseq, respcmd, error;
	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	unsigned int rc, spins;
	efx_dword_t reg;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	/* NOTE: rc is held as a positive errno here; efx_mcdi_rpc() negates
	 * resprc when reading it back */
	rc = -efx_mcdi_poll_reboot(efx);
	if (rc)
		goto out;

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = get_seconds() + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = get_seconds();

		/* Order the header read after the delay, before re-checking */
		rmb();
		efx_readd(efx, &reg, pdu);

		/* All 1's indicates that shared memory is in reset (and is
		 * not a valid header). Wait for it to come out reset before
		 * completing the command */
		if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
		    EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
			break;

		if (time >= finish)
			return -ETIMEDOUT;
	}

	mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
	respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);

	if (error && mcdi->resplen == 0) {
		/* Error with no payload: the MC rebooted underneath us */
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		rc = EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		rc = EIO;
	} else if (error) {
		/* First response dword carries the MC error code; translate
		 * the known ones into host errnos */
		efx_readd(efx, &reg, pdu + 4);
		switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name)					\
		case MC_CMD_ERR_ ## name:			\
			rc = name;				\
			break
			TRANSLATE_ERROR(ENOENT);
			TRANSLATE_ERROR(EINTR);
			TRANSLATE_ERROR(EACCES);
			TRANSLATE_ERROR(EBUSY);
			TRANSLATE_ERROR(EINVAL);
			TRANSLATE_ERROR(EDEADLK);
			TRANSLATE_ERROR(ENOSYS);
			TRANSLATE_ERROR(ETIME);
#undef TRANSLATE_ERROR
		default:
			rc = EIO;
			break;
		}
	} else
		rc = 0;

out:
	mcdi->resprc = rc;
	if (rc)
		mcdi->resplen = 0;

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
209 | |||
210 | /* Test and clear MC-rebooted flag for this port/function */ | ||
211 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | ||
212 | { | ||
213 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); | ||
214 | efx_dword_t reg; | ||
215 | uint32_t value; | ||
216 | |||
217 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
218 | return false; | ||
219 | |||
220 | efx_readd(efx, ®, addr); | ||
221 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | ||
222 | |||
223 | if (value == 0) | ||
224 | return 0; | ||
225 | |||
226 | EFX_ZERO_DWORD(reg); | ||
227 | efx_writed(efx, ®, addr); | ||
228 | |||
229 | if (value == MC_STATUS_DWORD_ASSERT) | ||
230 | return -EINTR; | ||
231 | else | ||
232 | return -EIO; | ||
233 | } | ||
234 | |||
/* Take exclusive ownership of the MCDI interface, sleeping until it is
 * free.  The atomic cmpxchg ensures that of several waiters woken at
 * once, exactly one observes QUIESCENT and claims RUNNING. */
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}
245 | |||
/* Sleep until the in-flight request is marked COMPLETED (by event delivery
 * or by a mode switch), or MCDI_RPC_TIMEOUT elapses.  Returns 0 on
 * completion or -ETIMEDOUT. */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
269 | |||
/* Mark the in-flight request COMPLETED and wake waiters.  Returns true if
 * this call performed the RUNNING -> COMPLETED transition, false if the
 * request had already been completed (or none was running). */
static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}
287 | |||
/* Release the MCDI interface back to QUIESCENT and wake the next waiter
 * blocked in efx_mcdi_acquire(). */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}
293 | |||
/* Handle a CMDDONE event: record the result of the in-flight request and
 * complete it, unless the sequence number shows the event belongs to a
 * request that was already cancelled (timed out). */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int errno)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		/* errno is stored positive; efx_mcdi_rpc() negates it */
		mcdi->resprc = errno;
		mcdi->resplen = datalen;

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	/* Complete outside the lock; efx_mcdi_complete() discards
	 * duplicates itself */
	if (wake)
		efx_mcdi_complete(mcdi);
}
322 | |||
323 | /* Issue the given command by writing the data into the shared memory PDU, | ||
324 | * ring the doorbell and wait for completion. Copyout the result. */ | ||
325 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | ||
326 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, | ||
327 | size_t *outlen_actual) | ||
328 | { | ||
329 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
330 | int rc; | ||
331 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
332 | |||
333 | efx_mcdi_acquire(mcdi); | ||
334 | |||
335 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ | ||
336 | spin_lock_bh(&mcdi->iface_lock); | ||
337 | ++mcdi->seqno; | ||
338 | spin_unlock_bh(&mcdi->iface_lock); | ||
339 | |||
340 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); | ||
341 | |||
342 | if (mcdi->mode == MCDI_MODE_POLL) | ||
343 | rc = efx_mcdi_poll(efx); | ||
344 | else | ||
345 | rc = efx_mcdi_await_completion(efx); | ||
346 | |||
347 | if (rc != 0) { | ||
348 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | ||
349 | * and completing a request we've just cancelled, by ensuring | ||
350 | * that the seqno check therein fails. | ||
351 | */ | ||
352 | spin_lock_bh(&mcdi->iface_lock); | ||
353 | ++mcdi->seqno; | ||
354 | ++mcdi->credits; | ||
355 | spin_unlock_bh(&mcdi->iface_lock); | ||
356 | |||
357 | netif_err(efx, hw, efx->net_dev, | ||
358 | "MC command 0x%x inlen %d mode %d timed out\n", | ||
359 | cmd, (int)inlen, mcdi->mode); | ||
360 | } else { | ||
361 | size_t resplen; | ||
362 | |||
363 | /* At the very least we need a memory barrier here to ensure | ||
364 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against | ||
365 | * a spurious efx_mcdi_ev_cpl() running concurrently by | ||
366 | * acquiring the iface_lock. */ | ||
367 | spin_lock_bh(&mcdi->iface_lock); | ||
368 | rc = -mcdi->resprc; | ||
369 | resplen = mcdi->resplen; | ||
370 | spin_unlock_bh(&mcdi->iface_lock); | ||
371 | |||
372 | if (rc == 0) { | ||
373 | efx_mcdi_copyout(efx, outbuf, | ||
374 | min(outlen, mcdi->resplen + 3) & ~0x3); | ||
375 | if (outlen_actual != NULL) | ||
376 | *outlen_actual = resplen; | ||
377 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | ||
378 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | ||
379 | else if (rc == -EIO || rc == -EINTR) { | ||
380 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", | ||
381 | -rc); | ||
382 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
383 | } else | ||
384 | netif_dbg(efx, hw, efx->net_dev, | ||
385 | "MC command 0x%x inlen %d failed rc=%d\n", | ||
386 | cmd, (int)inlen, -rc); | ||
387 | } | ||
388 | |||
389 | efx_mcdi_release(mcdi); | ||
390 | return rc; | ||
391 | } | ||
392 | |||
/* Switch the MCDI interface to polled completions, e.g. when event queues
 * are being torn down.  Safe to call with a request in flight. */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
416 | |||
/* Switch the MCDI interface to event-driven completions, once event queues
 * are available.  Must not interrupt a request in flight, so the interface
 * is acquired for the duration of the mode change. */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
440 | |||
/* Handle a BADASSERT or REBOOT event from the MC.  @rc is a positive errno
 * (EINTR or EIO) recorded as the result of any request in flight; if no
 * request was waiting, schedule an MC-failure reset instead. */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resplen = 0;
			/* Absorb the CMDDONE event that may still arrive for
			 * the terminated request */
			++mcdi->credits;
		}
	} else
		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

	spin_unlock(&mcdi->iface_lock);
}
474 | |||
475 | static unsigned int efx_mcdi_event_link_speed[] = { | ||
476 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, | ||
477 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, | ||
478 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, | ||
479 | }; | ||
480 | |||
481 | |||
/* Decode an MCDI LINKCHANGE event and update the driver's link state,
 * flow-control settings and carrier status. */
static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
	u32 flags, fcntl, speed, lpa;

	speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
	EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
	/* Translate the firmware speed code into Mbit/s */
	speed = efx_mcdi_event_link_speed[speed];

	flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
	fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
	lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);

	/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
	 * which is only run after flushing the event queues. Therefore, it
	 * is safe to modify the link state outside of the mac_lock here.
	 */
	efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);

	efx_mcdi_phy_check_fcntl(efx, lpa);

	efx_link_status_changed(efx);
}
504 | |||
/* Human-readable names for MCDI sensor-event monitor codes, indexed by
 * MC_CMD_SENSOR_* value */
static const char *sensor_names[] = {
	[MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
	[MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
	[MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
	[MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
	[MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
	[MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
	[MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
	[MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
	[MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
	[MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
	[MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
	[MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
	[MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
};
520 | |||
/* Human-readable names for MCDI sensor states, indexed by
 * MC_CMD_SENSOR_STATE_* value */
static const char *sensor_status_names[] = {
	[MC_CMD_SENSOR_STATE_OK] = "OK",
	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
};
527 | |||
/* Log an MCDI SENSOREVT event: which sensor, its new state, and the raw
 * reading reported by the MC. */
static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	unsigned int monitor, state, value;
	const char *name, *state_txt;
	monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
	value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
	/* Deal gracefully with the board having more sensors than we
	 * know about, but do not expect new sensor states. */
	name = (monitor >= ARRAY_SIZE(sensor_names))
				? "No sensor name available" :
				sensor_names[monitor];
	EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
	state_txt = sensor_status_names[state];

	netif_err(efx, hw, efx->net_dev,
		  "Sensor %d (%s) reports condition '%s' for raw value %d\n",
		  monitor, name, state_txt, value);
}
547 | |||
/* Called from falcon_process_eventq for MCDI events: dispatch each event
 * code to the appropriate handler.  BADSSERT/REBOOT terminate any request
 * in flight via efx_mcdi_ev_death(). */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		/* EINTR is positive here; see efx_mcdi_ev_death() */
		efx_mcdi_ev_death(efx, EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gather lazily. We can ignore this. */
		break;

	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
597 | |||
598 | /************************************************************************** | ||
599 | * | ||
600 | * Specific request functions | ||
601 | * | ||
602 | ************************************************************************** | ||
603 | */ | ||
604 | |||
/* Format the MC firmware version into @buf as "a.b.c.d" (four 16-bit
 * fields).  On any failure @buf is set to the empty string. */
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	/* Response buffer rounded up to a whole number of dwords for the
	 * dword-granular copyout */
	u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}
634 | |||
635 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | ||
636 | bool *was_attached) | ||
637 | { | ||
638 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; | ||
639 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; | ||
640 | size_t outlen; | ||
641 | int rc; | ||
642 | |||
643 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, | ||
644 | driver_operating ? 1 : 0); | ||
645 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); | ||
646 | |||
647 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), | ||
648 | outbuf, sizeof(outbuf), &outlen); | ||
649 | if (rc) | ||
650 | goto fail; | ||
651 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { | ||
652 | rc = -EIO; | ||
653 | goto fail; | ||
654 | } | ||
655 | |||
656 | if (was_attached != NULL) | ||
657 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); | ||
658 | return 0; | ||
659 | |||
660 | fail: | ||
661 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
662 | return rc; | ||
663 | } | ||
664 | |||
/* Fetch the board configuration from the MC.  If non-NULL, @mac_address
 * receives this port's base MAC address and @fw_subtype_list receives the
 * firmware subtype list.  Returns 0 or a negative errno. */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list)
{
	uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
	size_t outlen;
	int port_num = efx_port_num(efx);
	int offset;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	/* Each port has its own MAC address block in the response */
	offset = (port_num)
		? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
		: MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
	if (mac_address)
		memcpy(mac_address, outbuf + offset, ETH_ALEN);
	if (fw_subtype_list)
		memcpy(fw_subtype_list,
		       outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
		       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}
704 | |||
705 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | ||
706 | { | ||
707 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; | ||
708 | u32 dest = 0; | ||
709 | int rc; | ||
710 | |||
711 | if (uart) | ||
712 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; | ||
713 | if (evq) | ||
714 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; | ||
715 | |||
716 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); | ||
717 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); | ||
718 | |||
719 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); | ||
720 | |||
721 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), | ||
722 | NULL, 0, NULL); | ||
723 | if (rc) | ||
724 | goto fail; | ||
725 | |||
726 | return 0; | ||
727 | |||
728 | fail: | ||
729 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
730 | return rc; | ||
731 | } | ||
732 | |||
733 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | ||
734 | { | ||
735 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; | ||
736 | size_t outlen; | ||
737 | int rc; | ||
738 | |||
739 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); | ||
740 | |||
741 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, | ||
742 | outbuf, sizeof(outbuf), &outlen); | ||
743 | if (rc) | ||
744 | goto fail; | ||
745 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { | ||
746 | rc = -EIO; | ||
747 | goto fail; | ||
748 | } | ||
749 | |||
750 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); | ||
751 | return 0; | ||
752 | |||
753 | fail: | ||
754 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
755 | __func__, rc); | ||
756 | return rc; | ||
757 | } | ||
758 | |||
759 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | ||
760 | size_t *size_out, size_t *erase_size_out, | ||
761 | bool *protected_out) | ||
762 | { | ||
763 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; | ||
764 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; | ||
765 | size_t outlen; | ||
766 | int rc; | ||
767 | |||
768 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); | ||
769 | |||
770 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), | ||
771 | outbuf, sizeof(outbuf), &outlen); | ||
772 | if (rc) | ||
773 | goto fail; | ||
774 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { | ||
775 | rc = -EIO; | ||
776 | goto fail; | ||
777 | } | ||
778 | |||
779 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); | ||
780 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); | ||
781 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & | ||
782 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); | ||
783 | return 0; | ||
784 | |||
785 | fail: | ||
786 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
787 | return rc; | ||
788 | } | ||
789 | |||
790 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | ||
791 | { | ||
792 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; | ||
793 | int rc; | ||
794 | |||
795 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | ||
796 | |||
797 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | ||
798 | |||
799 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | ||
800 | NULL, 0, NULL); | ||
801 | if (rc) | ||
802 | goto fail; | ||
803 | |||
804 | return 0; | ||
805 | |||
806 | fail: | ||
807 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
808 | return rc; | ||
809 | } | ||
810 | |||
811 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | ||
812 | loff_t offset, u8 *buffer, size_t length) | ||
813 | { | ||
814 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | ||
815 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
816 | size_t outlen; | ||
817 | int rc; | ||
818 | |||
819 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); | ||
820 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); | ||
821 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); | ||
822 | |||
823 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), | ||
824 | outbuf, sizeof(outbuf), &outlen); | ||
825 | if (rc) | ||
826 | goto fail; | ||
827 | |||
828 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); | ||
829 | return 0; | ||
830 | |||
831 | fail: | ||
832 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
833 | return rc; | ||
834 | } | ||
835 | |||
/* Write @length bytes from @buffer to NVRAM partition @type at @offset.
 * @length must not exceed EFX_MCDI_NVRAM_LEN_MAX.  Returns 0 or a
 * negative errno. */
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
			 loff_t offset, const u8 *buffer, size_t length)
{
	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	/* The request length sent to the MC is rounded up to a whole number
	 * of dwords, as required by efx_mcdi_copyin() */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
861 | |||
862 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | ||
863 | loff_t offset, size_t length) | ||
864 | { | ||
865 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; | ||
866 | int rc; | ||
867 | |||
868 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | ||
869 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | ||
870 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | ||
871 | |||
872 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | ||
873 | |||
874 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | ||
875 | NULL, 0, NULL); | ||
876 | if (rc) | ||
877 | goto fail; | ||
878 | |||
879 | return 0; | ||
880 | |||
881 | fail: | ||
882 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
883 | return rc; | ||
884 | } | ||
885 | |||
886 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | ||
887 | { | ||
888 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; | ||
889 | int rc; | ||
890 | |||
891 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | ||
892 | |||
893 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | ||
894 | |||
895 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | ||
896 | NULL, 0, NULL); | ||
897 | if (rc) | ||
898 | goto fail; | ||
899 | |||
900 | return 0; | ||
901 | |||
902 | fail: | ||
903 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
904 | return rc; | ||
905 | } | ||
906 | |||
907 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) | ||
908 | { | ||
909 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; | ||
910 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; | ||
911 | int rc; | ||
912 | |||
913 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); | ||
914 | |||
915 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), | ||
916 | outbuf, sizeof(outbuf), NULL); | ||
917 | if (rc) | ||
918 | return rc; | ||
919 | |||
920 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { | ||
921 | case MC_CMD_NVRAM_TEST_PASS: | ||
922 | case MC_CMD_NVRAM_TEST_NOTSUPP: | ||
923 | return 0; | ||
924 | default: | ||
925 | return -EIO; | ||
926 | } | ||
927 | } | ||
928 | |||
929 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) | ||
930 | { | ||
931 | u32 nvram_types; | ||
932 | unsigned int type; | ||
933 | int rc; | ||
934 | |||
935 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | ||
936 | if (rc) | ||
937 | goto fail1; | ||
938 | |||
939 | type = 0; | ||
940 | while (nvram_types != 0) { | ||
941 | if (nvram_types & 1) { | ||
942 | rc = efx_mcdi_nvram_test(efx, type); | ||
943 | if (rc) | ||
944 | goto fail2; | ||
945 | } | ||
946 | type++; | ||
947 | nvram_types >>= 1; | ||
948 | } | ||
949 | |||
950 | return 0; | ||
951 | |||
952 | fail2: | ||
953 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", | ||
954 | __func__, type); | ||
955 | fail1: | ||
956 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
957 | return rc; | ||
958 | } | ||
959 | |||
/* Read and report any assertion state recorded by the MC firmware.
 * Returns 0 if there was no assertion or it was successfully reported,
 * or a negative errno if the GET_ASSERTS request itself failed. */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
	u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
	unsigned int flags, index, ofst;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	/* A short response means we cannot trust any of the fields */
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Map the failure flag to a human-readable description */
	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers; the loop deliberately starts at
	 * index 1 (R0 is skipped) */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1; index < 32; index++) {
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
			  MCDI_DWORD2(outbuf, ofst));
		ofst += sizeof(efx_dword_t);
	}

	return 0;
}
1015 | |||
1016 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) | ||
1017 | { | ||
1018 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1019 | |||
1020 | /* Atomically reboot the mcfw out of the assertion handler */ | ||
1021 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1022 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, | ||
1023 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); | ||
1024 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, | ||
1025 | NULL, 0, NULL); | ||
1026 | } | ||
1027 | |||
/* Check for, report and clear any assertion state in the MC firmware.
 * Returns the error from reading the assertion state, or 0. */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc = efx_mcdi_read_assertion(efx);

	/* Only reboot the MC out of the handler once the state has been
	 * read successfully */
	if (rc == 0)
		efx_mcdi_exit_assertion(efx);

	return rc;
}
1040 | |||
1041 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
1042 | { | ||
1043 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; | ||
1044 | int rc; | ||
1045 | |||
1046 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); | ||
1047 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); | ||
1048 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); | ||
1049 | |||
1050 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); | ||
1051 | |||
1052 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); | ||
1053 | |||
1054 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | ||
1055 | NULL, 0, NULL); | ||
1056 | if (rc) | ||
1057 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
1058 | __func__, rc); | ||
1059 | } | ||
1060 | |||
1061 | int efx_mcdi_reset_port(struct efx_nic *efx) | ||
1062 | { | ||
1063 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | ||
1064 | if (rc) | ||
1065 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
1066 | __func__, rc); | ||
1067 | return rc; | ||
1068 | } | ||
1069 | |||
/* Reboot the MC.  The success/failure sense of the RPC is inverted
 * below: -EIO apparently indicates the reboot took effect (killing the
 * in-flight MCDI response), while a "successful" RPC means the reboot
 * did not happen.  Returns 0 if the MC rebooted, negative errno
 * otherwise.  NOTE(review): inversion rationale inferred from the
 * original comment — confirm against the MCDI protocol spec. */
int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_REBOOT_IN_LEN];
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;	/* -EIO => reboot happened: report success */
	if (rc == 0)
		rc = -EIO;	/* RPC "succeeded" => no reboot: report -EIO */
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1087 | |||
1088 | static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | ||
1089 | const u8 *mac, int *id_out) | ||
1090 | { | ||
1091 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; | ||
1092 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; | ||
1093 | size_t outlen; | ||
1094 | int rc; | ||
1095 | |||
1096 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); | ||
1097 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, | ||
1098 | MC_CMD_FILTER_MODE_SIMPLE); | ||
1099 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); | ||
1100 | |||
1101 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), | ||
1102 | outbuf, sizeof(outbuf), &outlen); | ||
1103 | if (rc) | ||
1104 | goto fail; | ||
1105 | |||
1106 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { | ||
1107 | rc = -EIO; | ||
1108 | goto fail; | ||
1109 | } | ||
1110 | |||
1111 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); | ||
1112 | |||
1113 | return 0; | ||
1114 | |||
1115 | fail: | ||
1116 | *id_out = -1; | ||
1117 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1118 | return rc; | ||
1119 | |||
1120 | } | ||
1121 | |||
1122 | |||
/* Install a magic-packet wake-on-LAN filter for @mac.  Thin wrapper
 * around efx_mcdi_wol_filter_set(); the filter ID is returned via
 * *@id_out. */
int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}
1128 | |||
1129 | |||
1130 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | ||
1131 | { | ||
1132 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; | ||
1133 | size_t outlen; | ||
1134 | int rc; | ||
1135 | |||
1136 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, | ||
1137 | outbuf, sizeof(outbuf), &outlen); | ||
1138 | if (rc) | ||
1139 | goto fail; | ||
1140 | |||
1141 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { | ||
1142 | rc = -EIO; | ||
1143 | goto fail; | ||
1144 | } | ||
1145 | |||
1146 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); | ||
1147 | |||
1148 | return 0; | ||
1149 | |||
1150 | fail: | ||
1151 | *id_out = -1; | ||
1152 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1153 | return rc; | ||
1154 | } | ||
1155 | |||
1156 | |||
1157 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | ||
1158 | { | ||
1159 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; | ||
1160 | int rc; | ||
1161 | |||
1162 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | ||
1163 | |||
1164 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | ||
1165 | NULL, 0, NULL); | ||
1166 | if (rc) | ||
1167 | goto fail; | ||
1168 | |||
1169 | return 0; | ||
1170 | |||
1171 | fail: | ||
1172 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1173 | return rc; | ||
1174 | } | ||
1175 | |||
1176 | |||
1177 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | ||
1178 | { | ||
1179 | int rc; | ||
1180 | |||
1181 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | ||
1182 | if (rc) | ||
1183 | goto fail; | ||
1184 | |||
1185 | return 0; | ||
1186 | |||
1187 | fail: | ||
1188 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1189 | return rc; | ||
1190 | } | ||
1191 | |||
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h new file mode 100644 index 00000000000..aced2a7856f --- /dev/null +++ b/drivers/net/sfc/mcdi.h | |||
@@ -0,0 +1,130 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2008-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_MCDI_H | ||
11 | #define EFX_MCDI_H | ||
12 | |||
/**
 * enum efx_mcdi_state
 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
 *	mcdi_lock then they are able to move to MCDI_STATE_RUNNING
 * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
 *	moved into this state is allowed to move out of it.
 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
 *	has not yet consumed the result. For all other threads, equivalent to
 *	MCDI_STATE_RUNNING.
 */
enum efx_mcdi_state {
	MCDI_STATE_QUIESCENT,
	MCDI_STATE_RUNNING,
	MCDI_STATE_COMPLETED,
};

/* How MCDI completions are detected: by polling shared memory, or by
 * waiting for a completion event. */
enum efx_mcdi_mode {
	MCDI_MODE_POLL,
	MCDI_MODE_EVENTS,
};

/**
 * struct efx_mcdi_iface - MCDI protocol context
 * @state: Interface state. Waited for by @wq.
 * @wq: Wait queue for threads waiting for @state != MCDI_STATE_RUNNING
 * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
 *	Serialised by @iface_lock
 * @credits: Number of spurious MCDI completion events allowed before we
 *	trigger a fatal error. Protected by @iface_lock
 * @seqno: The next sequence number to use for mcdi requests.
 *	Serialised by @iface_lock
 * @resprc: Returned MCDI completion
 * @resplen: Returned payload length
 */
struct efx_mcdi_iface {
	atomic_t state;
	wait_queue_head_t wq;
	spinlock_t iface_lock;
	enum efx_mcdi_mode mode;
	unsigned int credits;
	unsigned int seqno;
	unsigned int resprc;
	size_t resplen;
};
58 | |||
extern void efx_mcdi_init(struct efx_nic *efx);

/* Issue MCDI command @cmd with request payload @inbuf/@inlen and wait
 * for the response.  Up to @outlen bytes of response payload are copied
 * to @outbuf; the actual length is stored in *@outlen_actual when that
 * pointer is non-NULL. */
extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
			size_t inlen, u8 *outbuf, size_t outlen,
			size_t *outlen_actual);

extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);

extern void efx_mcdi_process_event(struct efx_channel *channel,
				   efx_qword_t *event);

/* The *2 accessors take a raw byte offset into an MCDI buffer; the
 * un-suffixed forms further below build the offset from a field name. */
#define MCDI_PTR2(_buf, _ofst) \
	(((u8 *)_buf) + _ofst)
#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
	EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
			     EFX_DWORD_0, _value)
#define MCDI_DWORD2(_buf, _ofst) \
	EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
			EFX_DWORD_0)
#define MCDI_QWORD2(_buf, _ofst) \
	EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
			  EFX_QWORD_0)

/* Field-name accessors: _ofst is pasted into MC_CMD_<_ofst>_OFST */
#define MCDI_PTR(_buf, _ofst) \
	MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_SET_DWORD(_buf, _ofst, _value) \
	MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
#define MCDI_DWORD(_buf, _ofst) \
	MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_QWORD(_buf, _ofst) \
	MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)

/* Extract field MCDI_EVENT_<_field> from an MCDI event qword */
#define MCDI_EVENT_FIELD(_ev, _field) \
	EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)

extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
				  u16 *fw_subtype_list);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
			     u32 dest_evq);
extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			       size_t *size_out, size_t *erase_size_out,
			       bool *protected_out);
extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
				       unsigned int type);
extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length);
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer,
				size_t length);
/* Maximum chunk size for a single NVRAM read/write request */
#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
					unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
					 const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);

#endif /* EFX_MCDI_H */
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c new file mode 100644 index 00000000000..50c20777a56 --- /dev/null +++ b/drivers/net/sfc/mcdi_mac.c | |||
@@ -0,0 +1,145 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2009-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include "net_driver.h" | ||
11 | #include "efx.h" | ||
12 | #include "mac.h" | ||
13 | #include "mcdi.h" | ||
14 | #include "mcdi_pcol.h" | ||
15 | |||
16 | static int efx_mcdi_set_mac(struct efx_nic *efx) | ||
17 | { | ||
18 | u32 reject, fcntl; | ||
19 | u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN]; | ||
20 | |||
21 | memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST, | ||
22 | efx->net_dev->dev_addr, ETH_ALEN); | ||
23 | |||
24 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, | ||
25 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); | ||
26 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); | ||
27 | |||
28 | /* The MCDI command provides for controlling accept/reject | ||
29 | * of broadcast packets too, but the driver doesn't currently | ||
30 | * expose this. */ | ||
31 | reject = (efx->promiscuous) ? 0 : | ||
32 | (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN); | ||
33 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject); | ||
34 | |||
35 | switch (efx->wanted_fc) { | ||
36 | case EFX_FC_RX | EFX_FC_TX: | ||
37 | fcntl = MC_CMD_FCNTL_BIDIR; | ||
38 | break; | ||
39 | case EFX_FC_RX: | ||
40 | fcntl = MC_CMD_FCNTL_RESPOND; | ||
41 | break; | ||
42 | default: | ||
43 | fcntl = MC_CMD_FCNTL_OFF; | ||
44 | break; | ||
45 | } | ||
46 | if (efx->wanted_fc & EFX_FC_AUTO) | ||
47 | fcntl = MC_CMD_FCNTL_AUTO; | ||
48 | |||
49 | MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); | ||
50 | |||
51 | return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), | ||
52 | NULL, 0, NULL); | ||
53 | } | ||
54 | |||
55 | static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults) | ||
56 | { | ||
57 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
58 | size_t outlength; | ||
59 | int rc; | ||
60 | |||
61 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
62 | |||
63 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
64 | outbuf, sizeof(outbuf), &outlength); | ||
65 | if (rc) | ||
66 | goto fail; | ||
67 | |||
68 | *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT); | ||
69 | return 0; | ||
70 | |||
71 | fail: | ||
72 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
73 | __func__, rc); | ||
74 | return rc; | ||
75 | } | ||
76 | |||
/* Start or stop periodic DMA of MAC statistics into the buffer at
 * @dma_addr/@dma_len.  @enable turns periodic DMA on (1000ms period)
 * or off; @clear additionally resets the counters.  Returns 0 or a
 * negative errno. */
int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
		       u32 dma_len, int enable, int clear)
{
	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
	int rc;
	efx_dword_t *cmd_ptr;
	int period = enable ? 1000 : 0;	/* DMA period in ms; 0 when disabled */
	u32 addr_hi;
	u32 addr_lo;

	/* This command carries no response payload */
	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);

	/* Split the (possibly 64-bit) DMA address into two dwords */
	addr_lo = ((u64)dma_addr) >> 0;
	addr_hi = ((u64)dma_addr) >> 32;

	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
	/* The CMD field is a packed bitfield, built with EFX_POPULATE */
	cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
	EFX_POPULATE_DWORD_7(*cmd_ptr,
			     MC_CMD_MAC_STATS_CMD_DMA, !!enable,
			     MC_CMD_MAC_STATS_CMD_CLEAR, clear,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
			     MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
			     MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
		  __func__, enable ? "enable" : "disable", rc);
	return rc;
}
117 | |||
118 | static int efx_mcdi_mac_reconfigure(struct efx_nic *efx) | ||
119 | { | ||
120 | int rc; | ||
121 | |||
122 | rc = efx_mcdi_set_mac(efx); | ||
123 | if (rc != 0) | ||
124 | return rc; | ||
125 | |||
126 | /* Restore the multicast hash registers. */ | ||
127 | efx->type->push_multicast_hash(efx); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | |||
133 | static bool efx_mcdi_mac_check_fault(struct efx_nic *efx) | ||
134 | { | ||
135 | u32 faults; | ||
136 | int rc = efx_mcdi_get_mac_faults(efx, &faults); | ||
137 | return (rc != 0) || (faults != 0); | ||
138 | } | ||
139 | |||
140 | |||
/* MAC operations for MCDI-managed NICs.  update_stats is a no-op here;
 * statistics appear to be delivered by MC DMA instead (see
 * efx_mcdi_mac_stats() above) — confirm against the stats consumer. */
const struct efx_mac_operations efx_mcdi_mac_operations = {
	.reconfigure = efx_mcdi_mac_reconfigure,
	.update_stats = efx_port_dummy_op_void,
	.check_fault = efx_mcdi_mac_check_fault,
};
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h new file mode 100644 index 00000000000..41fe06fa060 --- /dev/null +++ b/drivers/net/sfc/mcdi_pcol.h | |||
@@ -0,0 +1,1775 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2009-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | |||
11 | #ifndef MCDI_PCOL_H | ||
12 | #define MCDI_PCOL_H | ||
13 | |||
14 | /* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */ | ||
15 | /* Power-on reset state */ | ||
16 | #define MC_FW_STATE_POR (1) | ||
17 | /* If this is set in MC_RESET_STATE_REG then it should be | ||
18 | * possible to jump into IMEM without loading code from flash. */ | ||
19 | #define MC_FW_WARM_BOOT_OK (2) | ||
20 | /* The MC main image has started to boot. */ | ||
21 | #define MC_FW_STATE_BOOTING (4) | ||
22 | /* The Scheduler has started. */ | ||
23 | #define MC_FW_STATE_SCHED (8) | ||
24 | |||
25 | /* Values to be written to the per-port status dword in shared | ||
26 | * memory on reboot and assert */ | ||
27 | #define MC_STATUS_DWORD_REBOOT (0xb007b007) | ||
28 | #define MC_STATUS_DWORD_ASSERT (0xdeaddead) | ||
29 | |||
30 | /* The current version of the MCDI protocol. | ||
31 | * | ||
32 | * Note that the ROM burnt into the card only talks V0, so at the very | ||
33 | * least every driver must support version 0 and MCDI_PCOL_VERSION | ||
34 | */ | ||
35 | #define MCDI_PCOL_VERSION 1 | ||
36 | |||
37 | /** | ||
38 | * MCDI version 1 | ||
39 | * | ||
 * Each MCDI request starts with an MCDI_HEADER, which is a 32-bit
 * structure, filled in by the client.
42 | * | ||
43 | * 0 7 8 16 20 22 23 24 31 | ||
44 | * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | | ||
45 | * | | | | ||
46 | * | | \--- Response | ||
47 | * | \------- Error | ||
48 | * \------------------------------ Resync (always set) | ||
49 | * | ||
 * The client writes its request into MC shared memory, and rings the
 * doorbell. Each request is completed either by the MC writing
 * back into shared memory, or by writing out an event.
53 | * | ||
54 | * All MCDI commands support completion by shared memory response. Each | ||
55 | * request may also contain additional data (accounted for by HEADER.LEN), | ||
56 | * and some response's may also contain additional data (again, accounted | ||
57 | * for by HEADER.LEN). | ||
58 | * | ||
59 | * Some MCDI commands support completion by event, in which any associated | ||
60 | * response data is included in the event. | ||
61 | * | ||
62 | * The protocol requires one response to be delivered for every request, a | ||
63 | * request should not be sent unless the response for the previous request | ||
64 | * has been received (either by polling shared memory, or by receiving | ||
65 | * an event). | ||
66 | */ | ||
67 | |||
68 | /** Request/Response structure */ | ||
69 | #define MCDI_HEADER_OFST 0 | ||
70 | #define MCDI_HEADER_CODE_LBN 0 | ||
71 | #define MCDI_HEADER_CODE_WIDTH 7 | ||
72 | #define MCDI_HEADER_RESYNC_LBN 7 | ||
73 | #define MCDI_HEADER_RESYNC_WIDTH 1 | ||
74 | #define MCDI_HEADER_DATALEN_LBN 8 | ||
75 | #define MCDI_HEADER_DATALEN_WIDTH 8 | ||
76 | #define MCDI_HEADER_SEQ_LBN 16 | ||
77 | #define MCDI_HEADER_RSVD_LBN 20 | ||
78 | #define MCDI_HEADER_RSVD_WIDTH 2 | ||
79 | #define MCDI_HEADER_SEQ_WIDTH 4 | ||
80 | #define MCDI_HEADER_ERROR_LBN 22 | ||
81 | #define MCDI_HEADER_ERROR_WIDTH 1 | ||
82 | #define MCDI_HEADER_RESPONSE_LBN 23 | ||
83 | #define MCDI_HEADER_RESPONSE_WIDTH 1 | ||
84 | #define MCDI_HEADER_XFLAGS_LBN 24 | ||
85 | #define MCDI_HEADER_XFLAGS_WIDTH 8 | ||
86 | /* Request response using event */ | ||
87 | #define MCDI_HEADER_XFLAGS_EVREQ 0x01 | ||
88 | |||
89 | /* Maximum number of payload bytes */ | ||
90 | #define MCDI_CTL_SDU_LEN_MAX 0xfc | ||
91 | |||
92 | /* The MC can generate events for two reasons: | ||
93 | * - To complete a shared memory request if XFLAGS_EVREQ was set | ||
94 | * - As a notification (link state, i2c event), controlled | ||
95 | * via MC_CMD_LOG_CTRL | ||
96 | * | ||
97 | * Both events share a common structure: | ||
98 | * | ||
99 | * 0 32 33 36 44 52 60 | ||
100 | * | Data | Cont | Level | Src | Code | Rsvd | | ||
101 | * | | ||
102 | * \ There is another event pending in this notification | ||
103 | * | ||
104 | * If Code==CMDDONE, then the fields are further interpreted as: | ||
105 | * | ||
106 | * - LEVEL==INFO Command succeeded | ||
107 | * - LEVEL==ERR Command failed | ||
108 | * | ||
109 | * 0 8 16 24 32 | ||
110 | * | Seq | Datalen | Errno | Rsvd | | ||
111 | * | ||
112 | * These fields are taken directly out of the standard MCDI header, i.e., | ||
113 | * LEVEL==ERR, Datalen == 0 => Reboot | ||
114 | * | ||
115 | * Events can be squirted out of the UART (using LOG_CTRL) without a | ||
116 | * MCDI header. An event can be distinguished from a MCDI response by | ||
117 | * examining the first byte which is 0xc0. This corresponds to the | ||
118 | * non-existent MCDI command MC_CMD_DEBUG_LOG. | ||
119 | * | ||
120 | * 0 7 8 | ||
121 | * | command | Resync | = 0xc0 | ||
122 | * | ||
123 | * Since the event is written in big-endian byte order, this works | ||
124 | * providing bits 56-63 of the event are 0xc0. | ||
125 | * | ||
126 | * 56 60 63 | ||
127 | * | Rsvd | Code | = 0xc0 | ||
128 | * | ||
129 | * Which means for convenience the event code is 0xc for all MC | ||
130 | * generated events. | ||
131 | */ | ||
132 | #define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc | ||
133 | |||
134 | #define MCDI_EVENT_DATA_LBN 0 | ||
135 | #define MCDI_EVENT_DATA_WIDTH 32 | ||
136 | #define MCDI_EVENT_CONT_LBN 32 | ||
137 | #define MCDI_EVENT_CONT_WIDTH 1 | ||
138 | #define MCDI_EVENT_LEVEL_LBN 33 | ||
139 | #define MCDI_EVENT_LEVEL_WIDTH 3 | ||
140 | #define MCDI_EVENT_LEVEL_INFO (0) | ||
141 | #define MCDI_EVENT_LEVEL_WARN (1) | ||
142 | #define MCDI_EVENT_LEVEL_ERR (2) | ||
143 | #define MCDI_EVENT_LEVEL_FATAL (3) | ||
144 | #define MCDI_EVENT_SRC_LBN 36 | ||
145 | #define MCDI_EVENT_SRC_WIDTH 8 | ||
146 | #define MCDI_EVENT_CODE_LBN 44 | ||
147 | #define MCDI_EVENT_CODE_WIDTH 8 | ||
148 | #define MCDI_EVENT_CODE_BADSSERT (1) | ||
149 | #define MCDI_EVENT_CODE_PMNOTICE (2) | ||
150 | #define MCDI_EVENT_CODE_CMDDONE (3) | ||
151 | #define MCDI_EVENT_CMDDONE_SEQ_LBN 0 | ||
152 | #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 | ||
153 | #define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 | ||
154 | #define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 | ||
155 | #define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 | ||
156 | #define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 | ||
157 | #define MCDI_EVENT_CODE_LINKCHANGE (4) | ||
158 | #define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 | ||
159 | #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 | ||
160 | #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 | ||
161 | #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 | ||
162 | #define MCDI_EVENT_LINKCHANGE_SPEED_100M 1 | ||
163 | #define MCDI_EVENT_LINKCHANGE_SPEED_1G 2 | ||
164 | #define MCDI_EVENT_LINKCHANGE_SPEED_10G 3 | ||
165 | #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 | ||
166 | #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 | ||
167 | #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 | ||
168 | #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 | ||
169 | #define MCDI_EVENT_CODE_SENSOREVT (5) | ||
170 | #define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 | ||
171 | #define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 | ||
172 | #define MCDI_EVENT_SENSOREVT_STATE_LBN 8 | ||
173 | #define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 | ||
174 | #define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 | ||
175 | #define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 | ||
176 | #define MCDI_EVENT_CODE_SCHEDERR (6) | ||
177 | #define MCDI_EVENT_CODE_REBOOT (7) | ||
178 | #define MCDI_EVENT_CODE_MAC_STATS_DMA (8) | ||
179 | #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 | ||
180 | #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 | ||
181 | |||
182 | /* Non-existent command target */ | ||
183 | #define MC_CMD_ERR_ENOENT 2 | ||
184 | /* assert() has killed the MC */ | ||
185 | #define MC_CMD_ERR_EINTR 4 | ||
186 | /* Caller does not hold required locks */ | ||
187 | #define MC_CMD_ERR_EACCES 13 | ||
188 | /* Resource is currently unavailable (e.g. lock contention) */ | ||
189 | #define MC_CMD_ERR_EBUSY 16 | ||
190 | /* Invalid argument to target */ | ||
191 | #define MC_CMD_ERR_EINVAL 22 | ||
192 | /* Non-recursive resource is already acquired */ | ||
193 | #define MC_CMD_ERR_EDEADLK 35 | ||
194 | /* Operation not implemented */ | ||
195 | #define MC_CMD_ERR_ENOSYS 38 | ||
196 | /* Operation timed out */ | ||
197 | #define MC_CMD_ERR_ETIME 62 | ||
198 | |||
199 | #define MC_CMD_ERR_CODE_OFST 0 | ||
200 | |||
201 | |||
202 | /* MC_CMD_READ32: (debug, variadic out) | ||
 * Read multiple 32-bit words from MC memory
204 | */ | ||
205 | #define MC_CMD_READ32 0x01 | ||
206 | #define MC_CMD_READ32_IN_LEN 8 | ||
207 | #define MC_CMD_READ32_IN_ADDR_OFST 0 | ||
208 | #define MC_CMD_READ32_IN_NUMWORDS_OFST 4 | ||
209 | #define MC_CMD_READ32_OUT_LEN(_numwords) \ | ||
210 | (4 * (_numwords)) | ||
211 | #define MC_CMD_READ32_OUT_BUFFER_OFST 0 | ||
212 | |||
213 | /* MC_CMD_WRITE32: (debug, variadic in) | ||
 * Write multiple 32-bit words to MC memory
215 | */ | ||
216 | #define MC_CMD_WRITE32 0x02 | ||
217 | #define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4) | ||
218 | #define MC_CMD_WRITE32_IN_ADDR_OFST 0 | ||
219 | #define MC_CMD_WRITE32_IN_BUFFER_OFST 4 | ||
220 | #define MC_CMD_WRITE32_OUT_LEN 0 | ||
221 | |||
222 | /* MC_CMD_COPYCODE: (debug) | ||
223 | * Copy MC code between two locations and jump | ||
224 | */ | ||
225 | #define MC_CMD_COPYCODE 0x03 | ||
226 | #define MC_CMD_COPYCODE_IN_LEN 16 | ||
227 | #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 | ||
228 | #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 | ||
229 | #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 | ||
230 | #define MC_CMD_COPYCODE_IN_JUMP_OFST 12 | ||
231 | /* Control should return to the caller rather than jumping */ | ||
232 | #define MC_CMD_COPYCODE_JUMP_NONE 1 | ||
233 | #define MC_CMD_COPYCODE_OUT_LEN 0 | ||
234 | |||
235 | /* MC_CMD_SET_FUNC: (debug) | ||
236 | * Select function for function-specific commands. | ||
237 | */ | ||
238 | #define MC_CMD_SET_FUNC 0x04 | ||
239 | #define MC_CMD_SET_FUNC_IN_LEN 4 | ||
240 | #define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 | ||
241 | #define MC_CMD_SET_FUNC_OUT_LEN 0 | ||
242 | |||
243 | /* MC_CMD_GET_BOOT_STATUS: | ||
244 | * Get the instruction address from which the MC booted. | ||
245 | */ | ||
246 | #define MC_CMD_GET_BOOT_STATUS 0x05 | ||
247 | #define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 | ||
248 | #define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 | ||
249 | #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 | ||
250 | #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 | ||
251 | /* Reboot caused by watchdog */ | ||
252 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0) | ||
253 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1) | ||
254 | /* MC booted from primary flash partition */ | ||
255 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1) | ||
256 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1) | ||
257 | /* MC booted from backup flash partition */ | ||
258 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2) | ||
259 | #define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1) | ||
260 | |||
261 | /* MC_CMD_GET_ASSERTS: (debug, variadic out) | ||
262 | * Get (and optionally clear) the current assertion status. | ||
263 | * | ||
264 | * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion | ||
265 | * payload. The other fields will only be present if | ||
266 | * OUT.GLOBAL_FLAGS != NO_FAILS | ||
267 | */ | ||
268 | #define MC_CMD_GET_ASSERTS 0x06 | ||
269 | #define MC_CMD_GET_ASSERTS_IN_LEN 4 | ||
270 | #define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 | ||
271 | #define MC_CMD_GET_ASSERTS_OUT_LEN 140 | ||
272 | /* Assertion status flag */ | ||
273 | #define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 | ||
274 | /*! No assertions have failed. */ | ||
275 | #define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1 | ||
276 | /*! A system-level assertion has failed. */ | ||
277 | #define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2 | ||
278 | /*! A thread-level assertion has failed. */ | ||
279 | #define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3 | ||
280 | /*! The system was reset by the watchdog. */ | ||
281 | #define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4 | ||
282 | /* Failing PC value */ | ||
283 | #define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 | ||
284 | /* Saved GP regs */ | ||
285 | #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 | ||
286 | #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124 | ||
287 | /* Failing thread address */ | ||
288 | #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 | ||
289 | |||
290 | /* MC_CMD_LOG_CTRL: | ||
291 | * Determine the output stream for various events and messages | ||
292 | */ | ||
293 | #define MC_CMD_LOG_CTRL 0x07 | ||
294 | #define MC_CMD_LOG_CTRL_IN_LEN 8 | ||
295 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 | ||
296 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1) | ||
297 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2) | ||
298 | #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 | ||
299 | #define MC_CMD_LOG_CTRL_OUT_LEN 0 | ||
300 | |||
301 | /* MC_CMD_GET_VERSION: | ||
302 | * Get version information about the MC firmware | ||
303 | */ | ||
304 | #define MC_CMD_GET_VERSION 0x08 | ||
305 | #define MC_CMD_GET_VERSION_IN_LEN 0 | ||
306 | #define MC_CMD_GET_VERSION_V0_OUT_LEN 4 | ||
307 | #define MC_CMD_GET_VERSION_V1_OUT_LEN 32 | ||
308 | #define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 | ||
309 | /* Reserved version number to indicate "any" version. */ | ||
310 | #define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff | ||
311 | /* The version response of a boot ROM awaiting rescue */ | ||
312 | #define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 | ||
313 | #define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4 | ||
314 | /* 128bit mask of functions supported by the current firmware */ | ||
315 | #define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8 | ||
316 | /* The command set exported by the boot ROM (MCDI v0) */ | ||
317 | #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ | ||
318 | (1 << MC_CMD_READ32) | \ | ||
319 | (1 << MC_CMD_WRITE32) | \ | ||
320 | (1 << MC_CMD_COPYCODE) | \ | ||
321 | (1 << MC_CMD_GET_VERSION), \ | ||
322 | 0, 0, 0 } | ||
323 | #define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 | ||
324 | |||
325 | /* Vectors in the boot ROM */ | ||
326 | /* Point to the copycode entry point. */ | ||
327 | #define MC_BOOTROM_COPYCODE_VEC (0x7f4) | ||
328 | /* Points to the recovery mode entry point. */ | ||
329 | #define MC_BOOTROM_NOFLASH_VEC (0x7f8) | ||
330 | |||
331 | /* Test execution limits */ | ||
332 | #define MC_TESTEXEC_VARIANT_COUNT 16 | ||
333 | #define MC_TESTEXEC_RESULT_COUNT 7 | ||
334 | |||
335 | /* MC_CMD_SET_TESTVARS: (debug, variadic in) | ||
336 | * Write variant words for test. | ||
337 | * | ||
338 | * The user supplies a bitmap of the variants they wish to set. | ||
339 | * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP) | ||
340 | */ | ||
341 | #define MC_CMD_SET_TESTVARS 0x09 | ||
342 | #define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \ | ||
343 | (4 + 4*(_numwords)) | ||
344 | #define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0 | ||
345 | /* Up to MC_TESTEXEC_VARIANT_COUNT of 32bit words start here */ | ||
346 | #define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4 | ||
347 | #define MC_CMD_SET_TESTVARS_OUT_LEN 0 | ||
348 | |||
349 | /* MC_CMD_GET_TESTRCS: (debug, variadic out) | ||
350 | * Return result words from test. | ||
351 | */ | ||
352 | #define MC_CMD_GET_TESTRCS 0x0a | ||
353 | #define MC_CMD_GET_TESTRCS_IN_LEN 4 | ||
354 | #define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0 | ||
355 | #define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \ | ||
356 | (4 * (_numwords)) | ||
357 | #define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0 | ||
358 | |||
359 | /* MC_CMD_RUN_TEST: (debug) | ||
360 | * Run the test exported by this firmware image | ||
361 | */ | ||
362 | #define MC_CMD_RUN_TEST 0x0b | ||
363 | #define MC_CMD_RUN_TEST_IN_LEN 0 | ||
364 | #define MC_CMD_RUN_TEST_OUT_LEN 0 | ||
365 | |||
366 | /* MC_CMD_CSR_READ32: (debug, variadic out) | ||
367 | * Read 32bit words from the indirect memory map | ||
368 | */ | ||
369 | #define MC_CMD_CSR_READ32 0x0c | ||
370 | #define MC_CMD_CSR_READ32_IN_LEN 12 | ||
371 | #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 | ||
372 | #define MC_CMD_CSR_READ32_IN_STEP_OFST 4 | ||
373 | #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 | ||
374 | #define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \ | ||
375 | (((_numwords) * 4) + 4) | ||
376 | /* IN.NUMWORDS of 32bit words start here */ | ||
377 | #define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 | ||
378 | #define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \ | ||
379 | ((_numwords) * 4) | ||
380 | |||
381 | /* MC_CMD_CSR_WRITE32: (debug, variadic in) | ||
382 | * Write 32bit dwords to the indirect memory map | ||
383 | */ | ||
384 | #define MC_CMD_CSR_WRITE32 0x0d | ||
385 | #define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \ | ||
386 | (((_numwords) * 4) + 8) | ||
387 | #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 | ||
388 | #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 | ||
389 | /* Multiple 32bit words of data to write start here */ | ||
390 | #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 | ||
391 | #define MC_CMD_CSR_WRITE32_OUT_LEN 4 | ||
392 | #define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 | ||
393 | |||
394 | /* MC_CMD_JTAG_WORK: (debug, fpga only) | ||
395 | * Process JTAG work buffer for RBF acceleration. | ||
396 | * | ||
397 | * Host: bit count, (up to) 32 words of data to clock out to JTAG | ||
398 | * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.) | ||
399 | * MC: bit count, (up to) 32 words of data clocked in from JTAG | ||
400 | * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused) | ||
401 | */ | ||
402 | #define MC_CMD_JTAG_WORK 0x0e | ||
403 | |||
404 | /* MC_CMD_STACKINFO: (debug, variadic out) | ||
405 | * Get stack information | ||
406 | * | ||
407 | * Host: nothing | ||
408 | * MC: (thread ptr, stack size, free space) for each thread in system | ||
409 | */ | ||
410 | #define MC_CMD_STACKINFO 0x0f | ||
411 | |||
412 | /* MC_CMD_MDIO_READ: | ||
413 | * MDIO register read | ||
414 | */ | ||
415 | #define MC_CMD_MDIO_READ 0x10 | ||
416 | #define MC_CMD_MDIO_READ_IN_LEN 16 | ||
417 | #define MC_CMD_MDIO_READ_IN_BUS_OFST 0 | ||
418 | #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 | ||
419 | #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 | ||
420 | #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 | ||
421 | #define MC_CMD_MDIO_READ_OUT_LEN 8 | ||
422 | #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 | ||
423 | #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 | ||
424 | |||
425 | /* MC_CMD_MDIO_WRITE: | ||
426 | * MDIO register write | ||
427 | */ | ||
428 | #define MC_CMD_MDIO_WRITE 0x11 | ||
429 | #define MC_CMD_MDIO_WRITE_IN_LEN 20 | ||
430 | #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 | ||
431 | #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 | ||
432 | #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 | ||
433 | #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 | ||
434 | #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 | ||
435 | #define MC_CMD_MDIO_WRITE_OUT_LEN 4 | ||
436 | #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 | ||
437 | |||
438 | /* By default all the MCDI MDIO operations perform clause45 mode. | ||
439 | * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. | ||
440 | */ | ||
441 | #define MC_CMD_MDIO_CLAUSE22 32 | ||
442 | |||
443 | /* There are two MDIO buses: one for the internal PHY, and one for external | ||
444 | * devices. | ||
445 | */ | ||
446 | #define MC_CMD_MDIO_BUS_INTERNAL 0 | ||
447 | #define MC_CMD_MDIO_BUS_EXTERNAL 1 | ||
448 | |||
449 | /* The MDIO commands return the raw status bits from the MDIO block. A "good" | ||
450 | * transaction should have the DONE bit set and all other bits clear. | ||
451 | */ | ||
452 | #define MC_CMD_MDIO_STATUS_GOOD 0x08 | ||
453 | |||
454 | |||
455 | /* MC_CMD_DBI_WRITE: (debug) | ||
456 | * Write DBI register(s) | ||
457 | * | ||
458 | * Host: address, byte-enables (and VF selection, and cs2 flag), | ||
459 | * value [,address ...] | ||
460 | * MC: nothing | ||
461 | */ | ||
462 | #define MC_CMD_DBI_WRITE 0x12 | ||
463 | #define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \ | ||
464 | (12 * (_numwords)) | ||
465 | #define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \ | ||
466 | (((_word) * 12) + 0) | ||
467 | #define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \ | ||
468 | (((_word) * 12) + 4) | ||
469 | #define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \ | ||
470 | (((_word) * 12) + 8) | ||
471 | #define MC_CMD_DBI_WRITE_OUT_LEN 0 | ||
472 | |||
473 | /* MC_CMD_DBI_READ: (debug) | ||
474 | * Read DBI register(s) | ||
475 | * | ||
476 | * Host: address, [,address ...] | ||
477 | * MC: value [,value ...] | ||
478 | * (note: this does not support reading from VFs, but is retained for backwards | ||
479 | * compatibility; see MC_CMD_DBI_READX below) | ||
480 | */ | ||
481 | #define MC_CMD_DBI_READ 0x13 | ||
482 | #define MC_CMD_DBI_READ_IN_LEN(_numwords) \ | ||
483 | (4 * (_numwords)) | ||
484 | #define MC_CMD_DBI_READ_OUT_LEN(_numwords) \ | ||
485 | (4 * (_numwords)) | ||
486 | |||
487 | /* MC_CMD_PORT_READ32: (debug) | ||
488 | * Read a 32-bit register from the indirect port register map. | ||
489 | * | ||
490 | * The port to access is implied by the Shared memory channel used. | ||
491 | */ | ||
492 | #define MC_CMD_PORT_READ32 0x14 | ||
493 | #define MC_CMD_PORT_READ32_IN_LEN 4 | ||
494 | #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 | ||
495 | #define MC_CMD_PORT_READ32_OUT_LEN 8 | ||
496 | #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 | ||
497 | #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 | ||
498 | |||
499 | /* MC_CMD_PORT_WRITE32: (debug) | ||
500 | * Write a 32-bit register to the indirect port register map. | ||
501 | * | ||
502 | * The port to access is implied by the Shared memory channel used. | ||
503 | */ | ||
504 | #define MC_CMD_PORT_WRITE32 0x15 | ||
505 | #define MC_CMD_PORT_WRITE32_IN_LEN 8 | ||
506 | #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 | ||
507 | #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 | ||
508 | #define MC_CMD_PORT_WRITE32_OUT_LEN 4 | ||
509 | #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 | ||
510 | |||
511 | /* MC_CMD_PORT_READ128: (debug) | ||
512 | * Read a 128-bit register from indirect port register map | ||
513 | * | ||
514 | * The port to access is implied by the Shared memory channel used. | ||
515 | */ | ||
516 | #define MC_CMD_PORT_READ128 0x16 | ||
517 | #define MC_CMD_PORT_READ128_IN_LEN 4 | ||
518 | #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 | ||
519 | #define MC_CMD_PORT_READ128_OUT_LEN 20 | ||
520 | #define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 | ||
521 | #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 | ||
522 | |||
523 | /* MC_CMD_PORT_WRITE128: (debug) | ||
524 | * Write a 128-bit register to indirect port register map. | ||
525 | * | ||
526 | * The port to access is implied by the Shared memory channel used. | ||
527 | */ | ||
528 | #define MC_CMD_PORT_WRITE128 0x17 | ||
529 | #define MC_CMD_PORT_WRITE128_IN_LEN 20 | ||
530 | #define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 | ||
531 | #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 | ||
532 | #define MC_CMD_PORT_WRITE128_OUT_LEN 4 | ||
533 | #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 | ||
534 | |||
535 | /* MC_CMD_GET_BOARD_CFG: | ||
536 | * Returns the MC firmware configuration structure | ||
537 | * | ||
538 | * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of | ||
539 | * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file | ||
540 | * for a specific board type, but otherwise have no meaning to the MC; they | ||
541 | * are used by the driver to manage selection of appropriate firmware updates. | ||
542 | */ | ||
543 | #define MC_CMD_GET_BOARD_CFG 0x18 | ||
544 | #define MC_CMD_GET_BOARD_CFG_IN_LEN 0 | ||
545 | #define MC_CMD_GET_BOARD_CFG_OUT_LEN 96 | ||
546 | #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 | ||
547 | #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 | ||
548 | #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 | ||
549 | #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 | ||
550 | #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 | ||
551 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 | ||
552 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 | ||
553 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 | ||
554 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 | ||
555 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 | ||
556 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 | ||
557 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 | ||
558 | #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 | ||
559 | #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 | ||
560 | #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24 | ||
561 | |||
562 | /* MC_CMD_DBI_READX: (debug) | ||
563 | * Read DBI register(s) -- extended functionality | ||
564 | * | ||
565 | * Host: vf selection, address, [,vf selection ...] | ||
566 | * MC: value [,value ...] | ||
567 | */ | ||
568 | #define MC_CMD_DBI_READX 0x19 | ||
569 | #define MC_CMD_DBI_READX_IN_LEN(_numwords) \ | ||
570 | (8*(_numwords)) | ||
571 | #define MC_CMD_DBI_READX_OUT_LEN(_numwords) \ | ||
572 | (4*(_numwords)) | ||
573 | |||
574 | /* MC_CMD_SET_RAND_SEED: | ||
575 | * Set the 16byte seed for the MC pseudo-random generator | ||
576 | */ | ||
577 | #define MC_CMD_SET_RAND_SEED 0x1a | ||
578 | #define MC_CMD_SET_RAND_SEED_IN_LEN 16 | ||
579 | #define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 | ||
580 | #define MC_CMD_SET_RAND_SEED_OUT_LEN 0 | ||
581 | |||
582 | /* MC_CMD_LTSSM_HIST: (debug) | ||
583 | * Retrieve the history of the LTSSM, if the build supports it. | ||
584 | * | ||
585 | * Host: nothing | ||
586 | * MC: variable number of LTSSM values, as bytes | ||
587 | * The history is read-to-clear. | ||
588 | */ | ||
589 | #define MC_CMD_LTSSM_HIST 0x1b | ||
590 | |||
591 | /* MC_CMD_DRV_ATTACH: | ||
592 | * Inform MCPU that this port is managed on the host (i.e. driver active) | ||
593 | */ | ||
594 | #define MC_CMD_DRV_ATTACH 0x1c | ||
595 | #define MC_CMD_DRV_ATTACH_IN_LEN 8 | ||
596 | #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 | ||
597 | #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 | ||
598 | #define MC_CMD_DRV_ATTACH_OUT_LEN 4 | ||
599 | #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 | ||
600 | |||
601 | /* MC_CMD_NCSI_PROD: (debug) | ||
602 | * Trigger an NC-SI event (and possibly an AEN in response) | ||
603 | */ | ||
604 | #define MC_CMD_NCSI_PROD 0x1d | ||
605 | #define MC_CMD_NCSI_PROD_IN_LEN 4 | ||
606 | #define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0 | ||
607 | #define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0 | ||
608 | #define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1 | ||
609 | #define MC_CMD_NCSI_PROD_RESET_LBN 1 | ||
610 | #define MC_CMD_NCSI_PROD_RESET_WIDTH 1 | ||
611 | #define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2 | ||
612 | #define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1 | ||
613 | #define MC_CMD_NCSI_PROD_OUT_LEN 0 | ||
614 | |||
615 | /* Enumeration */ | ||
616 | #define MC_CMD_NCSI_PROD_LINKCHANGE 0 | ||
617 | #define MC_CMD_NCSI_PROD_RESET 1 | ||
618 | #define MC_CMD_NCSI_PROD_DRVATTACH 2 | ||
619 | |||
620 | /* MC_CMD_DEVEL: (debug) | ||
621 | * Reserved for development | ||
622 | */ | ||
623 | #define MC_CMD_DEVEL 0x1e | ||
624 | |||
625 | /* MC_CMD_SHMUART: (debug) | ||
626 | * Route UART output to a circular buffer in shared memory instead. | ||
627 | */ | ||
628 | #define MC_CMD_SHMUART 0x1f | ||
629 | #define MC_CMD_SHMUART_IN_FLAG_OFST 0 | ||
630 | #define MC_CMD_SHMUART_IN_LEN 4 | ||
631 | #define MC_CMD_SHMUART_OUT_LEN 0 | ||
632 | |||
633 | /* MC_CMD_PORT_RESET: | ||
634 | * Generic per-port reset. There is no equivalent for per-board reset. | ||
635 | * | ||
636 | * Locks required: None | ||
637 | * Return code: 0, ETIME | ||
638 | */ | ||
639 | #define MC_CMD_PORT_RESET 0x20 | ||
640 | #define MC_CMD_PORT_RESET_IN_LEN 0 | ||
641 | #define MC_CMD_PORT_RESET_OUT_LEN 0 | ||
642 | |||
643 | /* MC_CMD_RESOURCE_LOCK: | ||
644 | * Generic resource lock/unlock interface. | ||
645 | * | ||
646 | * Locks required: None | ||
647 | * Return code: 0, | ||
648 | * EBUSY (if trylock is contended by other port), | ||
649 | * EDEADLK (if trylock is already acquired by this port) | ||
650 | * EINVAL (if unlock doesn't own the lock) | ||
651 | */ | ||
652 | #define MC_CMD_RESOURCE_LOCK 0x21 | ||
653 | #define MC_CMD_RESOURCE_LOCK_IN_LEN 8 | ||
654 | #define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0 | ||
655 | #define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1 | ||
656 | #define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0 | ||
657 | #define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4 | ||
658 | #define MC_CMD_RESOURCE_LOCK_I2C 2 | ||
659 | #define MC_CMD_RESOURCE_LOCK_PHY 3 | ||
660 | #define MC_CMD_RESOURCE_LOCK_OUT_LEN 0 | ||
661 | |||
662 | /* MC_CMD_SPI_COMMAND: (variadic in, variadic out) | ||
663 | * Read/Write to/from the SPI device. | ||
664 | * | ||
665 | * Locks required: SPI_LOCK | ||
666 | * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held) | ||
667 | */ | ||
668 | #define MC_CMD_SPI_COMMAND 0x22 | ||
669 | #define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes)) | ||
670 | #define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0 | ||
671 | #define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0 | ||
672 | #define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4 | ||
673 | #define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8 | ||
674 | /* Data to write here */ | ||
675 | #define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12 | ||
676 | #define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes) | ||
677 | /* Data read here */ | ||
678 | #define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0 | ||
679 | |||
680 | /* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out) | ||
681 | * Read/Write to/from the I2C bus. | ||
682 | * | ||
683 | * Locks required: I2C_LOCK | ||
684 | * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held) | ||
685 | */ | ||
686 | #define MC_CMD_I2C_RW 0x23 | ||
687 | #define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes)) | ||
688 | #define MC_CMD_I2C_RW_IN_ARGS_OFST 0 | ||
689 | #define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0 | ||
690 | #define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4 | ||
691 | /* Data to write here (NB: the "OFSET" spelling in the macro name below is a historical typo, kept for source compatibility) */ | ||
692 | #define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8 | ||
693 | #define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes) | ||
694 | /* Data read here */ | ||
695 | #define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0 | ||
696 | |||
697 | /* Generic phy capability bitmask */ | ||
698 | #define MC_CMD_PHY_CAP_10HDX_LBN 1 | ||
699 | #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 | ||
700 | #define MC_CMD_PHY_CAP_10FDX_LBN 2 | ||
701 | #define MC_CMD_PHY_CAP_10FDX_WIDTH 1 | ||
702 | #define MC_CMD_PHY_CAP_100HDX_LBN 3 | ||
703 | #define MC_CMD_PHY_CAP_100HDX_WIDTH 1 | ||
704 | #define MC_CMD_PHY_CAP_100FDX_LBN 4 | ||
705 | #define MC_CMD_PHY_CAP_100FDX_WIDTH 1 | ||
706 | #define MC_CMD_PHY_CAP_1000HDX_LBN 5 | ||
707 | #define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 | ||
708 | #define MC_CMD_PHY_CAP_1000FDX_LBN 6 | ||
709 | #define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 | ||
710 | #define MC_CMD_PHY_CAP_10000FDX_LBN 7 | ||
711 | #define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 | ||
712 | #define MC_CMD_PHY_CAP_PAUSE_LBN 8 | ||
713 | #define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 | ||
714 | #define MC_CMD_PHY_CAP_ASYM_LBN 9 | ||
715 | #define MC_CMD_PHY_CAP_ASYM_WIDTH 1 | ||
716 | #define MC_CMD_PHY_CAP_AN_LBN 10 | ||
717 | #define MC_CMD_PHY_CAP_AN_WIDTH 1 | ||
718 | |||
719 | /* Generic loopback enumeration */ | ||
720 | #define MC_CMD_LOOPBACK_NONE 0 | ||
721 | #define MC_CMD_LOOPBACK_DATA 1 | ||
722 | #define MC_CMD_LOOPBACK_GMAC 2 | ||
723 | #define MC_CMD_LOOPBACK_XGMII 3 | ||
724 | #define MC_CMD_LOOPBACK_XGXS 4 | ||
725 | #define MC_CMD_LOOPBACK_XAUI 5 | ||
726 | #define MC_CMD_LOOPBACK_GMII 6 | ||
727 | #define MC_CMD_LOOPBACK_SGMII 7 | ||
728 | #define MC_CMD_LOOPBACK_XGBR 8 | ||
729 | #define MC_CMD_LOOPBACK_XFI 9 | ||
730 | #define MC_CMD_LOOPBACK_XAUI_FAR 10 | ||
731 | #define MC_CMD_LOOPBACK_GMII_FAR 11 | ||
732 | #define MC_CMD_LOOPBACK_SGMII_FAR 12 | ||
733 | #define MC_CMD_LOOPBACK_XFI_FAR 13 | ||
734 | #define MC_CMD_LOOPBACK_GPHY 14 | ||
735 | #define MC_CMD_LOOPBACK_PHYXS 15 | ||
736 | #define MC_CMD_LOOPBACK_PCS 16 | ||
737 | #define MC_CMD_LOOPBACK_PMAPMD 17 | ||
738 | #define MC_CMD_LOOPBACK_XPORT 18 | ||
739 | #define MC_CMD_LOOPBACK_XGMII_WS 19 | ||
740 | #define MC_CMD_LOOPBACK_XAUI_WS 20 | ||
741 | #define MC_CMD_LOOPBACK_XAUI_WS_FAR 21 | ||
742 | #define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22 | ||
743 | #define MC_CMD_LOOPBACK_GMII_WS 23 | ||
744 | #define MC_CMD_LOOPBACK_XFI_WS 24 | ||
745 | #define MC_CMD_LOOPBACK_XFI_WS_FAR 25 | ||
746 | #define MC_CMD_LOOPBACK_PHYXS_WS 26 | ||
747 | |||
748 | /* Generic PHY statistics enumeration */ | ||
749 | #define MC_CMD_OUI 0 | ||
750 | #define MC_CMD_PMA_PMD_LINK_UP 1 | ||
751 | #define MC_CMD_PMA_PMD_RX_FAULT 2 | ||
752 | #define MC_CMD_PMA_PMD_TX_FAULT 3 | ||
753 | #define MC_CMD_PMA_PMD_SIGNAL 4 | ||
754 | #define MC_CMD_PMA_PMD_SNR_A 5 | ||
755 | #define MC_CMD_PMA_PMD_SNR_B 6 | ||
756 | #define MC_CMD_PMA_PMD_SNR_C 7 | ||
757 | #define MC_CMD_PMA_PMD_SNR_D 8 | ||
758 | #define MC_CMD_PCS_LINK_UP 9 | ||
759 | #define MC_CMD_PCS_RX_FAULT 10 | ||
760 | #define MC_CMD_PCS_TX_FAULT 11 | ||
761 | #define MC_CMD_PCS_BER 12 | ||
762 | #define MC_CMD_PCS_BLOCK_ERRORS 13 | ||
763 | #define MC_CMD_PHYXS_LINK_UP 14 | ||
764 | #define MC_CMD_PHYXS_RX_FAULT 15 | ||
765 | #define MC_CMD_PHYXS_TX_FAULT 16 | ||
766 | #define MC_CMD_PHYXS_ALIGN 17 | ||
767 | #define MC_CMD_PHYXS_SYNC 18 | ||
768 | #define MC_CMD_AN_LINK_UP 19 | ||
769 | #define MC_CMD_AN_COMPLETE 20 | ||
770 | #define MC_CMD_AN_10GBT_STATUS 21 | ||
771 | #define MC_CMD_CL22_LINK_UP 22 | ||
772 | #define MC_CMD_PHY_NSTATS 23 | ||
773 | |||
774 | /* MC_CMD_GET_PHY_CFG: | ||
775 | * Report PHY configuration. This is guaranteed to succeed even if the PHY is in | ||
776 | * a "zombie" state. | ||
777 | * | ||
778 | * Locks required: None | ||
779 | * Return code: 0 | ||
780 | */ | ||
781 | #define MC_CMD_GET_PHY_CFG 0x24 | ||
782 | |||
783 | #define MC_CMD_GET_PHY_CFG_IN_LEN 0 | ||
784 | #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 | ||
785 | |||
786 | #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 | ||
787 | #define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0 | ||
788 | #define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1 | ||
789 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1 | ||
790 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1 | ||
791 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2 | ||
792 | #define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1 | ||
793 | #define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3 | ||
794 | #define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1 | ||
795 | #define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4 | ||
796 | #define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1 | ||
797 | #define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5 | ||
798 | #define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1 | ||
799 | #define MC_CMD_GET_PHY_CFG_BIST_LBN 6 | ||
800 | #define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1 | ||
801 | #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 | ||
802 | /* Bitmask of supported capabilities */ | ||
803 | #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 | ||
804 | #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 | ||
805 | #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 | ||
806 | /* PHY statistics bitmap */ | ||
807 | #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 | ||
808 | /* PHY type/name string */ | ||
809 | #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 | ||
810 | #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 | ||
811 | #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 | ||
812 | #define MC_CMD_MEDIA_XAUI 1 | ||
813 | #define MC_CMD_MEDIA_CX4 2 | ||
814 | #define MC_CMD_MEDIA_KX4 3 | ||
815 | #define MC_CMD_MEDIA_XFP 4 | ||
816 | #define MC_CMD_MEDIA_SFP_PLUS 5 | ||
817 | #define MC_CMD_MEDIA_BASE_T 6 | ||
818 | /* MDIO "MMDS" supported */ | ||
819 | #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 | ||
820 | /* Native clause 22 */ | ||
821 | #define MC_CMD_MMD_CLAUSE22 0 | ||
822 | #define MC_CMD_MMD_CLAUSE45_PMAPMD 1 | ||
823 | #define MC_CMD_MMD_CLAUSE45_WIS 2 | ||
824 | #define MC_CMD_MMD_CLAUSE45_PCS 3 | ||
825 | #define MC_CMD_MMD_CLAUSE45_PHYXS 4 | ||
826 | #define MC_CMD_MMD_CLAUSE45_DTEXS 5 | ||
827 | #define MC_CMD_MMD_CLAUSE45_TC 6 | ||
828 | #define MC_CMD_MMD_CLAUSE45_AN 7 | ||
829 | /* Clause22 proxied over clause45 by PHY */ | ||
830 | #define MC_CMD_MMD_CLAUSE45_C22EXT 29 | ||
831 | #define MC_CMD_MMD_CLAUSE45_VEND1 30 | ||
832 | #define MC_CMD_MMD_CLAUSE45_VEND2 31 | ||
833 | /* PHY stepping version */ | ||
834 | #define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 | ||
835 | #define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 | ||
836 | |||
837 | /* MC_CMD_START_BIST: | ||
838 | * Start a BIST test on the PHY. | ||
839 | * | ||
840 | * Locks required: PHY_LOCK if doing a PHY BIST | ||
841 | * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) | ||
842 | */ | ||
843 | #define MC_CMD_START_BIST 0x25 | ||
844 | #define MC_CMD_START_BIST_IN_LEN 4 | ||
845 | #define MC_CMD_START_BIST_IN_TYPE_OFST 0 | ||
846 | #define MC_CMD_START_BIST_OUT_LEN 0 | ||
847 | |||
848 | /* Run the PHY's short cable BIST */ | ||
849 | #define MC_CMD_PHY_BIST_CABLE_SHORT 1 | ||
850 | /* Run the PHY's long cable BIST */ | ||
851 | #define MC_CMD_PHY_BIST_CABLE_LONG 2 | ||
852 | /* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */ | ||
853 | #define MC_CMD_BPX_SERDES_BIST 3 | ||
854 | /* Run the MC loopback tests */ | ||
855 | #define MC_CMD_MC_LOOPBACK_BIST 4 | ||
856 | /* Run the PHY's standard BIST */ | ||
857 | #define MC_CMD_PHY_BIST 5 | ||
858 | |||
859 | /* MC_CMD_POLL_PHY_BIST: (variadic output) | ||
860 | * Poll for BIST completion | ||
861 | * | ||
862 | * Returns a single status code, and optionally some PHY specific | ||
863 | * bist output. The driver should only consume the BIST output | ||
864 | * after validating OUTLEN and PHY_CFG.PHY_TYPE. | ||
865 | * | ||
866 | * If a driver can't successfully parse the BIST output, it should | ||
867 | * still respect the pass/fail in OUT.RESULT | ||
868 | * | ||
869 | * Locks required: PHY_LOCK if doing a PHY BIST | ||
870 | * Return code: 0, EACCES (if PHY_LOCK is not held) | ||
871 | */ | ||
872 | #define MC_CMD_POLL_BIST 0x26 | ||
873 | #define MC_CMD_POLL_BIST_IN_LEN 0 | ||
874 | #define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN | ||
875 | #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 | ||
876 | #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 | ||
877 | #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 | ||
878 | #define MC_CMD_POLL_BIST_RUNNING 1 | ||
879 | #define MC_CMD_POLL_BIST_PASSED 2 | ||
880 | #define MC_CMD_POLL_BIST_FAILED 3 | ||
881 | #define MC_CMD_POLL_BIST_TIMEOUT 4 | ||
882 | /* Generic: */ | ||
883 | #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 | ||
884 | /* SFT9001-specific: */ | ||
885 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 | ||
886 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 | ||
887 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 | ||
888 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 | ||
889 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 | ||
890 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 | ||
891 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 | ||
892 | #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 | ||
893 | #define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1 | ||
894 | #define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2 | ||
895 | #define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3 | ||
896 | #define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4 | ||
897 | #define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9 | ||
898 | /* mrsfp "PHY" driver: */ | ||
899 | #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 | ||
900 | #define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0 | ||
901 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1 | ||
902 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2 | ||
903 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3 | ||
904 | #define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4 | ||
905 | #define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5 | ||
906 | #define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6 | ||
907 | #define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7 | ||
908 | #define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8 | ||
909 | |||
910 | /* MC_CMD_PHY_SPI: (variadic in, variadic out) | ||
911 | * Read/Write/Erase the PHY SPI device | ||
912 | * | ||
913 | * Locks required: PHY_LOCK | ||
914 | * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held) | ||
915 | */ | ||
916 | #define MC_CMD_PHY_SPI 0x27 | ||
917 | #define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes)) | ||
918 | #define MC_CMD_PHY_SPI_IN_ARGS_OFST 0 | ||
919 | #define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0 | ||
920 | #define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4 | ||
921 | #define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8 | ||
922 | /* Data to write here */ | ||
923 | #define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12 | ||
924 | #define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes) | ||
925 | /* Data read here */ | ||
926 | #define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0 | ||
927 | |||
928 | |||
929 | /* MC_CMD_GET_LOOPBACK_MODES: | ||
930 | * Returns a bitmask of loopback modes available at each speed. | ||
931 | * | ||
932 | * Locks required: None | ||
933 | * Return code: 0 | ||
934 | */ | ||
935 | #define MC_CMD_GET_LOOPBACK_MODES 0x28 | ||
936 | #define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 | ||
937 | #define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32 | ||
938 | #define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0 | ||
939 | #define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8 | ||
940 | #define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16 | ||
941 | #define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24 | ||
942 | |||
943 | /* Flow control enumeration */ | ||
944 | #define MC_CMD_FCNTL_OFF 0 | ||
945 | #define MC_CMD_FCNTL_RESPOND 1 | ||
946 | #define MC_CMD_FCNTL_BIDIR 2 | ||
947 | /* Auto - Use what the link has autonegotiated | ||
948 | * - The driver should modify the advertised capabilities via SET_LINK.CAP | ||
949 | * to control the negotiated flow control mode. | ||
950 | * - Can only be set if the PHY supports PAUSE+ASYM capabilities | ||
951 | * - Never returned by GET_LINK as the value programmed into the MAC | ||
952 | */ | ||
953 | #define MC_CMD_FCNTL_AUTO 3 | ||
954 | |||
955 | /* Generic mac fault bitmask */ | ||
956 | #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 | ||
957 | #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 | ||
958 | #define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 | ||
959 | #define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 | ||
960 | #define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 | ||
961 | #define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 | ||
962 | |||
963 | /* MC_CMD_GET_LINK: | ||
964 | * Read the unified MAC/PHY link state | ||
965 | * | ||
966 | * Locks required: None | ||
967 | * Return code: 0, ETIME | ||
968 | */ | ||
969 | #define MC_CMD_GET_LINK 0x29 | ||
970 | #define MC_CMD_GET_LINK_IN_LEN 0 | ||
971 | #define MC_CMD_GET_LINK_OUT_LEN 28 | ||
972 | /* near-side and link-partner advertised capabilities */ | ||
973 | #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 | ||
974 | #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 | ||
975 | /* Autonegotiated speed in mbit/s. The link may still be down | ||
976 | * even if this reads non-zero */ | ||
977 | #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 | ||
978 | #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 | ||
979 | #define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 | ||
980 | /* Whether we have overall link up */ | ||
981 | #define MC_CMD_GET_LINK_LINK_UP_LBN 0 | ||
982 | #define MC_CMD_GET_LINK_LINK_UP_WIDTH 1 | ||
983 | #define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1 | ||
984 | #define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1 | ||
985 | /* Whether we have link at the layers provided by the BPX */ | ||
986 | #define MC_CMD_GET_LINK_BPX_LINK_LBN 2 | ||
987 | #define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1 | ||
988 | /* Whether the PHY has external link */ | ||
989 | #define MC_CMD_GET_LINK_PHY_LINK_LBN 3 | ||
990 | #define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1 | ||
991 | #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 | ||
992 | #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 | ||
993 | |||
994 | /* MC_CMD_SET_LINK: | ||
995 | * Write the unified MAC/PHY link configuration | ||
996 | * | ||
997 | * A loopback speed of "0" is supported, and means | ||
998 | * (choose any available speed) | ||
999 | * | ||
1000 | * Locks required: None | ||
1001 | * Return code: 0, EINVAL, ETIME | ||
1002 | */ | ||
1003 | #define MC_CMD_SET_LINK 0x2a | ||
1004 | #define MC_CMD_SET_LINK_IN_LEN 16 | ||
1005 | #define MC_CMD_SET_LINK_IN_CAP_OFST 0 | ||
1006 | #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 | ||
1007 | #define MC_CMD_SET_LINK_LOWPOWER_LBN 0 | ||
1008 | #define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1 | ||
1009 | #define MC_CMD_SET_LINK_POWEROFF_LBN 1 | ||
1010 | #define MC_CMD_SET_LINK_POWEROFF_WIDTH 1 | ||
1011 | #define MC_CMD_SET_LINK_TXDIS_LBN 2 | ||
1012 | #define MC_CMD_SET_LINK_TXDIS_WIDTH 1 | ||
1013 | #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 | ||
1014 | #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 | ||
1015 | #define MC_CMD_SET_LINK_OUT_LEN 0 | ||
1016 | |||
1017 | /* MC_CMD_SET_ID_LED: | ||
1018 | * Set identification LED state | ||
1019 | * | ||
1020 | * Locks required: None | ||
1021 | * Return code: 0, EINVAL | ||
1022 | */ | ||
1023 | #define MC_CMD_SET_ID_LED 0x2b | ||
1024 | #define MC_CMD_SET_ID_LED_IN_LEN 4 | ||
1025 | #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 | ||
1026 | #define MC_CMD_LED_OFF 0 | ||
1027 | #define MC_CMD_LED_ON 1 | ||
1028 | #define MC_CMD_LED_DEFAULT 2 | ||
1029 | #define MC_CMD_SET_ID_LED_OUT_LEN 0 | ||
1030 | |||
1031 | /* MC_CMD_SET_MAC: | ||
1032 | * Set MAC configuration | ||
1033 | * | ||
1034 | * The MTU is the MTU programmed directly into the XMAC/GMAC | ||
1035 | * (inclusive of EtherII, VLAN, bug16011 padding) | ||
1036 | * | ||
1037 | * Locks required: None | ||
1038 | * Return code: 0, EINVAL | ||
1039 | */ | ||
1040 | #define MC_CMD_SET_MAC 0x2c | ||
1041 | #define MC_CMD_SET_MAC_IN_LEN 24 | ||
1042 | #define MC_CMD_SET_MAC_IN_MTU_OFST 0 | ||
1043 | #define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 | ||
1044 | #define MC_CMD_SET_MAC_IN_ADDR_OFST 8 | ||
1045 | #define MC_CMD_SET_MAC_IN_REJECT_OFST 16 | ||
1046 | #define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 | ||
1047 | #define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 | ||
1048 | #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 | ||
1049 | #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 | ||
1050 | #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 | ||
1051 | #define MC_CMD_SET_MAC_OUT_LEN 0 | ||
1052 | |||
1053 | /* MC_CMD_PHY_STATS: | ||
1054 | * Get generic PHY statistics | ||
1055 | * | ||
1056 | * This call returns the statistics for a generic PHY in a sparse | ||
1057 | * array (indexed by the enumeration). Each value is represented by | ||
1058 | * a 32bit number. | ||
1059 | * | ||
1060 | * If the DMA_ADDR is 0, then no DMA is performed, and the statistics | ||
1061 | * may be read directly out of shared memory. If DMA_ADDR != 0, then | ||
1062 | * the statistics are DMAed to that (page-aligned) location | ||
1063 | * | ||
1064 | * Locks required: None | ||
1065 | * Returns: 0, ETIME | ||
1066 | * Response methods: shared memory, event | ||
1067 | */ | ||
1068 | #define MC_CMD_PHY_STATS 0x2d | ||
1069 | #define MC_CMD_PHY_STATS_IN_LEN 8 | ||
1070 | #define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 | ||
1071 | #define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 | ||
1072 | #define MC_CMD_PHY_STATS_OUT_DMA_LEN 0 | ||
1073 | #define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4) | ||
1074 | |||
1075 | /* Unified MAC statistics enumeration */ | ||
1076 | #define MC_CMD_MAC_GENERATION_START 0 | ||
1077 | #define MC_CMD_MAC_TX_PKTS 1 | ||
1078 | #define MC_CMD_MAC_TX_PAUSE_PKTS 2 | ||
1079 | #define MC_CMD_MAC_TX_CONTROL_PKTS 3 | ||
1080 | #define MC_CMD_MAC_TX_UNICAST_PKTS 4 | ||
1081 | #define MC_CMD_MAC_TX_MULTICAST_PKTS 5 | ||
1082 | #define MC_CMD_MAC_TX_BROADCAST_PKTS 6 | ||
1083 | #define MC_CMD_MAC_TX_BYTES 7 | ||
1084 | #define MC_CMD_MAC_TX_BAD_BYTES 8 | ||
1085 | #define MC_CMD_MAC_TX_LT64_PKTS 9 | ||
1086 | #define MC_CMD_MAC_TX_64_PKTS 10 | ||
1087 | #define MC_CMD_MAC_TX_65_TO_127_PKTS 11 | ||
1088 | #define MC_CMD_MAC_TX_128_TO_255_PKTS 12 | ||
1089 | #define MC_CMD_MAC_TX_256_TO_511_PKTS 13 | ||
1090 | #define MC_CMD_MAC_TX_512_TO_1023_PKTS 14 | ||
1091 | #define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15 | ||
1092 | #define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16 | ||
1093 | #define MC_CMD_MAC_TX_GTJUMBO_PKTS 17 | ||
1094 | #define MC_CMD_MAC_TX_BAD_FCS_PKTS 18 | ||
1095 | #define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19 | ||
1096 | #define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20 | ||
1097 | #define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21 | ||
1098 | #define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22 | ||
1099 | #define MC_CMD_MAC_TX_DEFERRED_PKTS 23 | ||
1100 | #define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24 | ||
1101 | #define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25 | ||
1102 | #define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26 | ||
1103 | #define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27 | ||
1104 | #define MC_CMD_MAC_RX_PKTS 28 | ||
1105 | #define MC_CMD_MAC_RX_PAUSE_PKTS 29 | ||
1106 | #define MC_CMD_MAC_RX_GOOD_PKTS 30 | ||
1107 | #define MC_CMD_MAC_RX_CONTROL_PKTS 31 | ||
1108 | #define MC_CMD_MAC_RX_UNICAST_PKTS 32 | ||
1109 | #define MC_CMD_MAC_RX_MULTICAST_PKTS 33 | ||
1110 | #define MC_CMD_MAC_RX_BROADCAST_PKTS 34 | ||
1111 | #define MC_CMD_MAC_RX_BYTES 35 | ||
1112 | #define MC_CMD_MAC_RX_BAD_BYTES 36 | ||
1113 | #define MC_CMD_MAC_RX_64_PKTS 37 | ||
1114 | #define MC_CMD_MAC_RX_65_TO_127_PKTS 38 | ||
1115 | #define MC_CMD_MAC_RX_128_TO_255_PKTS 39 | ||
1116 | #define MC_CMD_MAC_RX_256_TO_511_PKTS 40 | ||
1117 | #define MC_CMD_MAC_RX_512_TO_1023_PKTS 41 | ||
1118 | #define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42 | ||
1119 | #define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43 | ||
1120 | #define MC_CMD_MAC_RX_GTJUMBO_PKTS 44 | ||
1121 | #define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45 | ||
1122 | #define MC_CMD_MAC_RX_BAD_FCS_PKTS 46 | ||
1123 | #define MC_CMD_MAC_RX_OVERFLOW_PKTS 47 | ||
1124 | #define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48 | ||
1125 | #define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49 | ||
1126 | #define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50 | ||
1127 | #define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51 | ||
1128 | #define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52 | ||
1129 | #define MC_CMD_MAC_RX_JABBER_PKTS 53 | ||
1130 | #define MC_CMD_MAC_RX_NODESC_DROPS 54 | ||
1131 | #define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55 | ||
1132 | #define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56 | ||
1133 | #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 | ||
1134 | #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 | ||
1135 | #define MC_CMD_MAC_RX_MATCH_FAULT 59 | ||
1136 | #define MC_CMD_GMAC_DMABUF_START 64 | ||
1137 | #define MC_CMD_GMAC_DMABUF_END 95 | ||
1138 | /* Insert new members here. */ | ||
1139 | #define MC_CMD_MAC_GENERATION_END 96 | ||
1140 | #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) | ||
1141 | |||
1142 | /* MC_CMD_MAC_STATS: | ||
1143 | * Get unified GMAC/XMAC statistics | ||
1144 | * | ||
1145 | * This call returns unified statistics maintained by the MC as it | ||
1146 | * switches between the GMAC and XMAC. The MC will write out all | ||
1147 | * supported stats. The driver should zero initialise the buffer to | ||
1148 | * guarantee consistent results. | ||
1149 | * | ||
1150 | * Locks required: None | ||
1151 | * Returns: 0 | ||
1152 | * Response methods: shared memory, event | ||
1153 | */ | ||
1154 | #define MC_CMD_MAC_STATS 0x2e | ||
1155 | #define MC_CMD_MAC_STATS_IN_LEN 16 | ||
1156 | #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 | ||
1157 | #define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 | ||
1158 | #define MC_CMD_MAC_STATS_IN_CMD_OFST 8 | ||
1159 | #define MC_CMD_MAC_STATS_CMD_DMA_LBN 0 | ||
1160 | #define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1 | ||
1161 | #define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1 | ||
1162 | #define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1 | ||
1163 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2 | ||
1164 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1 | ||
1165 | /* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */ | ||
1166 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3 | ||
1167 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1 | ||
1168 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4 | ||
1169 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1 | ||
1170 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5 | ||
1171 | #define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1 | ||
1172 | #define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16 | ||
1173 | #define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16 | ||
1174 | #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 | ||
1175 | |||
1176 | #define MC_CMD_MAC_STATS_OUT_LEN 0 | ||
1177 | |||
1178 | /* Callisto flags */ | ||
1179 | #define MC_CMD_SFT9001_ROBUST_LBN 0 | ||
1180 | #define MC_CMD_SFT9001_ROBUST_WIDTH 1 | ||
1181 | #define MC_CMD_SFT9001_SHORT_REACH_LBN 1 | ||
1182 | #define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1 | ||
1183 | |||
1184 | /* MC_CMD_SFT9001_GET: | ||
1185 | * Read current callisto specific setting | ||
1186 | * | ||
1187 | * Locks required: None | ||
1188 | * Returns: 0, ETIME | ||
1189 | */ | ||
1190 | #define MC_CMD_SFT9001_GET 0x30 | ||
1191 | #define MC_CMD_SFT9001_GET_IN_LEN 0 | ||
1192 | #define MC_CMD_SFT9001_GET_OUT_LEN 4 | ||
1193 | #define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0 | ||
1194 | |||
1195 | /* MC_CMD_SFT9001_SET: | ||
1196 | * Write current callisto specific setting | ||
1197 | * | ||
1198 | * Locks required: None | ||
1199 | * Returns: 0, ETIME, EINVAL | ||
1200 | */ | ||
1201 | #define MC_CMD_SFT9001_SET 0x31 | ||
1202 | #define MC_CMD_SFT9001_SET_IN_LEN 4 | ||
1203 | #define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0 | ||
1204 | #define MC_CMD_SFT9001_SET_OUT_LEN 0 | ||
1205 | |||
1206 | |||
1207 | /* MC_CMD_WOL_FILTER_SET: | ||
1208 | * Set a WoL filter | ||
1209 | * | ||
1210 | * Locks required: None | ||
1211 | * Returns: 0, EBUSY, EINVAL, ENOSYS | ||
1212 | */ | ||
1213 | #define MC_CMD_WOL_FILTER_SET 0x32 | ||
1214 | #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */ | ||
1215 | #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 | ||
1216 | #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 | ||
1217 | |||
1218 | /* There is a union at offset 8, following defines overlap due to | ||
1219 | * this */ | ||
1220 | #define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 | ||
1221 | |||
1222 | #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \ | ||
1223 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1224 | |||
1225 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \ | ||
1226 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1227 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \ | ||
1228 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4) | ||
1229 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \ | ||
1230 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8) | ||
1231 | #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \ | ||
1232 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10) | ||
1233 | |||
1234 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \ | ||
1235 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1236 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \ | ||
1237 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16) | ||
1238 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \ | ||
1239 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32) | ||
1240 | #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \ | ||
1241 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34) | ||
1242 | |||
1243 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \ | ||
1244 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1245 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \ | ||
1246 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48) | ||
1247 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \ | ||
1248 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176) | ||
1249 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \ | ||
1250 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177) | ||
1251 | #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \ | ||
1252 | (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178) | ||
1253 | |||
1254 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \ | ||
1255 | MC_CMD_WOL_FILTER_SET_IN_DATA_OFST | ||
1256 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 | ||
1257 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 | ||
1258 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 | ||
1259 | #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 | ||
1260 | |||
1261 | #define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 | ||
1262 | #define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 | ||
1263 | |||
1264 | /* WOL Filter types enumeration */ | ||
1265 | #define MC_CMD_WOL_TYPE_MAGIC 0x0 | ||
1266 | /* unused 0x1 */ | ||
1267 | #define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 | ||
1268 | #define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 | ||
1269 | #define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 | ||
1270 | #define MC_CMD_WOL_TYPE_BITMAP 0x5 | ||
1271 | #define MC_CMD_WOL_TYPE_LINK 0x6 | ||
1272 | #define MC_CMD_WOL_TYPE_MAX 0x7 | ||
1273 | |||
1274 | #define MC_CMD_FILTER_MODE_SIMPLE 0x0 | ||
1275 | #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff | ||
1276 | |||
1277 | /* MC_CMD_WOL_FILTER_REMOVE: | ||
1278 | * Remove a WoL filter | ||
1279 | * | ||
1280 | * Locks required: None | ||
1281 | * Returns: 0, EINVAL, ENOSYS | ||
1282 | */ | ||
1283 | #define MC_CMD_WOL_FILTER_REMOVE 0x33 | ||
1284 | #define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 | ||
1285 | #define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 | ||
1286 | #define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 | ||
1287 | |||
1288 | |||
1289 | /* MC_CMD_WOL_FILTER_RESET: | ||
1290 | * Reset (i.e. remove all) WoL filters | ||
1291 | * | ||
1292 | * Locks required: None | ||
1293 | * Returns: 0, ENOSYS | ||
1294 | */ | ||
1295 | #define MC_CMD_WOL_FILTER_RESET 0x34 | ||
1296 | #define MC_CMD_WOL_FILTER_RESET_IN_LEN 0 | ||
1297 | #define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 | ||
1298 | |||
1299 | /* MC_CMD_SET_MCAST_HASH: | ||
1300 | * Set the multicast hash value without otherwise | ||
1301 | * reconfiguring the MAC | ||
1302 | */ | ||
1303 | #define MC_CMD_SET_MCAST_HASH 0x35 | ||
1304 | #define MC_CMD_SET_MCAST_HASH_IN_LEN 32 | ||
1305 | #define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 | ||
1306 | #define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 | ||
1307 | #define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 | ||
1308 | |||
1309 | /* MC_CMD_NVRAM_TYPES: | ||
1310 | * Return bitfield indicating available types of virtual NVRAM partitions | ||
1311 | * | ||
1312 | * Locks required: none | ||
1313 | * Returns: 0 | ||
1314 | */ | ||
1315 | #define MC_CMD_NVRAM_TYPES 0x36 | ||
1316 | #define MC_CMD_NVRAM_TYPES_IN_LEN 0 | ||
1317 | #define MC_CMD_NVRAM_TYPES_OUT_LEN 4 | ||
1318 | #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 | ||
1319 | |||
1320 | /* Supported NVRAM types */ | ||
1321 | #define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0 | ||
1322 | #define MC_CMD_NVRAM_TYPE_MC_FW 1 | ||
1323 | #define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2 | ||
1324 | #define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3 | ||
1325 | #define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4 | ||
1326 | #define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5 | ||
1327 | #define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6 | ||
1328 | #define MC_CMD_NVRAM_TYPE_EXP_ROM 7 | ||
1329 | #define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8 | ||
1330 | #define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9 | ||
1331 | #define MC_CMD_NVRAM_TYPE_PHY_PORT0 10 | ||
1332 | #define MC_CMD_NVRAM_TYPE_PHY_PORT1 11 | ||
1333 | #define MC_CMD_NVRAM_TYPE_LOG 12 | ||
1334 | |||
1335 | /* MC_CMD_NVRAM_INFO: | ||
1336 | * Read info about a virtual NVRAM partition | ||
1337 | * | ||
1338 | * Locks required: none | ||
1339 | * Returns: 0, EINVAL (bad type) | ||
1340 | */ | ||
1341 | #define MC_CMD_NVRAM_INFO 0x37 | ||
1342 | #define MC_CMD_NVRAM_INFO_IN_LEN 4 | ||
1343 | #define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 | ||
1344 | #define MC_CMD_NVRAM_INFO_OUT_LEN 24 | ||
1345 | #define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 | ||
1346 | #define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 | ||
1347 | #define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 | ||
1348 | #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 | ||
1349 | #define MC_CMD_NVRAM_PROTECTED_LBN 0 | ||
1350 | #define MC_CMD_NVRAM_PROTECTED_WIDTH 1 | ||
1351 | #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 | ||
1352 | #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 | ||
1353 | |||
1354 | /* MC_CMD_NVRAM_UPDATE_START: | ||
1355 | * Start a group of update operations on a virtual NVRAM partition | ||
1356 | * | ||
1357 | * Locks required: PHY_LOCK if type==*PHY* | ||
1358 | * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held) | ||
1359 | */ | ||
1360 | #define MC_CMD_NVRAM_UPDATE_START 0x38 | ||
1361 | #define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 | ||
1362 | #define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 | ||
1363 | #define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0 | ||
1364 | |||
1365 | /* MC_CMD_NVRAM_READ: | ||
1366 | * Read data from a virtual NVRAM partition | ||
1367 | * | ||
1368 | * Locks required: PHY_LOCK if type==*PHY* | ||
1369 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1370 | */ | ||
1371 | #define MC_CMD_NVRAM_READ 0x39 | ||
1372 | #define MC_CMD_NVRAM_READ_IN_LEN 12 | ||
1373 | #define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 | ||
1374 | #define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 | ||
1375 | #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 | ||
1376 | #define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes) | ||
1377 | #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 | ||
1378 | |||
1379 | /* MC_CMD_NVRAM_WRITE: | ||
1380 | * Write data to a virtual NVRAM partition | ||
1381 | * | ||
1382 | * Locks required: PHY_LOCK if type==*PHY* | ||
1383 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1384 | */ | ||
1385 | #define MC_CMD_NVRAM_WRITE 0x3a | ||
1386 | #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 | ||
1387 | #define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 | ||
1388 | #define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 | ||
1389 | #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 | ||
1390 | #define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes) | ||
1391 | #define MC_CMD_NVRAM_WRITE_OUT_LEN 0 | ||
1392 | |||
1393 | /* MC_CMD_NVRAM_ERASE: | ||
1394 | * Erase sector(s) from a virtual NVRAM partition | ||
1395 | * | ||
1396 | * Locks required: PHY_LOCK if type==*PHY* | ||
1397 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1398 | */ | ||
1399 | #define MC_CMD_NVRAM_ERASE 0x3b | ||
1400 | #define MC_CMD_NVRAM_ERASE_IN_LEN 12 | ||
1401 | #define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 | ||
1402 | #define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 | ||
1403 | #define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 | ||
1404 | #define MC_CMD_NVRAM_ERASE_OUT_LEN 0 | ||
1405 | |||
1406 | /* MC_CMD_NVRAM_UPDATE_FINISH: | ||
1407 | * Finish a group of update operations on a virtual NVRAM partition | ||
1408 | * | ||
1409 | * Locks required: PHY_LOCK if type==*PHY* | ||
1410 | * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) | ||
1411 | */ | ||
1412 | #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c | ||
1413 | #define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 | ||
1414 | #define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 | ||
1415 | #define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 | ||
1416 | #define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 | ||
1417 | |||
1418 | /* MC_CMD_REBOOT: | ||
1419 | * Reboot the MC. | ||
1420 | * | ||
1421 | * The AFTER_ASSERTION flag is intended to be used when the driver notices | ||
1422 | * an assertion failure (at which point it is expected to perform a complete | ||
1423 | * tear down and reinitialise), to allow both ports to reset the MC once | ||
1424 | * in an atomic fashion. | ||
1425 | * | ||
1426 | * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1, | ||
1427 | * which means that they will automatically reboot out of the assertion | ||
1428 | * handler, so this is in practice an optional operation. It is still | ||
1429 | * recommended that drivers execute this to support custom firmwares | ||
1430 | * with REBOOT_ON_ASSERT=0. | ||
1431 | * | ||
1432 | * Locks required: NONE | ||
1433 | * Returns: Nothing. You get back a response with ERR=1, DATALEN=0 | ||
1434 | */ | ||
1435 | #define MC_CMD_REBOOT 0x3d | ||
1436 | #define MC_CMD_REBOOT_IN_LEN 4 | ||
1437 | #define MC_CMD_REBOOT_IN_FLAGS_OFST 0 | ||
1438 | #define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1 | ||
1439 | #define MC_CMD_REBOOT_OUT_LEN 0 | ||
1440 | |||
1441 | /* MC_CMD_SCHEDINFO: | ||
1442 | * Request scheduler info. from the MC. | ||
1443 | * | ||
1444 | * Locks required: NONE | ||
1445 | * Returns: An array of (timeslice,maximum overrun), one for each thread, | ||
1446 | * in ascending order of thread address. | ||
1447 | */ | ||
1448 | #define MC_CMD_SCHEDINFO 0x3e | ||
1449 | #define MC_CMD_SCHEDINFO_IN_LEN 0 | ||
1450 | |||
1451 | |||
1452 | /* MC_CMD_SET_REBOOT_MODE: (debug) | ||
1453 | * Set the mode for the next MC reboot. | ||
1454 | * | ||
1455 | * Locks required: NONE | ||
1456 | * | ||
1457 | * Sets the reboot mode to the specified value. Returns the old mode. | ||
1458 | */ | ||
1459 | #define MC_CMD_REBOOT_MODE 0x3f | ||
1460 | #define MC_CMD_REBOOT_MODE_IN_LEN 4 | ||
1461 | #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 | ||
1462 | #define MC_CMD_REBOOT_MODE_OUT_LEN 4 | ||
1463 | #define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 | ||
1464 | #define MC_CMD_REBOOT_MODE_NORMAL 0 | ||
1465 | #define MC_CMD_REBOOT_MODE_SNAPPER 3 | ||
1466 | |||
1467 | /* MC_CMD_DEBUG_LOG: | ||
1468 | * Null request/response command (debug) | ||
1469 | * - sequence number is always zero | ||
1470 | * - only supported on the UART interface | ||
1471 | * (the same set of bytes is delivered as an | ||
1472 | * event over PCI) | ||
1473 | */ | ||
1474 | #define MC_CMD_DEBUG_LOG 0x40 | ||
1475 | #define MC_CMD_DEBUG_LOG_IN_LEN 0 | ||
1476 | #define MC_CMD_DEBUG_LOG_OUT_LEN 0 | ||
1477 | |||
1478 | /* Generic sensor enumeration. Note that a dual port NIC | ||
1479 | * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and | ||
1480 | * PHY1_TEMP depending on whether there is a single sensor | ||
1481 | * in the vicinity of the two port, or one per port. | ||
1482 | */ | ||
1483 | #define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */ | ||
1484 | #define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */ | ||
1485 | #define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */ | ||
1486 | #define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */ | ||
1487 | #define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */ | ||
1488 | #define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */ | ||
1489 | #define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */ | ||
1490 | #define MC_CMD_SENSOR_IN_1V0 7 /* mV */ | ||
1491 | #define MC_CMD_SENSOR_IN_1V2 8 /* mV */ | ||
1492 | #define MC_CMD_SENSOR_IN_1V8 9 /* mV */ | ||
1493 | #define MC_CMD_SENSOR_IN_2V5 10 /* mV */ | ||
1494 | #define MC_CMD_SENSOR_IN_3V3 11 /* mV */ | ||
1495 | #define MC_CMD_SENSOR_IN_12V0 12 /* mV */ | ||
1496 | |||
1497 | |||
1498 | /* Sensor state */ | ||
1499 | #define MC_CMD_SENSOR_STATE_OK 0 | ||
1500 | #define MC_CMD_SENSOR_STATE_WARNING 1 | ||
1501 | #define MC_CMD_SENSOR_STATE_FATAL 2 | ||
1502 | #define MC_CMD_SENSOR_STATE_BROKEN 3 | ||
1503 | |||
1504 | /* MC_CMD_SENSOR_INFO: | ||
1505 | * Returns information about every available sensor. | ||
1506 | * | ||
1507 | * Each sensor has a single (16bit) value, and a corresponding state. | ||
1508 | * The mapping between value and sensor is nominally determined by the | ||
1509 | * MC, but in practice is implemented as zero (BROKEN), one (TEMPERATURE), | ||
1510 | * or two (VOLTAGE) ranges per sensor per state. | ||
1511 | * | ||
1512 | * This call returns a mask (32bit) of the sensors that are supported | ||
1513 | * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte | ||
1514 | * offsets to the per-sensor arrays. Each sensor array has four 16bit | ||
1515 | * numbers, min1, max1, min2, max2. | ||
1516 | * | ||
1517 | * Locks required: None | ||
1518 | * Returns: 0 | ||
1519 | */ | ||
1520 | #define MC_CMD_SENSOR_INFO 0x41 | ||
1521 | #define MC_CMD_SENSOR_INFO_IN_LEN 0 | ||
1522 | #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 | ||
1523 | #define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ | ||
1524 | (4 + (_x)) | ||
1525 | #define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \ | ||
1526 | ((_ofst) + 0) | ||
1527 | #define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \ | ||
1528 | ((_ofst) + 2) | ||
1529 | #define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \ | ||
1530 | ((_ofst) + 4) | ||
1531 | #define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \ | ||
1532 | ((_ofst) + 6) | ||
1533 | |||
1534 | /* MC_CMD_READ_SENSORS | ||
1535 | * Returns the current reading from each sensor | ||
1536 | * | ||
1537 | * Returns a sparse array of sensor readings (indexed by the sensor | ||
1538 | * type) into host memory. Each array element is a dword. | ||
1539 | * | ||
1540 | * The MC will send a SENSOREVT event every time any sensor changes state. The | ||
1541 | * driver is responsible for ensuring that it doesn't miss any events. The board | ||
1542 | * will function normally if all sensors are in STATE_OK or STATE_WARNING. | ||
1543 | * Otherwise the board should not be expected to function. | ||
1544 | */ | ||
1545 | #define MC_CMD_READ_SENSORS 0x42 | ||
1546 | #define MC_CMD_READ_SENSORS_IN_LEN 8 | ||
1547 | #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 | ||
1548 | #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 | ||
1549 | #define MC_CMD_READ_SENSORS_OUT_LEN 0 | ||
1550 | |||
1551 | /* Sensor reading fields */ | ||
1552 | #define MC_CMD_READ_SENSOR_VALUE_LBN 0 | ||
1553 | #define MC_CMD_READ_SENSOR_VALUE_WIDTH 16 | ||
1554 | #define MC_CMD_READ_SENSOR_STATE_LBN 16 | ||
1555 | #define MC_CMD_READ_SENSOR_STATE_WIDTH 8 | ||
1556 | |||
1557 | |||
1558 | /* MC_CMD_GET_PHY_STATE: | ||
1559 | * Report current state of PHY. A "zombie" PHY is a PHY that has failed to | ||
1560 | * boot (e.g. due to missing or corrupted firmware). | ||
1561 | * | ||
1562 | * Locks required: None | ||
1563 | * Return code: 0 | ||
1564 | */ | ||
1565 | #define MC_CMD_GET_PHY_STATE 0x43 | ||
1566 | |||
1567 | #define MC_CMD_GET_PHY_STATE_IN_LEN 0 | ||
1568 | #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 | ||
1569 | #define MC_CMD_GET_PHY_STATE_STATE_OFST 0 | ||
1570 | /* PHY state enumeration: */ | ||
1571 | #define MC_CMD_PHY_STATE_OK 1 | ||
1572 | #define MC_CMD_PHY_STATE_ZOMBIE 2 | ||
1573 | |||
1574 | |||
1575 | /* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to | ||
1576 | * disable 802.1Qbb for a given priority. */ | ||
1577 | #define MC_CMD_SETUP_8021QBB 0x44 | ||
1578 | #define MC_CMD_SETUP_8021QBB_IN_LEN 32 | ||
1579 | #define MC_CMD_SETUP_8021QBB_OUT_LEN 0 | ||
1580 | #define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0 | ||
1581 | |||
1582 | |||
1583 | /* MC_CMD_WOL_FILTER_GET: | ||
1584 | * Retrieve ID of any WoL filters | ||
1585 | * | ||
1586 | * Locks required: None | ||
1587 | * Returns: 0, ENOSYS | ||
1588 | */ | ||
1589 | #define MC_CMD_WOL_FILTER_GET 0x45 | ||
1590 | #define MC_CMD_WOL_FILTER_GET_IN_LEN 0 | ||
1591 | #define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 | ||
1592 | #define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 | ||
1593 | |||
1594 | |||
1595 | /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD: | ||
1596 | * Offload a protocol to NIC for lights-out state | ||
1597 | * | ||
1598 | * Locks required: None | ||
1599 | * Returns: 0, ENOSYS | ||
1600 | */ | ||
1601 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 | ||
1602 | |||
1603 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16 | ||
1604 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 | ||
1605 | |||
1606 | /* There is a union at offset 4, following defines overlap due to | ||
1607 | * this */ | ||
1608 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 | ||
1609 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4 | ||
1610 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10 | ||
1611 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4 | ||
1612 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10 | ||
1613 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26 | ||
1614 | |||
1615 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 | ||
1616 | #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 | ||
1617 | |||
1618 | |||
1619 | /* MC_CMD_REMOVE_LIGHTSOUT_PROTOCOL_OFFLOAD: | ||
1620 | * Offload a protocol to NIC for lights-out state | ||
1621 | * | ||
1622 | * Locks required: None | ||
1623 | * Returns: 0, ENOSYS | ||
1624 | */ | ||
1625 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 | ||
1626 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 | ||
1627 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 | ||
1628 | |||
1629 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 | ||
1630 | #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 | ||
1631 | |||
1632 | /* Lights-out offload protocols enumeration */ | ||
1633 | #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 | ||
1634 | #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 | ||
1635 | |||
1636 | |||
1637 | /* MC_CMD_MAC_RESET_RESTORE: | ||
1638 | * Restore MAC after block reset | ||
1639 | * | ||
1640 | * Locks required: None | ||
1641 | * Returns: 0 | ||
1642 | */ | ||
1643 | |||
1644 | #define MC_CMD_MAC_RESET_RESTORE 0x48 | ||
1645 | #define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 | ||
1646 | #define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 | ||
1647 | |||
1648 | |||
/* MC_CMD_TESTASSERT:
1650 | * Deliberately trigger an assert-detonation in the firmware for testing | ||
1651 | * purposes (i.e. to allow tests that the driver copes gracefully). | ||
1652 | * | ||
1653 | * Locks required: None | ||
1654 | * Returns: 0 | ||
1655 | */ | ||
1656 | |||
1657 | #define MC_CMD_TESTASSERT 0x49 | ||
1658 | #define MC_CMD_TESTASSERT_IN_LEN 0 | ||
1659 | #define MC_CMD_TESTASSERT_OUT_LEN 0 | ||
1660 | |||
1661 | /* MC_CMD_WORKAROUND 0x4a | ||
1662 | * | ||
1663 | * Enable/Disable a given workaround. The mcfw will return EINVAL if it | ||
1664 | * doesn't understand the given workaround number - which should not | ||
1665 | * be treated as a hard error by client code. | ||
1666 | * | ||
1667 | * This op does not imply any semantics about each workaround, that's between | ||
1668 | * the driver and the mcfw on a per-workaround basis. | ||
1669 | * | ||
1670 | * Locks required: None | ||
1671 | * Returns: 0, EINVAL | ||
1672 | */ | ||
1673 | #define MC_CMD_WORKAROUND 0x4a | ||
1674 | #define MC_CMD_WORKAROUND_IN_LEN 8 | ||
1675 | #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 | ||
1676 | #define MC_CMD_WORKAROUND_BUG17230 1 | ||
1677 | #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 | ||
1678 | #define MC_CMD_WORKAROUND_OUT_LEN 0 | ||
1679 | |||
1680 | /* MC_CMD_GET_PHY_MEDIA_INFO: | ||
1681 | * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for | ||
1682 | * SFP+ PHYs). | ||
1683 | * | ||
1684 | * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE); | ||
1685 | * the valid "page number" input values, and the output data, are interpreted | ||
1686 | * on a per-type basis. | ||
1687 | * | ||
1688 | * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address | ||
1689 | * 0xA0 offset 0 or 0x80. | ||
1690 | * Anything else: currently undefined. | ||
1691 | * | ||
1692 | * Locks required: None | ||
1693 | * Return code: 0 | ||
1694 | */ | ||
1695 | #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b | ||
1696 | #define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 | ||
1697 | #define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 | ||
1698 | #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes)) | ||
1699 | #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 | ||
1700 | #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 | ||
1701 | |||
1702 | /* MC_CMD_NVRAM_TEST: | ||
1703 | * Test a particular NVRAM partition for valid contents (where "valid" | ||
1704 | * depends on the type of partition). | ||
1705 | * | ||
1706 | * Locks required: None | ||
1707 | * Return code: 0 | ||
1708 | */ | ||
1709 | #define MC_CMD_NVRAM_TEST 0x4c | ||
1710 | #define MC_CMD_NVRAM_TEST_IN_LEN 4 | ||
1711 | #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 | ||
1712 | #define MC_CMD_NVRAM_TEST_OUT_LEN 4 | ||
1713 | #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 | ||
1714 | #define MC_CMD_NVRAM_TEST_PASS 0 | ||
1715 | #define MC_CMD_NVRAM_TEST_FAIL 1 | ||
1716 | #define MC_CMD_NVRAM_TEST_NOTSUPP 2 | ||
1717 | |||
1718 | /* MC_CMD_MRSFP_TWEAK: (debug) | ||
1719 | * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds. | ||
1720 | * I2C I/O expander bits are always read; if equaliser parameters are supplied, | ||
1721 | * they are configured first. | ||
1722 | * | ||
1723 | * Locks required: None | ||
1724 | * Return code: 0, EINVAL | ||
1725 | */ | ||
1726 | #define MC_CMD_MRSFP_TWEAK 0x4d | ||
1727 | #define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0 | ||
1728 | #define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16 | ||
1729 | #define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */ | ||
1730 | #define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */ | ||
1731 | #define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */ | ||
1732 | #define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */ | ||
1733 | #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 | ||
1734 | #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */ | ||
1735 | #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */ | ||
1736 | #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */ | ||
1737 | |||
1738 | /* MC_CMD_TEST_HACK: (debug (unsurprisingly)) | ||
1739 | * Change bits of network port state for test purposes in ways that would never be | ||
1740 | * useful in normal operation and so need a special command to change. */ | ||
1741 | #define MC_CMD_TEST_HACK 0x2f | ||
1742 | #define MC_CMD_TEST_HACK_IN_LEN 8 | ||
1743 | #define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0 | ||
1744 | #define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */ | ||
1745 | #define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */ | ||
#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
1747 | #define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */ | ||
1748 | #define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */ | ||
1749 | #define MC_CMD_TEST_HACK_OUT_LEN 0 | ||
1750 | |||
1751 | /* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This | ||
1752 | * is a warranty-voiding operation. | ||
1753 | * | ||
 * IN: sensor identifier (one of the enumeration starting with
 * MC_CMD_SENSOR_CONTROLLER_TEMP), followed by 4 32-bit values:
 * min(warning), max(warning), min(fatal), max(fatal). Which of these
 * limits are meaningful, and how they are interpreted, is sensor-specific.
1757 | * | ||
1758 | * OUT: nothing | ||
1759 | * | ||
1760 | * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are | ||
1761 | * out of range. | ||
1762 | */ | ||
1763 | #define MC_CMD_SENSOR_SET_LIMS 0x4e | ||
1764 | #define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 | ||
1765 | #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 | ||
1766 | #define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 | ||
1767 | #define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 | ||
1768 | #define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 | ||
1769 | #define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 | ||
1770 | |||
1771 | /* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be | ||
1772 | * used for post-3.0 extensions. If you run out of space, look for gaps or | ||
1773 | * commands that are unused in the existing range. */ | ||
1774 | |||
1775 | #endif /* MCDI_PCOL_H */ | ||
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c new file mode 100644 index 00000000000..6c63ab0710a --- /dev/null +++ b/drivers/net/sfc/mcdi_phy.c | |||
@@ -0,0 +1,754 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2009-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Driver for PHY related operations via MCDI. | ||
12 | */ | ||
13 | |||
14 | #include <linux/slab.h> | ||
15 | #include "efx.h" | ||
16 | #include "phy.h" | ||
17 | #include "mcdi.h" | ||
18 | #include "mcdi_pcol.h" | ||
19 | #include "nic.h" | ||
20 | #include "selftest.h" | ||
21 | |||
/* Cached PHY description, populated from the MC_CMD_GET_PHY_CFG response
 * by efx_mcdi_get_phy_cfg() and stored in efx->phy_data. */
struct efx_mcdi_phy_data {
	u32 flags;		/* GET_PHY_CFG_OUT_FLAGS capability bits */
	u32 type;		/* PHY type, copied to efx->phy_type at probe */
	u32 supported_cap;	/* MC_CMD_PHY_CAP_* mask the PHY supports */
	u32 channel;		/* MDIO bus (copied to efx->mdio_bus) */
	u32 port;		/* MDIO port address (efx->mdio.prtad) */
	u32 stats_mask;		/* GET_PHY_CFG_OUT_STATS_MASK */
	u8 name[20];		/* PHY name string from firmware */
	u32 media;		/* MC_CMD_MEDIA_* media type */
	u32 mmd_mask;		/* Mask of present MMDs (incl. clause-22 bit) */
	u8 revision[20];	/* PHY firmware revision string */
	/* Capability mask to force when autoneg is off; maintained by
	 * efx_mcdi_phy_probe() and efx_mcdi_phy_set_settings(). */
	u32 forced_cap;
};
35 | |||
36 | static int | ||
37 | efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) | ||
38 | { | ||
39 | u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; | ||
40 | size_t outlen; | ||
41 | int rc; | ||
42 | |||
43 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); | ||
44 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); | ||
45 | |||
46 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, | ||
47 | outbuf, sizeof(outbuf), &outlen); | ||
48 | if (rc) | ||
49 | goto fail; | ||
50 | |||
51 | if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { | ||
52 | rc = -EIO; | ||
53 | goto fail; | ||
54 | } | ||
55 | |||
56 | cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); | ||
57 | cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); | ||
58 | cfg->supported_cap = | ||
59 | MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); | ||
60 | cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); | ||
61 | cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); | ||
62 | cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); | ||
63 | memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), | ||
64 | sizeof(cfg->name)); | ||
65 | cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); | ||
66 | cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); | ||
67 | memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), | ||
68 | sizeof(cfg->revision)); | ||
69 | |||
70 | return 0; | ||
71 | |||
72 | fail: | ||
73 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
74 | return rc; | ||
75 | } | ||
76 | |||
77 | static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, | ||
78 | u32 flags, u32 loopback_mode, | ||
79 | u32 loopback_speed) | ||
80 | { | ||
81 | u8 inbuf[MC_CMD_SET_LINK_IN_LEN]; | ||
82 | int rc; | ||
83 | |||
84 | BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); | ||
85 | |||
86 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); | ||
87 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); | ||
88 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); | ||
89 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); | ||
90 | |||
91 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), | ||
92 | NULL, 0, NULL); | ||
93 | if (rc) | ||
94 | goto fail; | ||
95 | |||
96 | return 0; | ||
97 | |||
98 | fail: | ||
99 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
100 | return rc; | ||
101 | } | ||
102 | |||
103 | static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) | ||
104 | { | ||
105 | u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN]; | ||
106 | size_t outlen; | ||
107 | int rc; | ||
108 | |||
109 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, | ||
110 | outbuf, sizeof(outbuf), &outlen); | ||
111 | if (rc) | ||
112 | goto fail; | ||
113 | |||
114 | if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { | ||
115 | rc = -EIO; | ||
116 | goto fail; | ||
117 | } | ||
118 | |||
119 | *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED); | ||
120 | |||
121 | return 0; | ||
122 | |||
123 | fail: | ||
124 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
125 | return rc; | ||
126 | } | ||
127 | |||
128 | int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, | ||
129 | unsigned int prtad, unsigned int devad, u16 addr, | ||
130 | u16 *value_out, u32 *status_out) | ||
131 | { | ||
132 | u8 inbuf[MC_CMD_MDIO_READ_IN_LEN]; | ||
133 | u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN]; | ||
134 | size_t outlen; | ||
135 | int rc; | ||
136 | |||
137 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus); | ||
138 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); | ||
139 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); | ||
140 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); | ||
141 | |||
142 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), | ||
143 | outbuf, sizeof(outbuf), &outlen); | ||
144 | if (rc) | ||
145 | goto fail; | ||
146 | |||
147 | *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); | ||
148 | *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS); | ||
149 | return 0; | ||
150 | |||
151 | fail: | ||
152 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
153 | return rc; | ||
154 | } | ||
155 | |||
156 | int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, | ||
157 | unsigned int prtad, unsigned int devad, u16 addr, | ||
158 | u16 value, u32 *status_out) | ||
159 | { | ||
160 | u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN]; | ||
161 | u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN]; | ||
162 | size_t outlen; | ||
163 | int rc; | ||
164 | |||
165 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus); | ||
166 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); | ||
167 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); | ||
168 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); | ||
169 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); | ||
170 | |||
171 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), | ||
172 | outbuf, sizeof(outbuf), &outlen); | ||
173 | if (rc) | ||
174 | goto fail; | ||
175 | |||
176 | *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS); | ||
177 | return 0; | ||
178 | |||
179 | fail: | ||
180 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
181 | return rc; | ||
182 | } | ||
183 | |||
184 | static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | ||
185 | { | ||
186 | u32 result = 0; | ||
187 | |||
188 | switch (media) { | ||
189 | case MC_CMD_MEDIA_KX4: | ||
190 | result |= SUPPORTED_Backplane; | ||
191 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
192 | result |= SUPPORTED_1000baseKX_Full; | ||
193 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
194 | result |= SUPPORTED_10000baseKX4_Full; | ||
195 | break; | ||
196 | |||
197 | case MC_CMD_MEDIA_XFP: | ||
198 | case MC_CMD_MEDIA_SFP_PLUS: | ||
199 | result |= SUPPORTED_FIBRE; | ||
200 | break; | ||
201 | |||
202 | case MC_CMD_MEDIA_BASE_T: | ||
203 | result |= SUPPORTED_TP; | ||
204 | if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) | ||
205 | result |= SUPPORTED_10baseT_Half; | ||
206 | if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) | ||
207 | result |= SUPPORTED_10baseT_Full; | ||
208 | if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) | ||
209 | result |= SUPPORTED_100baseT_Half; | ||
210 | if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) | ||
211 | result |= SUPPORTED_100baseT_Full; | ||
212 | if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) | ||
213 | result |= SUPPORTED_1000baseT_Half; | ||
214 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
215 | result |= SUPPORTED_1000baseT_Full; | ||
216 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
217 | result |= SUPPORTED_10000baseT_Full; | ||
218 | break; | ||
219 | } | ||
220 | |||
221 | if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) | ||
222 | result |= SUPPORTED_Pause; | ||
223 | if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) | ||
224 | result |= SUPPORTED_Asym_Pause; | ||
225 | if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
226 | result |= SUPPORTED_Autoneg; | ||
227 | |||
228 | return result; | ||
229 | } | ||
230 | |||
231 | static u32 ethtool_to_mcdi_cap(u32 cap) | ||
232 | { | ||
233 | u32 result = 0; | ||
234 | |||
235 | if (cap & SUPPORTED_10baseT_Half) | ||
236 | result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); | ||
237 | if (cap & SUPPORTED_10baseT_Full) | ||
238 | result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); | ||
239 | if (cap & SUPPORTED_100baseT_Half) | ||
240 | result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); | ||
241 | if (cap & SUPPORTED_100baseT_Full) | ||
242 | result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); | ||
243 | if (cap & SUPPORTED_1000baseT_Half) | ||
244 | result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); | ||
245 | if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) | ||
246 | result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); | ||
247 | if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) | ||
248 | result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); | ||
249 | if (cap & SUPPORTED_Pause) | ||
250 | result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); | ||
251 | if (cap & SUPPORTED_Asym_Pause) | ||
252 | result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); | ||
253 | if (cap & SUPPORTED_Autoneg) | ||
254 | result |= (1 << MC_CMD_PHY_CAP_AN_LBN); | ||
255 | |||
256 | return result; | ||
257 | } | ||
258 | |||
259 | static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) | ||
260 | { | ||
261 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | ||
262 | enum efx_phy_mode mode, supported; | ||
263 | u32 flags; | ||
264 | |||
265 | /* TODO: Advertise the capabilities supported by this PHY */ | ||
266 | supported = 0; | ||
267 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN)) | ||
268 | supported |= PHY_MODE_TX_DISABLED; | ||
269 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN)) | ||
270 | supported |= PHY_MODE_LOW_POWER; | ||
271 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN)) | ||
272 | supported |= PHY_MODE_OFF; | ||
273 | |||
274 | mode = efx->phy_mode & supported; | ||
275 | |||
276 | flags = 0; | ||
277 | if (mode & PHY_MODE_TX_DISABLED) | ||
278 | flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN); | ||
279 | if (mode & PHY_MODE_LOW_POWER) | ||
280 | flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN); | ||
281 | if (mode & PHY_MODE_OFF) | ||
282 | flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN); | ||
283 | |||
284 | return flags; | ||
285 | } | ||
286 | |||
287 | static u32 mcdi_to_ethtool_media(u32 media) | ||
288 | { | ||
289 | switch (media) { | ||
290 | case MC_CMD_MEDIA_XAUI: | ||
291 | case MC_CMD_MEDIA_CX4: | ||
292 | case MC_CMD_MEDIA_KX4: | ||
293 | return PORT_OTHER; | ||
294 | |||
295 | case MC_CMD_MEDIA_XFP: | ||
296 | case MC_CMD_MEDIA_SFP_PLUS: | ||
297 | return PORT_FIBRE; | ||
298 | |||
299 | case MC_CMD_MEDIA_BASE_T: | ||
300 | return PORT_TP; | ||
301 | |||
302 | default: | ||
303 | return PORT_OTHER; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | static int efx_mcdi_phy_probe(struct efx_nic *efx) | ||
308 | { | ||
309 | struct efx_mcdi_phy_data *phy_data; | ||
310 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
311 | u32 caps; | ||
312 | int rc; | ||
313 | |||
314 | /* Initialise and populate phy_data */ | ||
315 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
316 | if (phy_data == NULL) | ||
317 | return -ENOMEM; | ||
318 | |||
319 | rc = efx_mcdi_get_phy_cfg(efx, phy_data); | ||
320 | if (rc != 0) | ||
321 | goto fail; | ||
322 | |||
323 | /* Read initial link advertisement */ | ||
324 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
325 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
326 | outbuf, sizeof(outbuf), NULL); | ||
327 | if (rc) | ||
328 | goto fail; | ||
329 | |||
330 | /* Fill out nic state */ | ||
331 | efx->phy_data = phy_data; | ||
332 | efx->phy_type = phy_data->type; | ||
333 | |||
334 | efx->mdio_bus = phy_data->channel; | ||
335 | efx->mdio.prtad = phy_data->port; | ||
336 | efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); | ||
337 | efx->mdio.mode_support = 0; | ||
338 | if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) | ||
339 | efx->mdio.mode_support |= MDIO_SUPPORTS_C22; | ||
340 | if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) | ||
341 | efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
342 | |||
343 | caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); | ||
344 | if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
345 | efx->link_advertising = | ||
346 | mcdi_to_ethtool_cap(phy_data->media, caps); | ||
347 | else | ||
348 | phy_data->forced_cap = caps; | ||
349 | |||
350 | /* Assert that we can map efx -> mcdi loopback modes */ | ||
351 | BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); | ||
352 | BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); | ||
353 | BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); | ||
354 | BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); | ||
355 | BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); | ||
356 | BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); | ||
357 | BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); | ||
358 | BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); | ||
359 | BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); | ||
360 | BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); | ||
361 | BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); | ||
362 | BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); | ||
363 | BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); | ||
364 | BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); | ||
365 | BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); | ||
366 | BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); | ||
367 | BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); | ||
368 | BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); | ||
369 | BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); | ||
370 | BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); | ||
371 | BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); | ||
372 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); | ||
373 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); | ||
374 | BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); | ||
375 | BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); | ||
376 | BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); | ||
377 | BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); | ||
378 | |||
379 | rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); | ||
380 | if (rc != 0) | ||
381 | goto fail; | ||
382 | /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, | ||
383 | * but by convention we don't */ | ||
384 | efx->loopback_modes &= ~(1 << LOOPBACK_NONE); | ||
385 | |||
386 | /* Set the initial link mode */ | ||
387 | efx_mcdi_phy_decode_link( | ||
388 | efx, &efx->link_state, | ||
389 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), | ||
390 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), | ||
391 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); | ||
392 | |||
393 | /* Default to Autonegotiated flow control if the PHY supports it */ | ||
394 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; | ||
395 | if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
396 | efx->wanted_fc |= EFX_FC_AUTO; | ||
397 | efx_link_set_wanted_fc(efx, efx->wanted_fc); | ||
398 | |||
399 | return 0; | ||
400 | |||
401 | fail: | ||
402 | kfree(phy_data); | ||
403 | return rc; | ||
404 | } | ||
405 | |||
406 | int efx_mcdi_phy_reconfigure(struct efx_nic *efx) | ||
407 | { | ||
408 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | ||
409 | u32 caps = (efx->link_advertising ? | ||
410 | ethtool_to_mcdi_cap(efx->link_advertising) : | ||
411 | phy_cfg->forced_cap); | ||
412 | |||
413 | return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), | ||
414 | efx->loopback_mode, 0); | ||
415 | } | ||
416 | |||
/* Decode the speed/flags/fcntl words of an MCDI GET_LINK response into
 * the driver's struct efx_link_state. */
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
			      struct efx_link_state *link_state,
			      u32 speed, u32 flags, u32 fcntl)
{
	switch (fcntl) {
	case MC_CMD_FCNTL_AUTO:
		WARN_ON(1); /* This is not a link mode */
		link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
		break;
	case MC_CMD_FCNTL_BIDIR:
		link_state->fc = EFX_FC_TX | EFX_FC_RX;
		break;
	case MC_CMD_FCNTL_RESPOND:
		link_state->fc = EFX_FC_RX;
		break;
	default:
		WARN_ON(1);
		/* fall through - treat unknown values as flow control off */
	case MC_CMD_FCNTL_OFF:
		link_state->fc = 0;
		break;
	}

	link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
	link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN));
	link_state->speed = speed;
}
443 | |||
444 | /* Verify that the forced flow control settings (!EFX_FC_AUTO) are | ||
445 | * supported by the link partner. Warn the user if this isn't the case | ||
446 | */ | ||
447 | void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) | ||
448 | { | ||
449 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | ||
450 | u32 rmtadv; | ||
451 | |||
452 | /* The link partner capabilities are only relevant if the | ||
453 | * link supports flow control autonegotiation */ | ||
454 | if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
455 | return; | ||
456 | |||
457 | /* If flow control autoneg is supported and enabled, then fine */ | ||
458 | if (efx->wanted_fc & EFX_FC_AUTO) | ||
459 | return; | ||
460 | |||
461 | rmtadv = 0; | ||
462 | if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) | ||
463 | rmtadv |= ADVERTISED_Pause; | ||
464 | if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) | ||
465 | rmtadv |= ADVERTISED_Asym_Pause; | ||
466 | |||
467 | if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) | ||
468 | netif_err(efx, link, efx->net_dev, | ||
469 | "warning: link partner doesn't support pause frames"); | ||
470 | } | ||
471 | |||
472 | static bool efx_mcdi_phy_poll(struct efx_nic *efx) | ||
473 | { | ||
474 | struct efx_link_state old_state = efx->link_state; | ||
475 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
476 | int rc; | ||
477 | |||
478 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
479 | |||
480 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
481 | |||
482 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
483 | outbuf, sizeof(outbuf), NULL); | ||
484 | if (rc) { | ||
485 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
486 | __func__, rc); | ||
487 | efx->link_state.up = false; | ||
488 | } else { | ||
489 | efx_mcdi_phy_decode_link( | ||
490 | efx, &efx->link_state, | ||
491 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), | ||
492 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), | ||
493 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); | ||
494 | } | ||
495 | |||
496 | return !efx_link_state_equal(&efx->link_state, &old_state); | ||
497 | } | ||
498 | |||
499 | static void efx_mcdi_phy_remove(struct efx_nic *efx) | ||
500 | { | ||
501 | struct efx_mcdi_phy_data *phy_data = efx->phy_data; | ||
502 | |||
503 | efx->phy_data = NULL; | ||
504 | kfree(phy_data); | ||
505 | } | ||
506 | |||
/* ethtool get_settings: report supported/advertised link modes, current
 * speed/duplex and MDIO addressing, then query the MC for the link
 * partner's advertisement. On RPC failure lp_advertising is left
 * unset and an error is logged. */
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
	int rc;

	/* Static capabilities and cached link state need no MC round trip. */
	ecmd->supported =
		mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
	ecmd->advertising = efx->link_advertising;
	ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
	ecmd->duplex = efx->link_state.fd;
	ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
	ecmd->phy_address = phy_cfg->port;
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
	ecmd->mdio_support = (efx->mdio.mode_support &
			      (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));

	/* Fetch the link partner advertisement from the MC. */
	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
		return;
	}
	ecmd->lp_advertising =
		mcdi_to_ethtool_cap(phy_cfg->media,
				    MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
}
537 | |||
/* ethtool set_settings: translate the requested autoneg/speed/duplex
 * into an MCDI capability mask, push it with MC_CMD_SET_LINK, and on
 * success update the driver's advertising/forced-capability state.
 * Returns 0 on success, -EINVAL for an unsupported forced speed, or the
 * error from efx_mcdi_set_link(). */
static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
	u32 caps;
	int rc;

	if (ecmd->autoneg) {
		/* Advertise the requested modes plus autoneg itself. */
		caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
			1 << MC_CMD_PHY_CAP_AN_LBN);
	} else if (ecmd->duplex) {
		/* Forced full-duplex: exactly one capability bit. */
		switch (ethtool_cmd_speed(ecmd)) {
		case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
		case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
		case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
		case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
		default: return -EINVAL;
		}
	} else {
		/* Forced half-duplex (no 10G half-duplex exists). */
		switch (ethtool_cmd_speed(ecmd)) {
		case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
		case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
		case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
		default: return -EINVAL;
		}
	}

	rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
			       efx->loopback_mode, 0);
	if (rc)
		return rc;

	/* Only update cached state after the MC accepted the request. */
	if (ecmd->autoneg) {
		efx_link_set_advertising(
			efx, ecmd->advertising | ADVERTISED_Autoneg);
		phy_cfg->forced_cap = 0;
	} else {
		efx_link_set_advertising(efx, 0);
		phy_cfg->forced_cap = caps;
	}
	return 0;
}
579 | |||
580 | static int efx_mcdi_phy_test_alive(struct efx_nic *efx) | ||
581 | { | ||
582 | u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN]; | ||
583 | size_t outlen; | ||
584 | int rc; | ||
585 | |||
586 | BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); | ||
587 | |||
588 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, | ||
589 | outbuf, sizeof(outbuf), &outlen); | ||
590 | if (rc) | ||
591 | return rc; | ||
592 | |||
593 | if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) | ||
594 | return -EIO; | ||
595 | if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) | ||
596 | return -EINVAL; | ||
597 | |||
598 | return 0; | ||
599 | } | ||
600 | |||
/* Names for the eight cable-diagnostic results that efx_mcdi_bist()
 * appends for SFT9001B cable tests; presumably in the same order as the
 * dwords read starting at POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A — TODO
 * confirm against the MCDI protocol definition. */
static const char *const mcdi_sft9001_cable_diag_names[] = {
	"cable.pairA.length",
	"cable.pairB.length",
	"cable.pairC.length",
	"cable.pairD.length",
	"cable.pairA.status",
	"cable.pairB.status",
	"cable.pairC.status",
	"cable.pairD.status",
};
611 | |||
/* Start a BIST of the given type and poll until it completes (up to 10s).
 * Writes the pass/fail result (1/-1) and, for SFT9001B cable tests, eight
 * cable-diagnostic values into @results.
 * Returns the number of results written on success, or a negative error
 * code (including -ETIMEDOUT if the BIST never finishes). */
static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
			 int *results)
{
	unsigned int retry, i, count = 0;
	size_t outlen;
	u32 status;
	u8 *buf, *ptr;
	int rc;

	/* One buffer is reused for both the START_BIST request and the
	 * POLL_BIST responses. */
	buf = kzalloc(0x100, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
	MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
			  NULL, 0, NULL);
	if (rc)
		goto out;

	/* Wait up to 10s for BIST to finish */
	for (retry = 0; retry < 100; ++retry) {
		BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
				  buf, 0x100, &outlen);
		if (rc)
			goto out;

		status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
		if (status != MC_CMD_POLL_BIST_RUNNING)
			goto finished;

		msleep(100);
	}

	rc = -ETIMEDOUT;
	goto out;

finished:
	/* First result slot: overall pass (1) or fail (-1). */
	results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;

	/* SFT9001 specific cable diagnostics output */
	if (efx->phy_type == PHY_TYPE_SFT9001B &&
	    (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
	     bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
		ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
		/* Only decode the diagnostics if the test passed and the
		 * response is long enough; the 8 slots are always counted
		 * so the layout matches efx_mcdi_phy_test_name(). */
		if (status == MC_CMD_POLL_BIST_PASSED &&
		    outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
			for (i = 0; i < 8; i++) {
				results[count + i] =
					EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
							EFX_DWORD_0);
			}
		}
		count += 8;
	}
	rc = count;

out:
	kfree(buf);

	return rc;
}
675 | |||
/* Run the PHY self-tests advertised in the PHY's capability flags: a
 * plain BIST if supported, then at most one cable test (LONG only for
 * offline testing, otherwise SHORT when available).
 * Results are appended to @results; returns 0 on success or the first
 * negative error from efx_mcdi_bist(). */
static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
				  unsigned flags)
{
	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
	u32 mode;
	int rc;

	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
		rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
		if (rc < 0)
			return rc;

		/* efx_mcdi_bist() returns the number of results written */
		results += rc;
	}

	/* If we support both LONG and SHORT, then run each in response to
	 * break or not. Otherwise, run the one we support */
	mode = 0;
	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
		if ((flags & ETH_TEST_FL_OFFLINE) &&
		    (phy_cfg->flags &
		     (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
			mode = MC_CMD_PHY_BIST_CABLE_LONG;
		else
			mode = MC_CMD_PHY_BIST_CABLE_SHORT;
	} else if (phy_cfg->flags &
		   (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
		mode = MC_CMD_PHY_BIST_CABLE_LONG;

	if (mode != 0) {
		rc = efx_mcdi_bist(efx, mode, results);
		if (rc < 0)
			return rc;
		results += rc;
	}

	return 0;
}
714 | |||
715 | static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, | ||
716 | unsigned int index) | ||
717 | { | ||
718 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | ||
719 | |||
720 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { | ||
721 | if (index == 0) | ||
722 | return "bist"; | ||
723 | --index; | ||
724 | } | ||
725 | |||
726 | if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) | | ||
727 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) { | ||
728 | if (index == 0) | ||
729 | return "cable"; | ||
730 | --index; | ||
731 | |||
732 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | ||
733 | if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) | ||
734 | return mcdi_sft9001_cable_diag_names[index]; | ||
735 | index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); | ||
736 | } | ||
737 | } | ||
738 | |||
739 | return NULL; | ||
740 | } | ||
741 | |||
/* PHY operations for MCDI-managed (Siena-class) PHYs. init and fini are
 * no-ops because the MC owns the PHY; everything else proxies through
 * MCDI commands. */
const struct efx_phy_operations efx_mcdi_phy_ops = {
	.probe = efx_mcdi_phy_probe,
	.init = efx_port_dummy_op_int,
	.reconfigure = efx_mcdi_phy_reconfigure,
	.poll = efx_mcdi_phy_poll,
	.fini = efx_port_dummy_op_void,
	.remove = efx_mcdi_phy_remove,
	.get_settings = efx_mcdi_phy_get_settings,
	.set_settings = efx_mcdi_phy_set_settings,
	.test_alive = efx_mcdi_phy_test_alive,
	.run_tests = efx_mcdi_phy_run_tests,
	.test_name = efx_mcdi_phy_test_name,
};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c new file mode 100644 index 00000000000..7ab385c8136 --- /dev/null +++ b/drivers/net/sfc/mdio_10g.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | /* | ||
10 | * Useful functions for working with MDIO clause 45 PHYs | ||
11 | */ | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/ethtool.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include "net_driver.h" | ||
16 | #include "mdio_10g.h" | ||
17 | #include "workarounds.h" | ||
18 | |||
19 | unsigned efx_mdio_id_oui(u32 id) | ||
20 | { | ||
21 | unsigned oui = 0; | ||
22 | int i; | ||
23 | |||
24 | /* The bits of the OUI are designated a..x, with a=0 and b variable. | ||
25 | * In the id register c is the MSB but the OUI is conventionally | ||
26 | * written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */ | ||
27 | for (i = 0; i < 22; ++i) | ||
28 | if (id & (1 << (i + 10))) | ||
29 | oui |= 1 << (i ^ 7); | ||
30 | |||
31 | return oui; | ||
32 | } | ||
33 | |||
34 | int efx_mdio_reset_mmd(struct efx_nic *port, int mmd, | ||
35 | int spins, int spintime) | ||
36 | { | ||
37 | u32 ctrl; | ||
38 | |||
39 | /* Catch callers passing values in the wrong units (or just silly) */ | ||
40 | EFX_BUG_ON_PARANOID(spins * spintime >= 5000); | ||
41 | |||
42 | efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET); | ||
43 | /* Wait for the reset bit to clear. */ | ||
44 | do { | ||
45 | msleep(spintime); | ||
46 | ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1); | ||
47 | spins--; | ||
48 | |||
49 | } while (spins && (ctrl & MDIO_CTRL1_RESET)); | ||
50 | |||
51 | return spins ? spins : -ETIMEDOUT; | ||
52 | } | ||
53 | |||
54 | static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd) | ||
55 | { | ||
56 | int status; | ||
57 | |||
58 | if (mmd != MDIO_MMD_AN) { | ||
59 | /* Read MMD STATUS2 to check it is responding. */ | ||
60 | status = efx_mdio_read(efx, mmd, MDIO_STAT2); | ||
61 | if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) { | ||
62 | netif_err(efx, hw, efx->net_dev, | ||
63 | "PHY MMD %d not responding.\n", mmd); | ||
64 | return -EIO; | ||
65 | } | ||
66 | } | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
/* This ought to be ridiculous overkill. We expect it to fail rarely */
#define MDIO45_RESET_TIME 1000 /* ms */
#define MDIO45_RESET_ITERS 100

/* Wait for every MMD in @mmd_mask to clear its CTRL1 reset bit, polling
 * up to MDIO45_RESET_ITERS times at intervals of
 * MDIO45_RESET_TIME/MDIO45_RESET_ITERS ms.  Returns 0 on success, -EIO
 * on a read failure, or -ETIMEDOUT if any MMD is still in reset when
 * the iterations are exhausted.
 */
int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
	const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
	int tries = MDIO45_RESET_ITERS;
	int rc = 0;
	int in_reset;

	while (tries) {
		int mask = mmd_mask;
		int mmd = 0;
		int stat;
		in_reset = 0;
		/* Scan the mask, accumulating a bitmap of MMDs whose
		 * reset bit is still set. */
		while (mask) {
			if (mask & 1) {
				stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
				if (stat < 0) {
					netif_err(efx, hw, efx->net_dev,
						  "failed to read status of"
						  " MMD %d\n", mmd);
					return -EIO;
				}
				if (stat & MDIO_CTRL1_RESET)
					in_reset |= (1 << mmd);
			}
			mask = mask >> 1;
			mmd++;
		}
		if (!in_reset)
			break;
		tries--;
		msleep(spintime);
	}
	if (in_reset != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "not all MMDs came out of reset in time."
			  " MMDs still in reset: %x\n", in_reset);
		rc = -ETIMEDOUT;
	}
	return rc;
}
115 | |||
116 | int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask) | ||
117 | { | ||
118 | int mmd = 0, probe_mmd, devs1, devs2; | ||
119 | u32 devices; | ||
120 | |||
121 | /* Historically we have probed the PHYXS to find out what devices are | ||
122 | * present,but that doesn't work so well if the PHYXS isn't expected | ||
123 | * to exist, if so just find the first item in the list supplied. */ | ||
124 | probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS : | ||
125 | __ffs(mmd_mask); | ||
126 | |||
127 | /* Check all the expected MMDs are present */ | ||
128 | devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1); | ||
129 | devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2); | ||
130 | if (devs1 < 0 || devs2 < 0) { | ||
131 | netif_err(efx, hw, efx->net_dev, | ||
132 | "failed to read devices present\n"); | ||
133 | return -EIO; | ||
134 | } | ||
135 | devices = devs1 | (devs2 << 16); | ||
136 | if ((devices & mmd_mask) != mmd_mask) { | ||
137 | netif_err(efx, hw, efx->net_dev, | ||
138 | "required MMDs not present: got %x, wanted %x\n", | ||
139 | devices, mmd_mask); | ||
140 | return -ENODEV; | ||
141 | } | ||
142 | netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices); | ||
143 | |||
144 | /* Check all required MMDs are responding and happy. */ | ||
145 | while (mmd_mask) { | ||
146 | if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd)) | ||
147 | return -EIO; | ||
148 | mmd_mask = mmd_mask >> 1; | ||
149 | mmd++; | ||
150 | } | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
/* Report whether the MMDs in @mmd_mask all see link, taking the active
 * loopback mode into account: MMDs that sit "inside" the loopback point
 * are excluded from the check since they may legitimately show no link.
 */
bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
{
	/* If the port is in loopback, then we should only consider a subset
	 * of mmd's */
	if (LOOPBACK_INTERNAL(efx))
		return true;
	else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
		return false;
	else if (efx_phy_mode_disabled(efx->phy_mode))
		return false;
	else if (efx->loopback_mode == LOOPBACK_PHYXS)
		mmd_mask &= ~(MDIO_DEVS_PHYXS |
			      MDIO_DEVS_PCS |
			      MDIO_DEVS_PMAPMD |
			      MDIO_DEVS_AN);
	else if (efx->loopback_mode == LOOPBACK_PCS)
		mmd_mask &= ~(MDIO_DEVS_PCS |
			      MDIO_DEVS_PMAPMD |
			      MDIO_DEVS_AN);
	else if (efx->loopback_mode == LOOPBACK_PMAPMD)
		mmd_mask &= ~(MDIO_DEVS_PMAPMD |
			      MDIO_DEVS_AN);

	return mdio45_links_ok(&efx->mdio, mmd_mask);
}
180 | |||
181 | void efx_mdio_transmit_disable(struct efx_nic *efx) | ||
182 | { | ||
183 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, | ||
184 | MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL, | ||
185 | efx->phy_mode & PHY_MODE_TX_DISABLED); | ||
186 | } | ||
187 | |||
188 | void efx_mdio_phy_reconfigure(struct efx_nic *efx) | ||
189 | { | ||
190 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, | ||
191 | MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK, | ||
192 | efx->loopback_mode == LOOPBACK_PMAPMD); | ||
193 | efx_mdio_set_flag(efx, MDIO_MMD_PCS, | ||
194 | MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK, | ||
195 | efx->loopback_mode == LOOPBACK_PCS); | ||
196 | efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, | ||
197 | MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK, | ||
198 | efx->loopback_mode == LOOPBACK_PHYXS_WS); | ||
199 | } | ||
200 | |||
201 | static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, | ||
202 | int lpower, int mmd) | ||
203 | { | ||
204 | int stat = efx_mdio_read(efx, mmd, MDIO_STAT1); | ||
205 | |||
206 | netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n", | ||
207 | mmd, lpower); | ||
208 | |||
209 | if (stat & MDIO_STAT1_LPOWERABLE) { | ||
210 | efx_mdio_set_flag(efx, mmd, MDIO_CTRL1, | ||
211 | MDIO_CTRL1_LPOWER, lpower); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | void efx_mdio_set_mmds_lpower(struct efx_nic *efx, | ||
216 | int low_power, unsigned int mmd_mask) | ||
217 | { | ||
218 | int mmd = 0; | ||
219 | mmd_mask &= ~MDIO_DEVS_AN; | ||
220 | while (mmd_mask) { | ||
221 | if (mmd_mask & 1) | ||
222 | efx_mdio_set_mmd_lpower(efx, low_power, mmd); | ||
223 | mmd_mask = (mmd_mask >> 1); | ||
224 | mmd++; | ||
225 | } | ||
226 | } | ||
227 | |||
/**
 * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
 * @efx:	Efx NIC
 * @ecmd:	New settings
 *
 * Only autonegotiated settings on twisted-pair (-T) PHYs can be
 * changed; anything else yields -EINVAL.  Returns 0 on success, or if
 * the requested settings already match the current ones.
 */
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };

	efx->phy_op->get_settings(efx, &prev);

	/* Nothing relevant changed - nothing to do */
	if (ecmd->advertising == prev.advertising &&
	    ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
	    ecmd->duplex == prev.duplex &&
	    ecmd->port == prev.port &&
	    ecmd->autoneg == prev.autoneg)
		return 0;

	/* We can only change these settings for -T PHYs */
	if (prev.port != PORT_TP || ecmd->port != PORT_TP)
		return -EINVAL;

	/* Check that PHY supports these settings */
	if (!ecmd->autoneg ||
	    (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
		return -EINVAL;

	efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
	efx_mdio_an_reconfigure(efx);
	return 0;
}
259 | |||
/**
 * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
 * @efx:	Efx NIC
 *
 * Writes the base-page advertisement (pause bits derived from
 * efx->link_advertising), pushes the extended next page via the PHY
 * operations, then enables and restarts autonegotiation.  Requires the
 * PHY to expose an AN MMD.
 */
void efx_mdio_an_reconfigure(struct efx_nic *efx)
{
	int reg;

	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));

	/* Set up the base page */
	reg = ADVERTISE_CSMA | ADVERTISE_RESV;
	if (efx->link_advertising & ADVERTISED_Pause)
		reg |= ADVERTISE_PAUSE_CAP;
	if (efx->link_advertising & ADVERTISED_Asym_Pause)
		reg |= ADVERTISE_PAUSE_ASYM;
	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);

	/* Set up the (extended) next page */
	efx->phy_op->set_npage_adv(efx, efx->link_advertising);

	/* Enable and restart AN */
	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
	reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
286 | |||
/* Return the flow-control settings to use: either the fixed settings
 * requested in efx->wanted_fc, or (when EFX_FC_AUTO is set) the result
 * of resolving our advertisement against the link partner's via AN.
 */
u8 efx_mdio_get_pause(struct efx_nic *efx)
{
	/* EFX_FC_AUTO must not overlap the RX/TX bits, since those are
	 * the only bits callers interpret in the returned value */
	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));

	if (!(efx->wanted_fc & EFX_FC_AUTO))
		return efx->wanted_fc;

	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));

	return mii_resolve_flowctrl_fdx(
		mii_advertise_flowctrl(efx->wanted_fc),
		efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
300 | |||
301 | int efx_mdio_test_alive(struct efx_nic *efx) | ||
302 | { | ||
303 | int rc; | ||
304 | int devad = __ffs(efx->mdio.mmds); | ||
305 | u16 physid1, physid2; | ||
306 | |||
307 | mutex_lock(&efx->mac_lock); | ||
308 | |||
309 | physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1); | ||
310 | physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2); | ||
311 | |||
312 | if ((physid1 == 0x0000) || (physid1 == 0xffff) || | ||
313 | (physid2 == 0x0000) || (physid2 == 0xffff)) { | ||
314 | netif_err(efx, hw, efx->net_dev, | ||
315 | "no MDIO PHY present with ID %d\n", efx->mdio.prtad); | ||
316 | rc = -EINVAL; | ||
317 | } else { | ||
318 | rc = efx_mdio_check_mmds(efx, efx->mdio.mmds); | ||
319 | } | ||
320 | |||
321 | mutex_unlock(&efx->mac_lock); | ||
322 | return rc; | ||
323 | } | ||
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h new file mode 100644 index 00000000000..a97dbbd2de9 --- /dev/null +++ b/drivers/net/sfc/mdio_10g.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
#ifndef EFX_MDIO_10G_H
#define EFX_MDIO_10G_H

#include <linux/mdio.h>

/*
 * Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
 */

#include "efx.h"

/* Decompose a 32-bit clause 45 PHY identifier (DEVID1:DEVID2) */
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
extern unsigned efx_mdio_id_oui(u32 id);

/* Read register @addr from MMD @devad of the PHY attached to @efx */
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
	return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}

/* Write @value to register @addr in MMD @devad of the PHY attached to @efx */
static inline void
efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
{
	efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}

/* Read the combined 32-bit PHY identifier from MMD @mmd */
static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
{
	u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
	u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
	return (id_hi << 16) | (id_low);
}

/* Check whether the XGXS lanes are aligned.  The lane status register
 * is read twice -- presumably so latching status bits reflect current
 * state rather than a stale event; TODO confirm against the PHY spec. */
static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
{
	int i, lane_status;
	bool sync;

	for (i = 0; i < 2; ++i)
		lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
					    MDIO_PHYXS_LNSTAT);

	sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
	if (!sync)
		netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
			  lane_status);
	return sync;
}

extern const char *efx_mdio_mmd_name(int mmd);

/*
 * Reset a specific MMD and wait for reset to clear.
 * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
 *
 * This function will sleep
 */
extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
			      int spins, int spintime);

/* As efx_mdio_check_mmd but for multiple MMDs */
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);

/* Check the link status of specified mmds in bit mask */
extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);

/* Generic transmit disable support though PMAPMD */
extern void efx_mdio_transmit_disable(struct efx_nic *efx);

/* Generic part of reconfigure: set/clear loopback bits */
extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);

/* Set the power state of the specified MMDs */
extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
				     int low_power, unsigned int mmd_mask);

/* Set (some of) the PHY settings over MDIO */
extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);

/* Push advertising flags and restart autonegotiation */
extern void efx_mdio_an_reconfigure(struct efx_nic *efx);

/* Get pause parameters from AN if available (otherwise return
 * requested pause parameters)
 */
u8 efx_mdio_get_pause(struct efx_nic *efx);

/* Wait for specified MMDs to exit reset within a timeout */
extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
				    unsigned int mmd_mask);

/* Set or clear flag, debouncing */
static inline void
efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
		  int mask, bool state)
{
	mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}

/* Liveness self-test for MDIO PHYs */
extern int efx_mdio_test_alive(struct efx_nic *efx);

#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c new file mode 100644 index 00000000000..b6304486f24 --- /dev/null +++ b/drivers/net/sfc/mtd.c | |||
@@ -0,0 +1,693 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/mtd/mtd.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/rtnetlink.h> | ||
17 | |||
18 | #include "net_driver.h" | ||
19 | #include "spi.h" | ||
20 | #include "efx.h" | ||
21 | #include "nic.h" | ||
22 | #include "mcdi.h" | ||
23 | #include "mcdi_pcol.h" | ||
24 | |||
/* Size of the scratch buffers used when verifying an erase, in bytes */
#define EFX_SPI_VERIFY_BUF_LEN 16

/* One exported MTD partition */
struct efx_mtd_partition {
	struct mtd_info mtd;
	union {
		struct {
			/* Siena (MCDI) partitions */
			bool updating;	/* NVRAM update started, not finished */
			u8 nvram_type;	/* NVRAM type passed to MCDI calls */
			u16 fw_subtype;	/* included in the partition name */
		} mcdi;
		size_t offset;		/* Falcon (SPI) partitions: byte
					 * offset into the SPI device */
	};
	const char *type_name;
	char name[IFNAMSIZ + 20];
};

/* Backend operations implementing the MTD interface (Falcon SPI or
 * Siena MCDI) */
struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	int (*sync)(struct mtd_info *mtd);
};

/* One backing device (flash or EEPROM) with its partitions */
struct efx_mtd {
	struct list_head node;		/* entry in efx->mtd_list */
	struct efx_nic *efx;
	const struct efx_spi_device *spi;	/* Falcon only */
	const char *name;
	const struct efx_mtd_ops *ops;
	size_t n_parts;
	struct efx_mtd_partition part[0];	/* trailing variable-length
						 * partition array */
};

/* Iterate over all n_parts partitions of an efx_mtd */
#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
70 | |||
71 | /* SPI utilities */ | ||
72 | |||
/* Wait up to 4 seconds for the SPI flash/EEPROM behind @part to finish
 * a slow operation (erase/write), polling the status register every
 * ~100ms.  When @uninterruptible is false a pending signal aborts the
 * wait with -EINTR.  Returns 0 when the device reports ready.
 */
static int
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		/* Ready once the not-ready bit clears */
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
	return -ETIMEDOUT;
}
99 | |||
/* Clear the block-protection (BP) bits in the SPI status register so
 * the device can be erased and written.  No-op if already unlocked.
 */
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	/* Enable writes, then enable writing the status register itself
	 * (SPI_SST_EWSR - required by SST parts, harmless otherwise --
	 * TODO confirm for non-SST devices) */
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	/* Write back the status with the protection bits cleared and
	 * wait for the write to complete */
	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}
134 | |||
/* Erase one sector of the SPI device backing @part and verify that the
 * whole region reads back as all-ones.  @len must equal the device's
 * erase size.
 */
static int
efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	unsigned pos, block_len;
	u8 empty[EFX_SPI_VERIFY_BUF_LEN];
	u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	/* erase_command == 0 means the device cannot erase */
	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = efx_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = efx_spi_slow_wait(part, false);
	/* NOTE(review): an error from efx_spi_slow_wait() is not returned
	 * here; rc is overwritten by the verify loop below whenever the
	 * reads succeed, so a wait timeout/-EINTR is masked if the sector
	 * nevertheless verifies blank.  Confirm this is intentional. */

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}
183 | |||
184 | /* MTD interface */ | ||
185 | |||
/* MTD erase entry point: delegate to the backend and report completion
 * state back to the MTD core via the erase_info callback.
 */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	int rc;

	rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
	if (rc == 0) {
		erase->state = MTD_ERASE_DONE;
	} else {
		erase->state = MTD_ERASE_FAILED;
		/* Exact failure address is unknown */
		erase->fail_addr = 0xffffffff;
	}
	mtd_erase_callback(erase);
	return rc;
}
201 | |||
202 | static void efx_mtd_sync(struct mtd_info *mtd) | ||
203 | { | ||
204 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
205 | struct efx_mtd *efx_mtd = mtd->priv; | ||
206 | int rc; | ||
207 | |||
208 | rc = efx_mtd->ops->sync(mtd); | ||
209 | if (rc) | ||
210 | pr_err("%s: %s sync failed (%d)\n", | ||
211 | part->name, efx_mtd->name, rc); | ||
212 | } | ||
213 | |||
214 | static void efx_mtd_remove_partition(struct efx_mtd_partition *part) | ||
215 | { | ||
216 | int rc; | ||
217 | |||
218 | for (;;) { | ||
219 | rc = mtd_device_unregister(&part->mtd); | ||
220 | if (rc != -EBUSY) | ||
221 | break; | ||
222 | ssleep(1); | ||
223 | } | ||
224 | WARN_ON(rc); | ||
225 | } | ||
226 | |||
227 | static void efx_mtd_remove_device(struct efx_mtd *efx_mtd) | ||
228 | { | ||
229 | struct efx_mtd_partition *part; | ||
230 | |||
231 | efx_for_each_partition(part, efx_mtd) | ||
232 | efx_mtd_remove_partition(part); | ||
233 | list_del(&efx_mtd->node); | ||
234 | kfree(efx_mtd); | ||
235 | } | ||
236 | |||
237 | static void efx_mtd_rename_device(struct efx_mtd *efx_mtd) | ||
238 | { | ||
239 | struct efx_mtd_partition *part; | ||
240 | |||
241 | efx_for_each_partition(part, efx_mtd) | ||
242 | if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0) | ||
243 | snprintf(part->name, sizeof(part->name), | ||
244 | "%s %s:%02x", efx_mtd->efx->name, | ||
245 | part->type_name, part->mcdi.fw_subtype); | ||
246 | else | ||
247 | snprintf(part->name, sizeof(part->name), | ||
248 | "%s %s", efx_mtd->efx->name, | ||
249 | part->type_name); | ||
250 | } | ||
251 | |||
/* Register every partition of @efx_mtd with the MTD core and add the
 * device to @efx's MTD list.  On failure any partitions already
 * registered are unwound.  Returns 0 or -ENOMEM.
 */
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;

	efx_mtd_rename_device(efx_mtd);

	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd.erase = efx_mtd_erase;
		part->mtd.read = efx_mtd->ops->read;
		part->mtd.write = efx_mtd->ops->write;
		part->mtd.sync = efx_mtd_sync;

		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;
	}

	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	/* Unwind the partitions registered before the failure.  Note the
	 * actual registration error is discarded: -ENOMEM is returned
	 * regardless of the cause. */
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* mtd_device_register() returns 1 if the MTD table is full */
	return -ENOMEM;
}
286 | |||
287 | void efx_mtd_remove(struct efx_nic *efx) | ||
288 | { | ||
289 | struct efx_mtd *efx_mtd, *next; | ||
290 | |||
291 | WARN_ON(efx_dev_registered(efx)); | ||
292 | |||
293 | list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node) | ||
294 | efx_mtd_remove_device(efx_mtd); | ||
295 | } | ||
296 | |||
297 | void efx_mtd_rename(struct efx_nic *efx) | ||
298 | { | ||
299 | struct efx_mtd *efx_mtd; | ||
300 | |||
301 | ASSERT_RTNL(); | ||
302 | |||
303 | list_for_each_entry(efx_mtd, &efx->mtd_list, node) | ||
304 | efx_mtd_rename_device(efx_mtd); | ||
305 | } | ||
306 | |||
307 | int efx_mtd_probe(struct efx_nic *efx) | ||
308 | { | ||
309 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | ||
310 | return siena_mtd_probe(efx); | ||
311 | else | ||
312 | return falcon_mtd_probe(efx); | ||
313 | } | ||
314 | |||
315 | /* Implementation of MTD operations for Falcon */ | ||
316 | |||
317 | static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, | ||
318 | size_t len, size_t *retlen, u8 *buffer) | ||
319 | { | ||
320 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
321 | struct efx_mtd *efx_mtd = mtd->priv; | ||
322 | const struct efx_spi_device *spi = efx_mtd->spi; | ||
323 | struct efx_nic *efx = efx_mtd->efx; | ||
324 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
325 | int rc; | ||
326 | |||
327 | rc = mutex_lock_interruptible(&nic_data->spi_lock); | ||
328 | if (rc) | ||
329 | return rc; | ||
330 | rc = falcon_spi_read(efx, spi, part->offset + start, len, | ||
331 | retlen, buffer); | ||
332 | mutex_unlock(&nic_data->spi_lock); | ||
333 | return rc; | ||
334 | } | ||
335 | |||
336 | static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) | ||
337 | { | ||
338 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
339 | struct efx_mtd *efx_mtd = mtd->priv; | ||
340 | struct efx_nic *efx = efx_mtd->efx; | ||
341 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
342 | int rc; | ||
343 | |||
344 | rc = mutex_lock_interruptible(&nic_data->spi_lock); | ||
345 | if (rc) | ||
346 | return rc; | ||
347 | rc = efx_spi_erase(part, part->offset + start, len); | ||
348 | mutex_unlock(&nic_data->spi_lock); | ||
349 | return rc; | ||
350 | } | ||
351 | |||
352 | static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, | ||
353 | size_t len, size_t *retlen, const u8 *buffer) | ||
354 | { | ||
355 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
356 | struct efx_mtd *efx_mtd = mtd->priv; | ||
357 | const struct efx_spi_device *spi = efx_mtd->spi; | ||
358 | struct efx_nic *efx = efx_mtd->efx; | ||
359 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
360 | int rc; | ||
361 | |||
362 | rc = mutex_lock_interruptible(&nic_data->spi_lock); | ||
363 | if (rc) | ||
364 | return rc; | ||
365 | rc = falcon_spi_write(efx, spi, part->offset + start, len, | ||
366 | retlen, buffer); | ||
367 | mutex_unlock(&nic_data->spi_lock); | ||
368 | return rc; | ||
369 | } | ||
370 | |||
371 | static int falcon_mtd_sync(struct mtd_info *mtd) | ||
372 | { | ||
373 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
374 | struct efx_mtd *efx_mtd = mtd->priv; | ||
375 | struct efx_nic *efx = efx_mtd->efx; | ||
376 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
377 | int rc; | ||
378 | |||
379 | mutex_lock(&nic_data->spi_lock); | ||
380 | rc = efx_spi_slow_wait(part, true); | ||
381 | mutex_unlock(&nic_data->spi_lock); | ||
382 | return rc; | ||
383 | } | ||
384 | |||
385 | static struct efx_mtd_ops falcon_mtd_ops = { | ||
386 | .read = falcon_mtd_read, | ||
387 | .erase = falcon_mtd_erase, | ||
388 | .write = falcon_mtd_write, | ||
389 | .sync = falcon_mtd_sync, | ||
390 | }; | ||
391 | |||
/* Probe the Falcon SPI flash and EEPROM and register an MTD device (one
 * partition each) for the regions beyond the boot code / boot config
 * start offsets.  Returns 0 if at least the last device probed was
 * registered, -ENODEV if neither device is present/large enough, or a
 * negative error from registration/allocation.
 */
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct efx_spi_device *spi;
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Boot ROM region of the flash, if present */
	spi = &nic_data->spi_flash;
	if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;

		efx_mtd->spi = spi;
		efx_mtd->name = "flash";
		efx_mtd->ops = &falcon_mtd_ops;

		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_NORFLASH;
		efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
		efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].type_name = "sfc_flash_bootrom";

		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	/* Boot configuration region of the EEPROM, if present */
	spi = &nic_data->spi_eeprom;
	if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;

		efx_mtd->spi = spi;
		efx_mtd->name = "EEPROM";
		efx_mtd->ops = &falcon_mtd_ops;

		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_RAM;
		efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
		efx_mtd->part[0].mtd.size =
			min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
			EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].type_name = "sfc_bootconfig";

		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	return rc;
}
457 | |||
458 | /* Implementation of MTD operations for Siena */ | ||
459 | |||
460 | static int siena_mtd_read(struct mtd_info *mtd, loff_t start, | ||
461 | size_t len, size_t *retlen, u8 *buffer) | ||
462 | { | ||
463 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
464 | struct efx_mtd *efx_mtd = mtd->priv; | ||
465 | struct efx_nic *efx = efx_mtd->efx; | ||
466 | loff_t offset = start; | ||
467 | loff_t end = min_t(loff_t, start + len, mtd->size); | ||
468 | size_t chunk; | ||
469 | int rc = 0; | ||
470 | |||
471 | while (offset < end) { | ||
472 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); | ||
473 | rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, | ||
474 | buffer, chunk); | ||
475 | if (rc) | ||
476 | goto out; | ||
477 | offset += chunk; | ||
478 | buffer += chunk; | ||
479 | } | ||
480 | out: | ||
481 | *retlen = offset - start; | ||
482 | return rc; | ||
483 | } | ||
484 | |||
485 | static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) | ||
486 | { | ||
487 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
488 | struct efx_mtd *efx_mtd = mtd->priv; | ||
489 | struct efx_nic *efx = efx_mtd->efx; | ||
490 | loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); | ||
491 | loff_t end = min_t(loff_t, start + len, mtd->size); | ||
492 | size_t chunk = part->mtd.erasesize; | ||
493 | int rc = 0; | ||
494 | |||
495 | if (!part->mcdi.updating) { | ||
496 | rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); | ||
497 | if (rc) | ||
498 | goto out; | ||
499 | part->mcdi.updating = 1; | ||
500 | } | ||
501 | |||
502 | /* The MCDI interface can in fact do multiple erase blocks at once; | ||
503 | * but erasing may be slow, so we make multiple calls here to avoid | ||
504 | * tripping the MCDI RPC timeout. */ | ||
505 | while (offset < end) { | ||
506 | rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset, | ||
507 | chunk); | ||
508 | if (rc) | ||
509 | goto out; | ||
510 | offset += chunk; | ||
511 | } | ||
512 | out: | ||
513 | return rc; | ||
514 | } | ||
515 | |||
516 | static int siena_mtd_write(struct mtd_info *mtd, loff_t start, | ||
517 | size_t len, size_t *retlen, const u8 *buffer) | ||
518 | { | ||
519 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
520 | struct efx_mtd *efx_mtd = mtd->priv; | ||
521 | struct efx_nic *efx = efx_mtd->efx; | ||
522 | loff_t offset = start; | ||
523 | loff_t end = min_t(loff_t, start + len, mtd->size); | ||
524 | size_t chunk; | ||
525 | int rc = 0; | ||
526 | |||
527 | if (!part->mcdi.updating) { | ||
528 | rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); | ||
529 | if (rc) | ||
530 | goto out; | ||
531 | part->mcdi.updating = 1; | ||
532 | } | ||
533 | |||
534 | while (offset < end) { | ||
535 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); | ||
536 | rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, | ||
537 | buffer, chunk); | ||
538 | if (rc) | ||
539 | goto out; | ||
540 | offset += chunk; | ||
541 | buffer += chunk; | ||
542 | } | ||
543 | out: | ||
544 | *retlen = offset - start; | ||
545 | return rc; | ||
546 | } | ||
547 | |||
548 | static int siena_mtd_sync(struct mtd_info *mtd) | ||
549 | { | ||
550 | struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); | ||
551 | struct efx_mtd *efx_mtd = mtd->priv; | ||
552 | struct efx_nic *efx = efx_mtd->efx; | ||
553 | int rc = 0; | ||
554 | |||
555 | if (part->mcdi.updating) { | ||
556 | part->mcdi.updating = 0; | ||
557 | rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type); | ||
558 | } | ||
559 | |||
560 | return rc; | ||
561 | } | ||
562 | |||
/* MTD operation table for Siena: all accesses are mediated by MCDI
 * NVRAM requests to the management controller. */
static struct efx_mtd_ops siena_mtd_ops = {
	.read = siena_mtd_read,
	.erase = siena_mtd_erase,
	.write = siena_mtd_write,
	.sync = siena_mtd_sync,
};
569 | |||
/* Maps an MCDI NVRAM partition type to the port that owns it and the
 * type name under which it is exposed as an MTD device. */
struct siena_nvram_type_info {
	int port;
	const char *name;
};

/* Exposed NVRAM partition types, indexed by MC_CMD_NVRAM_TYPE_*.
 * Entries belonging to the other port are filtered out by
 * siena_mtd_probe_partition(). */
static struct siena_nvram_type_info siena_nvram_types[] = {
	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
	[MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
};
589 | |||
590 | static int siena_mtd_probe_partition(struct efx_nic *efx, | ||
591 | struct efx_mtd *efx_mtd, | ||
592 | unsigned int part_id, | ||
593 | unsigned int type) | ||
594 | { | ||
595 | struct efx_mtd_partition *part = &efx_mtd->part[part_id]; | ||
596 | struct siena_nvram_type_info *info; | ||
597 | size_t size, erase_size; | ||
598 | bool protected; | ||
599 | int rc; | ||
600 | |||
601 | if (type >= ARRAY_SIZE(siena_nvram_types)) | ||
602 | return -ENODEV; | ||
603 | |||
604 | info = &siena_nvram_types[type]; | ||
605 | |||
606 | if (info->port != efx_port_num(efx)) | ||
607 | return -ENODEV; | ||
608 | |||
609 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); | ||
610 | if (rc) | ||
611 | return rc; | ||
612 | if (protected) | ||
613 | return -ENODEV; /* hide it */ | ||
614 | |||
615 | part->mcdi.nvram_type = type; | ||
616 | part->type_name = info->name; | ||
617 | |||
618 | part->mtd.type = MTD_NORFLASH; | ||
619 | part->mtd.flags = MTD_CAP_NORFLASH; | ||
620 | part->mtd.size = size; | ||
621 | part->mtd.erasesize = erase_size; | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, | ||
627 | struct efx_mtd *efx_mtd) | ||
628 | { | ||
629 | struct efx_mtd_partition *part; | ||
630 | uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / | ||
631 | sizeof(uint16_t)]; | ||
632 | int rc; | ||
633 | |||
634 | rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list); | ||
635 | if (rc) | ||
636 | return rc; | ||
637 | |||
638 | efx_for_each_partition(part, efx_mtd) | ||
639 | part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type]; | ||
640 | |||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | static int siena_mtd_probe(struct efx_nic *efx) | ||
645 | { | ||
646 | struct efx_mtd *efx_mtd; | ||
647 | int rc = -ENODEV; | ||
648 | u32 nvram_types; | ||
649 | unsigned int type; | ||
650 | |||
651 | ASSERT_RTNL(); | ||
652 | |||
653 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | ||
654 | if (rc) | ||
655 | return rc; | ||
656 | |||
657 | efx_mtd = kzalloc(sizeof(*efx_mtd) + | ||
658 | hweight32(nvram_types) * sizeof(efx_mtd->part[0]), | ||
659 | GFP_KERNEL); | ||
660 | if (!efx_mtd) | ||
661 | return -ENOMEM; | ||
662 | |||
663 | efx_mtd->name = "Siena NVRAM manager"; | ||
664 | |||
665 | efx_mtd->ops = &siena_mtd_ops; | ||
666 | |||
667 | type = 0; | ||
668 | efx_mtd->n_parts = 0; | ||
669 | |||
670 | while (nvram_types != 0) { | ||
671 | if (nvram_types & 1) { | ||
672 | rc = siena_mtd_probe_partition(efx, efx_mtd, | ||
673 | efx_mtd->n_parts, type); | ||
674 | if (rc == 0) | ||
675 | efx_mtd->n_parts++; | ||
676 | else if (rc != -ENODEV) | ||
677 | goto fail; | ||
678 | } | ||
679 | type++; | ||
680 | nvram_types >>= 1; | ||
681 | } | ||
682 | |||
683 | rc = siena_mtd_get_fw_subtypes(efx, efx_mtd); | ||
684 | if (rc) | ||
685 | goto fail; | ||
686 | |||
687 | rc = efx_mtd_probe_device(efx, efx_mtd); | ||
688 | fail: | ||
689 | if (rc) | ||
690 | kfree(efx_mtd); | ||
691 | return rc; | ||
692 | } | ||
693 | |||
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h new file mode 100644 index 00000000000..b8e251a1ee4 --- /dev/null +++ b/drivers/net/sfc/net_driver.h | |||
@@ -0,0 +1,1060 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2005-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | /* Common definitions for all Efx net driver code */ | ||
12 | |||
13 | #ifndef EFX_NET_DRIVER_H | ||
14 | #define EFX_NET_DRIVER_H | ||
15 | |||
16 | #if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG) | ||
17 | #define DEBUG | ||
18 | #endif | ||
19 | |||
20 | #include <linux/netdevice.h> | ||
21 | #include <linux/etherdevice.h> | ||
22 | #include <linux/ethtool.h> | ||
23 | #include <linux/if_vlan.h> | ||
24 | #include <linux/timer.h> | ||
25 | #include <linux/mdio.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/device.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/workqueue.h> | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/i2c.h> | ||
33 | |||
34 | #include "enum.h" | ||
35 | #include "bitfield.h" | ||
36 | |||
37 | /************************************************************************** | ||
38 | * | ||
39 | * Build definitions | ||
40 | * | ||
41 | **************************************************************************/ | ||
42 | |||
#define EFX_DRIVER_VERSION "3.1"

/* Debug-only sanity checks: compiled away entirely unless
 * EFX_ENABLE_DEBUG is defined, so they cost nothing in production. */
#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
52 | |||
53 | /************************************************************************** | ||
54 | * | ||
55 | * Efx data structures | ||
56 | * | ||
57 | **************************************************************************/ | ||
58 | |||
/* Upper bound on event channels; RX queues are per-channel */
#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC 2
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
/* All combinations of the two per-queue type flags above */
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
71 | |||
/**
 * struct efx_special_buffer - An Efx special buffer
 * @addr: CPU base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * Special buffers are used for the event queues and the TX and RX
 * descriptor queues for each channel. They are *not* used for the
 * actual transmit and receive buffers.
 */
struct efx_special_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
	int index;
	int entries;
};
91 | |||
/* State of a hardware queue flush request (used by both TX and RX
 * queues; see the @flushed members below) */
enum efx_flush_state {
	FLUSH_NONE,
	FLUSH_PENDING,
	FLUSH_FAILED,
	FLUSH_DONE,
};
98 | |||
/**
 * struct efx_tx_buffer - An Efx TX buffer
 * @skb: The associated socket buffer.
 *	Set only on the final fragment of a packet; %NULL for all other
 *	fragments.  When this fragment completes, then we can free this
 *	skb.
 * @tsoh: The associated TSO header structure, or %NULL if this
 *	buffer is not a TSO header.
 * @dma_addr: DMA address of the fragment.
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @continuation: True if this fragment is not the end of a packet.
 * @unmap_single: True if pci_unmap_single should be used.
 *	(NOTE(review): when false, presumably pci_unmap_page applies —
 *	confirm against tx.c, which is outside this view.)
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
	const struct sk_buff *skb;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	unsigned short len;
	bool continuation;
	bool unmap_single;
	unsigned short unmap_len;
};
123 | |||
/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @initialised: Has hardware queue been initialised?
 * @flushed: Used when handling queue flushing
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_headers_free: A list of TSO headers allocated for this TX queue
 *	that are not in use, and so available for new TSO sends. The list
 *	is protected by the TX queue lock.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	bool initialised;
	enum efx_flush_state flushed;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	struct efx_tso_header *tso_headers_free;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
	/* Flag combined into @empty_read_count to distinguish a stored
	 * (possibly zero) read pointer from the all-clear state of 0 */
#define EFX_EMPTY_COUNT_VALID 0x80000000
};
208 | |||
/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @u.skb: The associated socket buffer, if any.
 *	If both this and page are %NULL, the buffer slot is currently free.
 * @u.page: The associated page buffer, if any.
 *	If both this and skb are %NULL, the buffer slot is currently free.
 * @len: Buffer length, in bytes.
 * @is_page: Indicates if @u.page is valid. If false, @u.skb is valid.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	union {
		struct sk_buff *skb;
		struct page *page;
	} u;
	unsigned int len;
	bool is_page;
};
228 | |||
/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
 *	When refcnt falls to zero, the page is unmapped for dma
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	unsigned refcnt;
	dma_addr_t dma_addr;

	/* Zero-length cacheline-aligned member: rounds the struct size up
	 * to a cache-line boundary so the receive data that follows it in
	 * the page starts on a fresh cache line */
	unsigned int __pad[0] ____cacheline_aligned;
};
246 | |||
/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @fast_fill_limit: The level to which a fast fill will fill
 *	(@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @min_overfill: NOTE(review): undocumented in this version; the name
 *	suggests an overfill diagnostic counterpart of @min_fill —
 *	confirm against rx.c before relying on it
 * @alloc_page_count: RX allocation strategy counter.
 * @alloc_skb_count: RX allocation strategy counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 * @slow_fill_count: NOTE(review): presumably counts @slow_fill
 *	activations — confirm against rx.c
 * @flushed: Use when handling queue flushing
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;

	int added_count;
	int notified_count;
	int removed_count;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int fast_fill_limit;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int alloc_page_count;
	unsigned int alloc_skb_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;

	enum efx_flush_state flushed;
};
290 | |||
/**
 * struct efx_buffer - An Efx general-purpose buffer
 * @addr: CPU base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};


/* RX buffer allocation strategy; AUTO lets the driver choose per
 * channel (see rx_alloc_level in struct efx_channel below) */
enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};
312 | |||
/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @work_pending: Is work pending via NAPI?
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @last_eventq_read_ptr: Last event queue read pointer value.
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @rfs_filters_added: Count of accelerated RFS filters added through
 *	this channel (only present with CONFIG_RFS_ACCEL)
 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
 *	and diagnostic counters
 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
 *	descriptors
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @rx_pkt: Received buffer being pipelined for prefetch-friendly access
 * @rx_pkt_csummed: NOTE(review): whether @rx_pkt's checksum was verified
 *	by hardware — name-based inference, confirm against rx.c
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
	bool work_pending;
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	unsigned int last_eventq_read_ptr;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#endif

	int rx_alloc_level;
	int rx_alloc_push_pages;

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;

	/* Used to pipeline received packets in order to optimise memory
	 * access with prefetches.
	 */
	struct efx_rx_buffer *rx_pkt;
	bool rx_pkt_csummed;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
388 | |||
/* Supported LED control states */
enum efx_led_mode {
	EFX_LED_OFF = 0,
	EFX_LED_ON = 1,
	EFX_LED_DEFAULT = 2
};

/* Safely index a "<member>_names" string table, falling back to
 * "(invalid)" when @val is out of range per "<member>_max" */
#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

extern const char *efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
/* Name of the NIC's current loopback mode */
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
/* Name of a reset type */
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
407 | |||
/* Interrupt delivery mode; the numeric values matter for the
 * EFX_INT_MODE_USE_MSI() test below */
enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX /* Insert any new items before this */
};
/* True when interrupts are message-signalled (MSI-X or MSI) */
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

/* Overall device state */
enum nic_state {
	STATE_INIT = 0,
	STATE_RUNNING = 1,
	STATE_FINI = 2,
	STATE_DISABLED = 3,
	STATE_MAX,
};
424 | |||
/*
 * Alignment of page-allocated RX buffers
 *
 * Controls the number of bytes inserted at the start of an RX buffer.
 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
 * of the skb->head for hardware DMA].
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFX_PAGE_IP_ALIGN 0
#else
#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
#endif

/*
 * Alignment of the skb->head which wraps a page-allocated RX buffer
 *
 * The skb allocated to wrap an rx_buffer can have this alignment. Since
 * the data is memcpy'd from the rx_buf, it does not need to be equal to
 * EFX_PAGE_IP_ALIGN.
 */
#define EFX_PAGE_SKB_ALIGN 2

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX FLOW_CTRL_RX
#define EFX_FC_TX FLOW_CTRL_TX
/* Driver-private flag in addition to the kernel's FLOW_CTRL_RX/TX bits */
#define EFX_FC_AUTO 4
454 | |||
/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags (EFX_FC_RX / EFX_FC_TX)
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};
468 | |||
469 | static inline bool efx_link_state_equal(const struct efx_link_state *left, | ||
470 | const struct efx_link_state *right) | ||
471 | { | ||
472 | return left->up == right->up && left->fd == right->fd && | ||
473 | left->fc == right->fc && left->speed == right->speed; | ||
474 | } | ||
475 | |||
/**
 * struct efx_mac_operations - Efx MAC operations table
 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
 * @update_stats: Update statistics (NOTE(review): presumably into the
 *	NIC's struct efx_mac_stats below — confirm against callers)
 * @check_fault: Check fault state. True if fault present.
 */
struct efx_mac_operations {
	int (*reconfigure) (struct efx_nic *efx);
	void (*update_stats) (struct efx_nic *efx);
	bool (*check_fault)(struct efx_nic *efx);
};
487 | |||
/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @remove: NOTE(review): undocumented in this version; presumably
 *	releases resources acquired by @probe — confirm against a PHY
 *	driver implementation
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
};
522 | |||
/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL = 0,
	PHY_MODE_TX_DISABLED = 1,
	PHY_MODE_LOW_POWER = 2,
	PHY_MODE_OFF = 4,
	PHY_MODE_SPECIAL = 8,
};
538 | |||
539 | static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) | ||
540 | { | ||
541 | return !!(mode & ~PHY_MODE_TX_DISABLED); | ||
542 | } | ||
543 | |||
/*
 * Efx extended statistics
 *
 * Not all statistics are provided by all supported MACs.  The purpose
 * of this structure is to contain the raw statistics provided by each
 * MAC.
 */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	unsigned long tx_packets;
	unsigned long tx_bad;
	unsigned long tx_pause;
	unsigned long tx_control;
	unsigned long tx_unicast;
	unsigned long tx_multicast;
	unsigned long tx_broadcast;
	unsigned long tx_lt64;
	unsigned long tx_64;
	unsigned long tx_65_to_127;
	unsigned long tx_128_to_255;
	unsigned long tx_256_to_511;
	unsigned long tx_512_to_1023;
	unsigned long tx_1024_to_15xx;
	unsigned long tx_15xx_to_jumbo;
	unsigned long tx_gtjumbo;
	unsigned long tx_collision;
	unsigned long tx_single_collision;
	unsigned long tx_multiple_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_deferred;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_deferred;
	unsigned long tx_non_tcpudp;
	unsigned long tx_mac_src_error;
	unsigned long tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	unsigned long rx_packets;
	unsigned long rx_good;
	unsigned long rx_bad;
	unsigned long rx_pause;
	unsigned long rx_control;
	unsigned long rx_unicast;
	unsigned long rx_multicast;
	unsigned long rx_broadcast;
	unsigned long rx_lt64;
	unsigned long rx_64;
	unsigned long rx_65_to_127;
	unsigned long rx_128_to_255;
	unsigned long rx_256_to_511;
	unsigned long rx_512_to_1023;
	unsigned long rx_1024_to_15xx;
	unsigned long rx_15xx_to_jumbo;
	unsigned long rx_gtjumbo;
	unsigned long rx_bad_lt64;
	unsigned long rx_bad_64_to_15xx;
	unsigned long rx_bad_15xx_to_jumbo;
	unsigned long rx_bad_gtjumbo;
	unsigned long rx_overflow;
	unsigned long rx_missed;
	unsigned long rx_false_carrier;
	unsigned long rx_symbol_error;
	unsigned long rx_align_error;
	unsigned long rx_length_error;
	unsigned long rx_internal_error;
	unsigned long rx_good_lt64;
};
614 | |||
/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash: one bit per hash entry, viewable
 * either as a flat byte array (for per-bit manipulation) or as owords
 * sized for register-width writes to the hardware.
 */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
626 | |||
627 | struct efx_filter_state; | ||
628 | |||
/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @interrupt_mode: Interrupt mode
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
 * @state: Device state flag. Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @channel: Channels
 * @channel_name: Names for channels and their IRQs
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
 * @next_buffer_table: First available buffer table id
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @tx_channel_offset: Index into @channel of the first channel used for TX
 * @n_tx_channels: Number of channels used for TX
 * @rx_buffer_len: RX buffer length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 * @irq_status: Interrupt status buffer
 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
 * @fatal_irq_level: IRQ level (bit number) used for serious errors
 * @mtd_list: List of MTDs attached to the NIC
 * @nic_data: Hardware dependent state
 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
 *	efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
 *	efx_mac_work() with kernel interfaces. Safe to read under any
 *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
 *	be held to modify it.
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device. Consider holding the rtnl lock
 * @stats_buffer: DMA buffer for statistics
 * @mac_op: MAC interface
 * @phy_type: PHY type
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
 * @multicast_hash: Multicast hash table
 * @wanted_fc: Wanted flow control flags
 * @rx_reset: NOTE(review): atomic counter, presumably tracking RX
 *	resets in progress — confirm semantics against rx.c
 * @mac_work: Work item for changing MAC promiscuity and multicast hash
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @filter_state: Opaque state for the filter implementation (filter.c)
 * @monitor_work: Hardware monitor workitem
 * @biu_lock: BIU (bus interface unit) lock
 * @last_irq_cpu: Last CPU to handle interrupt.
 *	This register is written with the SMP processor ID whenever an
 *	interrupt is handled. It is used by efx_nic_test_interrupt()
 *	to verify that an interrupt has occurred.
 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @mac_stats: MAC statistics. These include all statistics the MACs
 *	can provide. Generic code converts these into a standard
 *	&struct net_device_stats.
 * @stats_lock: Statistics update lock. Serialises statistics fetches
 *
 * This is stored in the private area of the &struct net_device.
 */
struct efx_nic {
	/* The following fields should be written very rarely */

	char name[IFNAMSIZ];
	struct pci_dev *pci_dev;
	const struct efx_nic_type *type;
	int legacy_irq;
	bool legacy_irq_enabled;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	resource_size_t membase_phys;
	void __iomem *membase;

	enum efx_int_mode interrupt_mode;
	bool irq_rx_adaptive;
	unsigned int irq_rx_moderation;
	u32 msg_enable;

	enum nic_state state;
	unsigned long reset_pending;

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];

	unsigned rxq_entries;
	unsigned txq_entries;
	unsigned next_buffer_table;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_buffer_len;
	unsigned int rx_buffer_order;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];

	unsigned int_error_count;
	unsigned long int_error_expire;

	struct efx_buffer irq_status;
	unsigned irq_zero_count;
	unsigned fatal_irq_level;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;
#endif

	void *nic_data;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;

	bool port_initialized;
	struct net_device *net_dev;

	struct efx_buffer stats_buffer;

	const struct efx_mac_operations *mac_op;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool promiscuous;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	u64 loopback_modes;

	void *loopback_selftest;

	struct efx_filter_state *filter_state;

	/* The following fields may be written more often */

	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;
	volatile signed int last_irq_cpu;
	unsigned n_rx_nodesc_drop_cnt;
	struct efx_mac_stats mac_stats;
	spinlock_t stats_lock;
};
803 | |||
804 | static inline int efx_dev_registered(struct efx_nic *efx) | ||
805 | { | ||
806 | return efx->net_dev->reg_state == NETREG_REGISTERED; | ||
807 | } | ||
808 | |||
809 | /* Net device name, for inclusion in log messages if it has been registered. | ||
810 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
811 | * are harmless. | ||
812 | */ | ||
813 | static inline const char *efx_dev_name(struct efx_nic *efx) | ||
814 | { | ||
815 | return efx_dev_registered(efx) ? efx->name : ""; | ||
816 | } | ||
817 | |||
818 | static inline unsigned int efx_port_num(struct efx_nic *efx) | ||
819 | { | ||
820 | return efx->net_dev->dev_id; | ||
821 | } | ||
822 | |||
/**
 * struct efx_nic_type - Efx device type definition
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
 * @fini: Shut down the controller
 * @monitor: Periodic function for polling link state and hardware monitor
 * @map_reset_reason: Map ethtool reset reason to a reset method
 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
 * @reset: Reset the controller hardware and possibly the PHY.  This will
 *	be called while the controller is uninitialised.
 * @probe_port: Probe the MAC and PHY
 * @remove_port: Free resources allocated by probe_port()
 * @handle_global_event: Handle a "global" event (may be %NULL)
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
 * @update_stats: Update statistics not provided by event handling
 * @start_stats: Start the regular fetching of statistics
 * @stop_stats: Stop the regular fetching of statistics
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @push_irq_moderation: Apply interrupt moderation value
 * @push_multicast_hash: Apply multicast hash table
 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
 * @test_registers: Test read/write functionality of control registers
 * @test_nvram: Test validity of NVRAM contents
 * @default_mac_ops: efx_mac_operations to set at startup
 * @revision: Hardware architecture revision
 * @mem_map_size: Memory BAR mapped size
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @max_dma_mask: Maximum possible DMA mask
 * @rx_buffer_hash_size: Size of hash at start of RX buffer
 * @rx_buffer_padding: Size of padding at end of RX buffer
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_int_mode.
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 */
struct efx_nic_type {
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*update_stats)(struct efx_nic *efx);
	void (*start_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	void (*push_multicast_hash)(struct efx_nic *efx);
	int (*reconfigure_port)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_registers)(struct efx_nic *efx);
	int (*test_nvram)(struct efx_nic *efx);
	const struct efx_mac_operations *default_mac_ops;

	/* Constant hardware attributes (not methods) from here down */
	int revision;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_buffer_hash_size;
	unsigned int rx_buffer_padding;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
	unsigned int tx_dc_base;
	unsigned int rx_dc_base;
	u32 offload_features;
};
913 | |||
914 | /************************************************************************** | ||
915 | * | ||
916 | * Prototypes and inline functions | ||
917 | * | ||
918 | *************************************************************************/ | ||
919 | |||
920 | static inline struct efx_channel * | ||
921 | efx_get_channel(struct efx_nic *efx, unsigned index) | ||
922 | { | ||
923 | EFX_BUG_ON_PARANOID(index >= efx->n_channels); | ||
924 | return efx->channel[index]; | ||
925 | } | ||
926 | |||
/* Iterate over all used channels in ascending index order.  Relies on
 * each channel's ->channel field holding its own index into
 * efx->channel[], and on all n_channels entries being non-NULL.
 */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)
933 | |||
934 | static inline struct efx_tx_queue * | ||
935 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) | ||
936 | { | ||
937 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | ||
938 | type >= EFX_TXQ_TYPES); | ||
939 | return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; | ||
940 | } | ||
941 | |||
942 | static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) | ||
943 | { | ||
944 | return channel->channel - channel->efx->tx_channel_offset < | ||
945 | channel->efx->n_tx_channels; | ||
946 | } | ||
947 | |||
948 | static inline struct efx_tx_queue * | ||
949 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) | ||
950 | { | ||
951 | EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) || | ||
952 | type >= EFX_TXQ_TYPES); | ||
953 | return &channel->tx_queue[type]; | ||
954 | } | ||
955 | |||
956 | static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) | ||
957 | { | ||
958 | return !(tx_queue->efx->net_dev->num_tc < 2 && | ||
959 | tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); | ||
960 | } | ||
961 | |||
/* Iterate over all TX queues belonging to a channel.
 *
 * The leading if/else (rather than a guard inside the for-condition)
 * keeps the macro usable as a single statement inside an unbraced if.
 * Iteration also stops at the first queue for which
 * efx_tx_queue_used() returns false, so trailing unused queues are
 * skipped.
 */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)
971 | |||
/* Iterate over all possible TX queues belonging to a channel,
 * including any that efx_tx_queue_used() would report as unused.
 */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	for (_tx_queue = (_channel)->tx_queue;				\
	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
	     _tx_queue++)
977 | |||
978 | static inline struct efx_rx_queue * | ||
979 | efx_get_rx_queue(struct efx_nic *efx, unsigned index) | ||
980 | { | ||
981 | EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels); | ||
982 | return &efx->channel[index]->rx_queue; | ||
983 | } | ||
984 | |||
985 | static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) | ||
986 | { | ||
987 | return channel->channel < channel->efx->n_rx_channels; | ||
988 | } | ||
989 | |||
990 | static inline struct efx_rx_queue * | ||
991 | efx_channel_get_rx_queue(struct efx_channel *channel) | ||
992 | { | ||
993 | EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel)); | ||
994 | return &channel->rx_queue; | ||
995 | } | ||
996 | |||
/* Iterate over all RX queues belonging to a channel.  A channel has at
 * most one RX queue, so the body runs zero or one times; the
 * assignment to NULL terminates the loop after the first pass.  The
 * leading if/else keeps the macro usable inside an unbraced if.
 */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)
1005 | |||
/* Map an RX queue back to its containing channel.  Valid because each
 * struct efx_channel embeds its rx_queue member directly.
 */
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}
1011 | |||
1012 | static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) | ||
1013 | { | ||
1014 | return efx_rx_queue_channel(rx_queue)->channel; | ||
1015 | } | ||
1016 | |||
1017 | /* Returns a pointer to the specified receive buffer in the RX | ||
1018 | * descriptor queue. | ||
1019 | */ | ||
1020 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, | ||
1021 | unsigned int index) | ||
1022 | { | ||
1023 | return &rx_queue->buffer[index]; | ||
1024 | } | ||
1025 | |||
/* Set bit @nr in a little-endian bitfield: bit k of byte addr[k / 8] */
static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr >> 3] |= (unsigned char)(1u << (nr & 7));
}
1031 | |||
/* Clear bit @nr in a little-endian bitfield: bit k of byte addr[k / 8] */
static inline void clear_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr >> 3] &= (unsigned char)~(1u << (nr & 7));
}
1037 | |||
1038 | |||
/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 * @mtu: Maximum transmission unit, in bytes
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
1058 | |||
1059 | |||
1060 | #endif /* EFX_NET_DRIVER_H */ | ||
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c new file mode 100644 index 00000000000..3edfbaf5f02 --- /dev/null +++ b/drivers/net/sfc/nic.c | |||
@@ -0,0 +1,1962 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include "net_driver.h" | ||
18 | #include "bitfield.h" | ||
19 | #include "efx.h" | ||
20 | #include "nic.h" | ||
21 | #include "regs.h" | ||
22 | #include "io.h" | ||
23 | #include "workarounds.h" | ||
24 | |||
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
/* NOTE(review): the _ORDER values appear to encode entries = 8 << order
 * (16 = 8 << 1, 64 = 8 << 3) — confirm against the descriptor-cache
 * size register definitions before changing either constant.
 */
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event(); the channel
 * number is added so each channel gets a distinct magic value.
 */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event(); distinguished
 * from the test magic by the 0x..02.. byte.
 */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
70 | |||
71 | /************************************************************************** | ||
72 | * | ||
73 | * Solarstorm hardware access | ||
74 | * | ||
75 | **************************************************************************/ | ||
76 | |||
77 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | ||
78 | unsigned int index) | ||
79 | { | ||
80 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | ||
81 | value, index); | ||
82 | } | ||
83 | |||
84 | /* Read the current event from the event queue */ | ||
85 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | ||
86 | unsigned int index) | ||
87 | { | ||
88 | return ((efx_qword_t *) (channel->eventq.addr)) + | ||
89 | (index & channel->eventq_mask); | ||
90 | } | ||
91 | |||
92 | /* See if an event is present | ||
93 | * | ||
94 | * We check both the high and low dword of the event for all ones. We | ||
95 | * wrote all ones when we cleared the event, and no valid event can | ||
96 | * have all ones in either its high or low dwords. This approach is | ||
97 | * robust against reordering. | ||
98 | * | ||
99 | * Note that using a single 64-bit comparison is incorrect; even | ||
100 | * though the CPU read will be atomic, the DMA write may not be. | ||
101 | */ | ||
102 | static inline int efx_event_present(efx_qword_t *event) | ||
103 | { | ||
104 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | ||
105 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); | ||
106 | } | ||
107 | |||
108 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | ||
109 | const efx_oword_t *mask) | ||
110 | { | ||
111 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
112 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
113 | } | ||
114 | |||
/* Check read/write access to a table of NIC registers.
 *
 * For each register in @regs, every bit set in the corresponding mask
 * is set and then cleared in isolation, reading back after each write
 * to verify it took effect.  The original register contents are
 * restored before moving on.  Returns 0 on success or -EIO on the
 * first mismatch.
 */
int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off: walk all 128 bits of the oword */
		for (j = 0; j < 128; j++) {
			/* Only bits set in the mask are testable */
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		/* Restore the register's original contents */
		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
170 | |||
171 | /************************************************************************** | ||
172 | * | ||
173 | * Special buffer handling | ||
174 | * Special buffers are used for event queues and the TX and RX | ||
175 | * descriptor rings. | ||
176 | * | ||
177 | *************************************************************************/ | ||
178 | |||
179 | /* | ||
180 | * Initialise a special buffer | ||
181 | * | ||
182 | * This will define a buffer (previously allocated via | ||
183 | * efx_alloc_special_buffer()) in the buffer table, allowing | ||
184 | * it to be used for event queues, descriptor rings etc. | ||
185 | */ | ||
186 | static void | ||
187 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
188 | { | ||
189 | efx_qword_t buf_desc; | ||
190 | int index; | ||
191 | dma_addr_t dma_addr; | ||
192 | int i; | ||
193 | |||
194 | EFX_BUG_ON_PARANOID(!buffer->addr); | ||
195 | |||
196 | /* Write buffer descriptors to NIC */ | ||
197 | for (i = 0; i < buffer->entries; i++) { | ||
198 | index = buffer->index + i; | ||
199 | dma_addr = buffer->dma_addr + (i * 4096); | ||
200 | netif_dbg(efx, probe, efx->net_dev, | ||
201 | "mapping special buffer %d at %llx\n", | ||
202 | index, (unsigned long long)dma_addr); | ||
203 | EFX_POPULATE_QWORD_3(buf_desc, | ||
204 | FRF_AZ_BUF_ADR_REGION, 0, | ||
205 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | ||
206 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | ||
207 | efx_write_buf_tbl(efx, &buf_desc, index); | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /* Unmaps a buffer and clears the buffer table entries */ | ||
212 | static void | ||
213 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
214 | { | ||
215 | efx_oword_t buf_tbl_upd; | ||
216 | unsigned int start = buffer->index; | ||
217 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
218 | |||
219 | if (!buffer->entries) | ||
220 | return; | ||
221 | |||
222 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", | ||
223 | buffer->index, buffer->index + buffer->entries - 1); | ||
224 | |||
225 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
226 | FRF_AZ_BUF_UPD_CMD, 0, | ||
227 | FRF_AZ_BUF_CLR_CMD, 1, | ||
228 | FRF_AZ_BUF_CLR_END_ID, end, | ||
229 | FRF_AZ_BUF_CLR_START_ID, start); | ||
230 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | ||
231 | } | ||
232 | |||
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	/* Round up to a whole number of 4KB buffer-table pages */
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	/* The buffer table requires 4KB-aligned DMA addresses */
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID: IDs are handed out linearly from a
	 * device-wide counter and never reused.  NOTE(review): no lock
	 * is taken here, so callers presumably serialise via probe —
	 * confirm.
	 */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
272 | |||
273 | static void | ||
274 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | ||
275 | { | ||
276 | if (!buffer->addr) | ||
277 | return; | ||
278 | |||
279 | netif_dbg(efx, hw, efx->net_dev, | ||
280 | "deallocating special buffers %d-%d at %llx+%x " | ||
281 | "(virt %p phys %llx)\n", buffer->index, | ||
282 | buffer->index + buffer->entries - 1, | ||
283 | (u64)buffer->dma_addr, buffer->len, | ||
284 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | ||
285 | |||
286 | dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, | ||
287 | buffer->dma_addr); | ||
288 | buffer->addr = NULL; | ||
289 | buffer->entries = 0; | ||
290 | } | ||
291 | |||
292 | /************************************************************************** | ||
293 | * | ||
294 | * Generic buffer handling | ||
295 | * These buffers are used for interrupt status and MAC stats | ||
296 | * | ||
297 | **************************************************************************/ | ||
298 | |||
299 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | ||
300 | unsigned int len) | ||
301 | { | ||
302 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
303 | &buffer->dma_addr); | ||
304 | if (!buffer->addr) | ||
305 | return -ENOMEM; | ||
306 | buffer->len = len; | ||
307 | memset(buffer->addr, 0, len); | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | ||
312 | { | ||
313 | if (buffer->addr) { | ||
314 | pci_free_consistent(efx->pci_dev, buffer->len, | ||
315 | buffer->addr, buffer->dma_addr); | ||
316 | buffer->addr = NULL; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | /************************************************************************** | ||
321 | * | ||
322 | * TX path | ||
323 | * | ||
324 | **************************************************************************/ | ||
325 | |||
326 | /* Returns a pointer to the specified transmit descriptor in the TX | ||
327 | * descriptor queue belonging to the specified channel. | ||
328 | */ | ||
329 | static inline efx_qword_t * | ||
330 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) | ||
331 | { | ||
332 | return ((efx_qword_t *) (tx_queue->txd.addr)) + index; | ||
333 | } | ||
334 | |||
335 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | ||
336 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
337 | { | ||
338 | unsigned write_ptr; | ||
339 | efx_dword_t reg; | ||
340 | |||
341 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | ||
342 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | ||
343 | efx_writed_page(tx_queue->efx, ®, | ||
344 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | ||
345 | } | ||
346 | |||
/* Write pointer and first descriptor for TX descriptor ring
 *
 * Writes the descriptor itself along with the doorbell, saving the NIC
 * a descriptor fetch when the queue was previously empty.
 */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	/* The descriptor must start at bit 0 of the register, and the
	 * Falcon-A and B0+ register addresses must coincide, for the
	 * qword copy below to be valid on all NIC revisions. */
	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	/* Low qword carries the descriptor; push command and write
	 * pointer were populated in the high qword above. */
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
364 | |||
/* Decide whether the descriptor-push doorbell may be used.
 *
 * Pushing is only valid when the queue was empty at the point the
 * descriptor was inserted: the last recorded empty_read_count
 * (which includes the EFX_EMPTY_COUNT_VALID flag) must match the
 * write count we are advancing from.  The marker is consumed here,
 * so a fresh one must be recorded before the next push.
 */
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	/* Read once; the completion path updates this concurrently */
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	/* 0 means "no valid empty marker recorded" */
	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	/* Compare counts, ignoring the VALID flag bit */
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
376 | |||
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{

	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	/* Caller must have inserted at least one new descriptor */
	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	/* If the queue was empty, push the first descriptor together
	 * with the doorbell write; otherwise just notify the new
	 * write pointer. */
	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}
416 | |||
417 | /* Allocate hardware resources for a TX queue */ | ||
418 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) | ||
419 | { | ||
420 | struct efx_nic *efx = tx_queue->efx; | ||
421 | unsigned entries; | ||
422 | |||
423 | entries = tx_queue->ptr_mask + 1; | ||
424 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | ||
425 | entries * sizeof(efx_qword_t)); | ||
426 | } | ||
427 | |||
/* Initialise a TX queue on the hardware: pin its descriptor ring in the
 * buffer table and program the descriptor-queue pointer table entry,
 * checksum offload configuration, and (on B0+) TX pacing.
 */
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	/* On B0+ the checksum-disable flags live in the queue's own
	 * pointer-table entry */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* On Falcon-A, checksum offload is a per-queue bit in a shared
	 * global register instead */
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	/* Configure TX pacing: pacing off for high-priority queues */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
485 | |||
486 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
487 | { | ||
488 | struct efx_nic *efx = tx_queue->efx; | ||
489 | efx_oword_t tx_flush_descq; | ||
490 | |||
491 | tx_queue->flushed = FLUSH_PENDING; | ||
492 | |||
493 | /* Post a flush command */ | ||
494 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
495 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | ||
496 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | ||
497 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | ||
498 | } | ||
499 | |||
500 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) | ||
501 | { | ||
502 | struct efx_nic *efx = tx_queue->efx; | ||
503 | efx_oword_t tx_desc_ptr; | ||
504 | |||
505 | /* The queue should have been flushed */ | ||
506 | WARN_ON(tx_queue->flushed != FLUSH_DONE); | ||
507 | |||
508 | /* Remove TX descriptor ring from card */ | ||
509 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
510 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
511 | tx_queue->queue); | ||
512 | |||
513 | /* Unpin TX descriptor ring */ | ||
514 | efx_fini_special_buffer(efx, &tx_queue->txd); | ||
515 | } | ||
516 | |||
517 | /* Free buffers backing TX queue */ | ||
518 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) | ||
519 | { | ||
520 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
521 | } | ||
522 | |||
523 | /************************************************************************** | ||
524 | * | ||
525 | * RX path | ||
526 | * | ||
527 | **************************************************************************/ | ||
528 | |||
529 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | ||
530 | static inline efx_qword_t * | ||
531 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | ||
532 | { | ||
533 | return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; | ||
534 | } | ||
535 | |||
536 | /* This creates an entry in the RX descriptor queue */ | ||
537 | static inline void | ||
538 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | ||
539 | { | ||
540 | struct efx_rx_buffer *rx_buf; | ||
541 | efx_qword_t *rxd; | ||
542 | |||
543 | rxd = efx_rx_desc(rx_queue, index); | ||
544 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
545 | EFX_POPULATE_QWORD_3(*rxd, | ||
546 | FSF_AZ_RX_KER_BUF_SIZE, | ||
547 | rx_buf->len - | ||
548 | rx_queue->efx->type->rx_buffer_padding, | ||
549 | FSF_AZ_RX_KER_BUF_REGION, 0, | ||
550 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | ||
551 | } | ||
552 | |||
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 *
 * Builds hardware descriptors for every buffer added since the last
 * notification, then rings the doorbell with the new write pointer.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	/* Convert newly added software entries into hardware
	 * descriptors in the host-memory ring */
	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	/* Descriptors must be visible before the doorbell write */
	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}
575 | |||
576 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) | ||
577 | { | ||
578 | struct efx_nic *efx = rx_queue->efx; | ||
579 | unsigned entries; | ||
580 | |||
581 | entries = rx_queue->ptr_mask + 1; | ||
582 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | ||
583 | entries * sizeof(efx_qword_t)); | ||
584 | } | ||
585 | |||
/* Initialise an RX queue on the hardware: pin its descriptor ring in
 * the buffer table and program the descriptor-queue pointer table entry.
 */
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	/* iSCSI digest offload is only enabled on B0 and later */
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}
622 | |||
623 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
624 | { | ||
625 | struct efx_nic *efx = rx_queue->efx; | ||
626 | efx_oword_t rx_flush_descq; | ||
627 | |||
628 | rx_queue->flushed = FLUSH_PENDING; | ||
629 | |||
630 | /* Post a flush command */ | ||
631 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
632 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | ||
633 | FRF_AZ_RX_FLUSH_DESCQ, | ||
634 | efx_rx_queue_index(rx_queue)); | ||
635 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | ||
636 | } | ||
637 | |||
638 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) | ||
639 | { | ||
640 | efx_oword_t rx_desc_ptr; | ||
641 | struct efx_nic *efx = rx_queue->efx; | ||
642 | |||
643 | /* The queue should already have been flushed */ | ||
644 | WARN_ON(rx_queue->flushed != FLUSH_DONE); | ||
645 | |||
646 | /* Remove RX descriptor ring from card */ | ||
647 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
648 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
649 | efx_rx_queue_index(rx_queue)); | ||
650 | |||
651 | /* Unpin RX descriptor ring */ | ||
652 | efx_fini_special_buffer(efx, &rx_queue->rxd); | ||
653 | } | ||
654 | |||
655 | /* Free buffers backing RX queue */ | ||
656 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) | ||
657 | { | ||
658 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
659 | } | ||
660 | |||
661 | /************************************************************************** | ||
662 | * | ||
663 | * Event queue processing | ||
664 | * Event queues are processed by per-channel tasklets. | ||
665 | * | ||
666 | **************************************************************************/ | ||
667 | |||
668 | /* Update a channel's event queue's read pointer (RPTR) register | ||
669 | * | ||
670 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
671 | * event queue. | ||
672 | */ | ||
673 | void efx_nic_eventq_read_ack(struct efx_channel *channel) | ||
674 | { | ||
675 | efx_dword_t reg; | ||
676 | struct efx_nic *efx = channel->efx; | ||
677 | |||
678 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, | ||
679 | channel->eventq_read_ptr & channel->eventq_mask); | ||
680 | efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, | ||
681 | channel->channel); | ||
682 | } | ||
683 | |||
/* Use HW to insert a SW defined event
 *
 * Writes the event data plus the target queue ID to the DRV_EV
 * register; the NIC then delivers the event on that channel's queue.
 */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	/* Event data must occupy the low 64 bits of the register for
	 * the two u32 copies below to place it correctly */
	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	/* Target event queue */
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
698 | |||
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 *
 * Returns the number of descriptors completed (0 for non-completion
 * events); the caller uses this to bound work per event-queue poll.
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		/* Descriptors completed since the last recorded read
		 * count, modulo the ring size */
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		/* Feed the adaptive IRQ moderation score */
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		/* Serialise against the transmit data path */
		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		/* Descriptor fetch error: recover by resetting */
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
746 | |||
/* Detect errors included in the rx_evt_pkt_ok bit.
 *
 * Decodes the individual error flags from an RX event whose PKT_OK bit
 * was clear, updates the per-channel error counters, and tells the
 * caller (via *discard) whether the frame must be dropped.
 */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	/* The dribble-nibble flag only exists on Falcon-A */
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
823 | |||
824 | /* Handle receive events that are not in-order. */ | ||
825 | static void | ||
826 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | ||
827 | { | ||
828 | struct efx_nic *efx = rx_queue->efx; | ||
829 | unsigned expected, dropped; | ||
830 | |||
831 | expected = rx_queue->removed_count & rx_queue->ptr_mask; | ||
832 | dropped = (index - expected) & rx_queue->ptr_mask; | ||
833 | netif_info(efx, rx_err, efx->net_dev, | ||
834 | "dropped %d events (index=%d expected=%d)\n", | ||
835 | dropped, index, expected); | ||
836 | |||
837 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
838 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
839 | } | ||
840 | |||
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	/* We expect whole, non-jumbo packets on this channel's queue */
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	/* Out-of-order descriptor pointer indicates dropped events */
	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		/* Classify the error and decide whether to discard */
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	/* Feed the adaptive IRQ moderation score */
	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
903 | |||
904 | static void | ||
905 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | ||
906 | { | ||
907 | struct efx_nic *efx = channel->efx; | ||
908 | unsigned code; | ||
909 | |||
910 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
911 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) | ||
912 | ; /* ignore */ | ||
913 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) | ||
914 | /* The queue must be empty, so we won't receive any rx | ||
915 | * events, so efx_process_channel() won't refill the | ||
916 | * queue. Refill it here */ | ||
917 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); | ||
918 | else | ||
919 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " | ||
920 | "generated event "EFX_QWORD_FMT"\n", | ||
921 | channel->channel, EFX_QWORD_VAL(*event)); | ||
922 | } | ||
923 | |||
/* Handle a driver event (FSE_AZ_EV_CODE_DRIVER_EV)
 *
 * Most sub-codes are informational and only logged; RX_RECOVER and
 * the descriptor-fetch-error sub-codes schedule a reset.
 */
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		/* Falcon-A RX reset: recover via full reset */
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
992 | |||
/* Process events on an event queue, dispatching each to its handler,
 * until the queue is empty or @budget RX packets have been handled.
 *
 * Returns the number of RX packets processed ("spent" budget).
 */
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		/* Copy out of the ring before clearing the slot below */
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			/* RX packets count against the NAPI budget */
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			/* Bound TX work per poll too: stop once we have
			 * completed more than a whole ring's worth */
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
1062 | |||
1063 | /* Check whether an event is present in the eventq at the current | ||
1064 | * read pointer. Only useful for self-test. | ||
1065 | */ | ||
1066 | bool efx_nic_event_present(struct efx_channel *channel) | ||
1067 | { | ||
1068 | return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); | ||
1069 | } | ||
1070 | |||
1071 | /* Allocate buffer table entries for event queue */ | ||
1072 | int efx_nic_probe_eventq(struct efx_channel *channel) | ||
1073 | { | ||
1074 | struct efx_nic *efx = channel->efx; | ||
1075 | unsigned entries; | ||
1076 | |||
1077 | entries = channel->eventq_mask + 1; | ||
1078 | return efx_alloc_special_buffer(efx, &channel->eventq, | ||
1079 | entries * sizeof(efx_qword_t)); | ||
1080 | } | ||
1081 | |||
/* Bring a channel's event queue into service: disable its per-queue
 * timer (Siena only), pin the buffer table entries, clear the queue
 * to "empty", enable it in the hardware pointer table and program the
 * initial interrupt moderation.
 */
void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		/* Siena has a per-queue timer; start with it disabled */
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card; EVQ_SIZE is encoded as log2 of the
	 * (power-of-two) entry count */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}
1116 | |||
/* Take a channel's event queue out of service: clear its entry in the
 * hardware pointer table (and timer table on Siena), then unpin its
 * buffer table entries.  Reverses efx_nic_init_eventq().
 */
void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}
1132 | |||
1133 | /* Free buffers backing event queue */ | ||
1134 | void efx_nic_remove_eventq(struct efx_channel *channel) | ||
1135 | { | ||
1136 | efx_free_special_buffer(channel->efx, &channel->eventq); | ||
1137 | } | ||
1138 | |||
1139 | |||
1140 | void efx_nic_generate_test_event(struct efx_channel *channel) | ||
1141 | { | ||
1142 | unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); | ||
1143 | efx_qword_t test_event; | ||
1144 | |||
1145 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | ||
1146 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
1147 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
1148 | efx_generate_event(channel, &test_event); | ||
1149 | } | ||
1150 | |||
1151 | void efx_nic_generate_fill_event(struct efx_channel *channel) | ||
1152 | { | ||
1153 | unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); | ||
1154 | efx_qword_t test_event; | ||
1155 | |||
1156 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | ||
1157 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
1158 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
1159 | efx_generate_event(channel, &test_event); | ||
1160 | } | ||
1161 | |||
1162 | /************************************************************************** | ||
1163 | * | ||
1164 | * Flush handling | ||
1165 | * | ||
1166 | **************************************************************************/ | ||
1167 | |||
1168 | |||
/* Scan channel 0's event queue for TX/RX flush-completion events and
 * record the outcome in each queue's ->flushed state.  Every event
 * examined (flush-related or not) is consumed, since the queues are
 * about to be destroyed anyway.  Called only from
 * efx_nic_flush_queues().
 */
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	/* Bound the scan to just under one full queue of events; the
	 * pointers here are unmasked, efx_event() applies the mask */
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		/* The sub-code field is only meaningful for driver
		 * events, but extracting it unconditionally is harmless */
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			/* TX flush completed; ignore queue numbers
			 * outside the range this driver uses */
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			/* RX flush completed (possibly with failure,
			 * in which case it will be retried) */
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
1220 | |||
/* Flush all TX and RX queues prior to teardown.  TX and RX flushes
 * run in parallel in the hardware and there's no reason for us to
 * serialise them.
 *
 * Returns 0 on success or -ETIMEDOUT if any flush fails to complete
 * within EFX_FLUSH_POLL_COUNT polls.  In either case every queue's
 * ->flushed state ends up FLUSH_DONE; on timeout the caller is
 * expected to reset the NIC anyway.
 */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		/* First count RX flushes already in flight */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				/* Start or retry RX flushes, keeping at
				 * most EFX_RX_FLUSH_COUNT in flight */
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			/* Count TX flushes that have not yet completed */
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}
1298 | |||
1299 | /************************************************************************** | ||
1300 | * | ||
1301 | * Hardware interrupts | ||
1302 | * The hardware interrupt handler does very little work; all the event | ||
1303 | * queue processing is carried out by per-channel tasklets. | ||
1304 | * | ||
1305 | **************************************************************************/ | ||
1306 | |||
1307 | /* Enable/disable/generate interrupts */ | ||
1308 | static inline void efx_nic_interrupts(struct efx_nic *efx, | ||
1309 | bool enabled, bool force) | ||
1310 | { | ||
1311 | efx_oword_t int_en_reg_ker; | ||
1312 | |||
1313 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | ||
1314 | FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, | ||
1315 | FRF_AZ_KER_INT_KER, force, | ||
1316 | FRF_AZ_DRV_INT_EN_KER, enabled); | ||
1317 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | ||
1318 | } | ||
1319 | |||
/* Clear the interrupt status vector, enable hardware interrupts, then
 * kick every channel so the event queue read pointers are pushed to
 * the NIC.
 */
void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
1335 | |||
/* Mask all driver interrupts at the NIC; the converse of
 * efx_nic_enable_interrupts().
 */
void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}
1341 | |||
/* Generate a test interrupt by setting the force bit while keeping
 * interrupts enabled.
 * Interrupts must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}
1350 | |||
1351 | /* Process a fatal interrupt | ||
1352 | * Disable bus mastering ASAP and schedule a reset | ||
1353 | */ | ||
1354 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | ||
1355 | { | ||
1356 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
1357 | efx_oword_t *int_ker = efx->irq_status.addr; | ||
1358 | efx_oword_t fatal_intr; | ||
1359 | int error, mem_perr; | ||
1360 | |||
1361 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | ||
1362 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | ||
1363 | |||
1364 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " | ||
1365 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
1366 | EFX_OWORD_VAL(fatal_intr), | ||
1367 | error ? "disabling bus mastering" : "no recognised error"); | ||
1368 | |||
1369 | /* If this is a memory parity error dump which blocks are offending */ | ||
1370 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | ||
1371 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); | ||
1372 | if (mem_perr) { | ||
1373 | efx_oword_t reg; | ||
1374 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | ||
1375 | netif_err(efx, hw, efx->net_dev, | ||
1376 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", | ||
1377 | EFX_OWORD_VAL(reg)); | ||
1378 | } | ||
1379 | |||
1380 | /* Disable both devices */ | ||
1381 | pci_clear_master(efx->pci_dev); | ||
1382 | if (efx_nic_is_dual_func(efx)) | ||
1383 | pci_clear_master(nic_data->pci_dev2); | ||
1384 | efx_nic_disable_interrupts(efx); | ||
1385 | |||
1386 | /* Count errors and reset or disable the NIC accordingly */ | ||
1387 | if (efx->int_error_count == 0 || | ||
1388 | time_after(jiffies, efx->int_error_expire)) { | ||
1389 | efx->int_error_count = 0; | ||
1390 | efx->int_error_expire = | ||
1391 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | ||
1392 | } | ||
1393 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | ||
1394 | netif_err(efx, hw, efx->net_dev, | ||
1395 | "SYSTEM ERROR - reset scheduled\n"); | ||
1396 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
1397 | } else { | ||
1398 | netif_err(efx, hw, efx->net_dev, | ||
1399 | "SYSTEM ERROR - max number of errors seen." | ||
1400 | "NIC will be disabled\n"); | ||
1401 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
1402 | } | ||
1403 | |||
1404 | return IRQ_HANDLED; | ||
1405 | } | ||
1406 | |||
/* Handle a legacy (line-based) interrupt
 * Reading the ISR acknowledges the interrupt; event queue processing
 * is then scheduled for each interrupting channel.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours? If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues;
		 * bit N of the ISR corresponds to channel N */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
1476 | |||
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition; only the
	 * channel assigned the fatal interrupt level needs to check */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
1508 | |||
1509 | |||
1510 | /* Setup RSS indirection table. | ||
1511 | * This maps from the hash value of the packet to RXQ | ||
1512 | */ | ||
1513 | void efx_nic_push_rx_indir_table(struct efx_nic *efx) | ||
1514 | { | ||
1515 | size_t i = 0; | ||
1516 | efx_dword_t dword; | ||
1517 | |||
1518 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | ||
1519 | return; | ||
1520 | |||
1521 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | ||
1522 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | ||
1523 | |||
1524 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | ||
1525 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | ||
1526 | efx->rx_indir_table[i]); | ||
1527 | efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); | ||
1528 | } | ||
1529 | } | ||
1530 | |||
1531 | /* Hook interrupt handler(s) | ||
1532 | * Try MSI and then legacy interrupts. | ||
1533 | */ | ||
1534 | int efx_nic_init_interrupt(struct efx_nic *efx) | ||
1535 | { | ||
1536 | struct efx_channel *channel; | ||
1537 | int rc; | ||
1538 | |||
1539 | if (!EFX_INT_MODE_USE_MSI(efx)) { | ||
1540 | irq_handler_t handler; | ||
1541 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | ||
1542 | handler = efx_legacy_interrupt; | ||
1543 | else | ||
1544 | handler = falcon_legacy_interrupt_a1; | ||
1545 | |||
1546 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | ||
1547 | efx->name, efx); | ||
1548 | if (rc) { | ||
1549 | netif_err(efx, drv, efx->net_dev, | ||
1550 | "failed to hook legacy IRQ %d\n", | ||
1551 | efx->pci_dev->irq); | ||
1552 | goto fail1; | ||
1553 | } | ||
1554 | return 0; | ||
1555 | } | ||
1556 | |||
1557 | /* Hook MSI or MSI-X interrupt */ | ||
1558 | efx_for_each_channel(channel, efx) { | ||
1559 | rc = request_irq(channel->irq, efx_msi_interrupt, | ||
1560 | IRQF_PROBE_SHARED, /* Not shared */ | ||
1561 | efx->channel_name[channel->channel], | ||
1562 | &efx->channel[channel->channel]); | ||
1563 | if (rc) { | ||
1564 | netif_err(efx, drv, efx->net_dev, | ||
1565 | "failed to hook IRQ %d\n", channel->irq); | ||
1566 | goto fail2; | ||
1567 | } | ||
1568 | } | ||
1569 | |||
1570 | return 0; | ||
1571 | |||
1572 | fail2: | ||
1573 | efx_for_each_channel(channel, efx) | ||
1574 | free_irq(channel->irq, &efx->channel[channel->channel]); | ||
1575 | fail1: | ||
1576 | return rc; | ||
1577 | } | ||
1578 | |||
/* Tear down interrupt handling: release all MSI/MSI-X IRQs, then ACK
 * and release the legacy IRQ if one was hooked.  Reverses
 * efx_nic_init_interrupt().
 */
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
1600 | |||
1601 | u32 efx_nic_fpga_ver(struct efx_nic *efx) | ||
1602 | { | ||
1603 | efx_oword_t altera_build; | ||
1604 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | ||
1605 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | ||
1606 | } | ||
1607 | |||
/* One-time initialisation of registers common to all NIC revisions:
 * descriptor cache placement/size, interrupt vector address, fatal
 * interrupt mask, RSS indirection table, and TX datapath tuning.
 */
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	/* The register holds a mask of *disabled* sources, hence the invert */
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
1698 | |||
1699 | /* Register dump */ | ||
1700 | |||
/* Revision codes for the register-dump tables below; values are
 * ordered so revision ranges can be tested with <= / >= */
#define REGISTER_REVISION_A 1
#define REGISTER_REVISION_B 2
#define REGISTER_REVISION_C 3
#define REGISTER_REVISION_Z 3 /* latest revision */

/* One dumpable register: host offset plus the inclusive range of
 * hardware revisions that implement it (2-bit fields hold 1..3) */
struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

/* Expand to an initialiser using the FR_<revs>_<name> offset constant */
#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
1721 | |||
/* Registers captured by efx_nic_get_regs().  Registers that are
 * write-only (WO), read-to-clear (RC), indirect or unused are noted
 * in comments and deliberately omitted from the table. */
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
1828 | |||
/* One dumpable register table: base offset, revision range, byte
 * step between rows, and number of rows to capture */
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

/* First argument (the name) is unused; it exists so the macro call
 * reads the same as REGISTER_TABLE() */
#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
/* A table whose row count differs between B and C+ revisions expands
 * to two array entries */
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
1858 | |||
/* Register tables captured by efx_nic_get_regs(); write-only and
 * unmapped tables are noted in comments and omitted. */
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
1889 | |||
1890 | size_t efx_nic_get_regs_len(struct efx_nic *efx) | ||
1891 | { | ||
1892 | const struct efx_nic_reg *reg; | ||
1893 | const struct efx_nic_reg_table *table; | ||
1894 | size_t len = 0; | ||
1895 | |||
1896 | for (reg = efx_nic_regs; | ||
1897 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); | ||
1898 | reg++) | ||
1899 | if (efx->type->revision >= reg->min_revision && | ||
1900 | efx->type->revision <= reg->max_revision) | ||
1901 | len += sizeof(efx_oword_t); | ||
1902 | |||
1903 | for (table = efx_nic_reg_tables; | ||
1904 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); | ||
1905 | table++) | ||
1906 | if (efx->type->revision >= table->min_revision && | ||
1907 | efx->type->revision <= table->max_revision) | ||
1908 | len += table->rows * min_t(size_t, table->step, 16); | ||
1909 | |||
1910 | return len; | ||
1911 | } | ||
1912 | |||
/* Copy a register dump into buf, which must be at least
 * efx_nic_get_regs_len(efx) bytes.  The layout mirrors the length
 * calculation: every applicable single register (one oword each),
 * followed by each applicable table row by row (up to 16 bytes per
 * row, determined by the table's step).
 */
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		/* Bytes stored per row; rows wider than 16 bytes are
		 * truncated to 16 */
		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			/* The read width is chosen by the row step */
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h new file mode 100644 index 00000000000..7443f99c977 --- /dev/null +++ b/drivers/net/sfc/nic.h | |||
@@ -0,0 +1,271 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_NIC_H | ||
12 | #define EFX_NIC_H | ||
13 | |||
14 | #include <linux/i2c-algo-bit.h> | ||
15 | #include "net_driver.h" | ||
16 | #include "efx.h" | ||
17 | #include "mcdi.h" | ||
18 | #include "spi.h" | ||
19 | |||
20 | /* | ||
21 | * Falcon hardware control | ||
22 | */ | ||
23 | |||
24 | enum { | ||
25 | EFX_REV_FALCON_A0 = 0, | ||
26 | EFX_REV_FALCON_A1 = 1, | ||
27 | EFX_REV_FALCON_B0 = 2, | ||
28 | EFX_REV_SIENA_A0 = 3, | ||
29 | }; | ||
30 | |||
/* Return the hardware revision (one of the EFX_REV_* enumerators) */
static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}
35 | |||
36 | extern u32 efx_nic_fpga_ver(struct efx_nic *efx); | ||
37 | |||
/* True if the NIC has a management controller (Siena A0 and later) */
static inline bool efx_nic_has_mc(struct efx_nic *efx)
{
	return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
}
/* NIC has two interlinked PCI functions for the same port.
 * True only for revisions before Falcon B0 (i.e. Falcon A0/A1). */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
	return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
47 | |||
48 | enum { | ||
49 | PHY_TYPE_NONE = 0, | ||
50 | PHY_TYPE_TXC43128 = 1, | ||
51 | PHY_TYPE_88E1111 = 2, | ||
52 | PHY_TYPE_SFX7101 = 3, | ||
53 | PHY_TYPE_QT2022C2 = 4, | ||
54 | PHY_TYPE_PM8358 = 6, | ||
55 | PHY_TYPE_SFT9001A = 8, | ||
56 | PHY_TYPE_QT2025C = 9, | ||
57 | PHY_TYPE_SFT9001B = 10, | ||
58 | }; | ||
59 | |||
60 | #define FALCON_XMAC_LOOPBACKS \ | ||
61 | ((1 << LOOPBACK_XGMII) | \ | ||
62 | (1 << LOOPBACK_XGXS) | \ | ||
63 | (1 << LOOPBACK_XAUI)) | ||
64 | |||
65 | #define FALCON_GMAC_LOOPBACKS \ | ||
66 | (1 << LOOPBACK_GMAC) | ||
67 | |||
68 | /** | ||
69 | * struct falcon_board_type - board operations and type information | ||
70 | * @id: Board type id, as found in NVRAM | ||
71 | * @ref_model: Model number of Solarflare reference design | ||
72 | * @gen_type: Generic board type description | ||
73 | * @init: Allocate resources and initialise peripheral hardware | ||
74 | * @init_phy: Do board-specific PHY initialisation | ||
75 | * @fini: Shut down hardware and free resources | ||
76 | * @set_id_led: Set state of identifying LED or revert to automatic function | ||
77 | * @monitor: Board-specific health check function | ||
78 | */ | ||
79 | struct falcon_board_type { | ||
80 | u8 id; | ||
81 | const char *ref_model; | ||
82 | const char *gen_type; | ||
83 | int (*init) (struct efx_nic *nic); | ||
84 | void (*init_phy) (struct efx_nic *efx); | ||
85 | void (*fini) (struct efx_nic *nic); | ||
86 | void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode); | ||
87 | int (*monitor) (struct efx_nic *nic); | ||
88 | }; | ||
89 | |||
90 | /** | ||
91 | * struct falcon_board - board information | ||
92 | * @type: Type of board | ||
93 | * @major: Major rev. ('A', 'B' ...) | ||
94 | * @minor: Minor rev. (0, 1, ...) | ||
95 | * @i2c_adap: I2C adapter for on-board peripherals | ||
96 | * @i2c_data: Data for bit-banging algorithm | ||
97 | * @hwmon_client: I2C client for hardware monitor | ||
98 | * @ioexp_client: I2C client for power/port control | ||
99 | */ | ||
100 | struct falcon_board { | ||
101 | const struct falcon_board_type *type; | ||
102 | int major; | ||
103 | int minor; | ||
104 | struct i2c_adapter i2c_adap; | ||
105 | struct i2c_algo_bit_data i2c_data; | ||
106 | struct i2c_client *hwmon_client, *ioexp_client; | ||
107 | }; | ||
108 | |||
109 | /** | ||
110 | * struct falcon_nic_data - Falcon NIC state | ||
111 | * @pci_dev2: Secondary function of Falcon A | ||
112 | * @board: Board state and functions | ||
113 | * @stats_disable_count: Nest count for disabling statistics fetches | ||
114 | * @stats_pending: Is there a pending DMA of MAC statistics. | ||
115 | * @stats_timer: A timer for regularly fetching MAC statistics. | ||
116 | * @stats_dma_done: Pointer to the flag which indicates DMA completion. | ||
117 | * @spi_flash: SPI flash device | ||
118 | * @spi_eeprom: SPI EEPROM device | ||
119 | * @spi_lock: SPI bus lock | ||
120 | * @mdio_lock: MDIO bus lock | ||
121 | * @xmac_poll_required: XMAC link state needs polling | ||
122 | */ | ||
123 | struct falcon_nic_data { | ||
124 | struct pci_dev *pci_dev2; | ||
125 | struct falcon_board board; | ||
126 | unsigned int stats_disable_count; | ||
127 | bool stats_pending; | ||
128 | struct timer_list stats_timer; | ||
129 | u32 *stats_dma_done; | ||
130 | struct efx_spi_device spi_flash; | ||
131 | struct efx_spi_device spi_eeprom; | ||
132 | struct mutex spi_lock; | ||
133 | struct mutex mdio_lock; | ||
134 | bool xmac_poll_required; | ||
135 | }; | ||
136 | |||
/* Return the board state embedded in the Falcon-specific NIC data.
 * Only valid when efx->nic_data actually is a struct falcon_nic_data. */
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
{
	struct falcon_nic_data *data = efx->nic_data;
	return &data->board;
}
142 | |||
143 | /** | ||
144 | * struct siena_nic_data - Siena NIC state | ||
145 | * @mcdi: Management-Controller-to-Driver Interface | ||
146 | * @wol_filter_id: Wake-on-LAN packet filter id | ||
147 | */ | ||
148 | struct siena_nic_data { | ||
149 | struct efx_mcdi_iface mcdi; | ||
150 | int wol_filter_id; | ||
151 | }; | ||
152 | |||
153 | extern const struct efx_nic_type falcon_a1_nic_type; | ||
154 | extern const struct efx_nic_type falcon_b0_nic_type; | ||
155 | extern const struct efx_nic_type siena_a0_nic_type; | ||
156 | |||
157 | /************************************************************************** | ||
158 | * | ||
159 | * Externs | ||
160 | * | ||
161 | ************************************************************************** | ||
162 | */ | ||
163 | |||
164 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); | ||
165 | |||
166 | /* TX data path */ | ||
167 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | ||
168 | extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); | ||
169 | extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); | ||
170 | extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); | ||
171 | extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); | ||
172 | |||
173 | /* RX data path */ | ||
174 | extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); | ||
175 | extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); | ||
176 | extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); | ||
177 | extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); | ||
178 | extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); | ||
179 | |||
180 | /* Event data path */ | ||
181 | extern int efx_nic_probe_eventq(struct efx_channel *channel); | ||
182 | extern void efx_nic_init_eventq(struct efx_channel *channel); | ||
183 | extern void efx_nic_fini_eventq(struct efx_channel *channel); | ||
184 | extern void efx_nic_remove_eventq(struct efx_channel *channel); | ||
185 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); | ||
186 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); | ||
187 | extern bool efx_nic_event_present(struct efx_channel *channel); | ||
188 | |||
189 | /* MAC/PHY */ | ||
190 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); | ||
191 | extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); | ||
192 | |||
193 | /* Interrupts and test events */ | ||
194 | extern int efx_nic_init_interrupt(struct efx_nic *efx); | ||
195 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); | ||
196 | extern void efx_nic_generate_test_event(struct efx_channel *channel); | ||
197 | extern void efx_nic_generate_fill_event(struct efx_channel *channel); | ||
198 | extern void efx_nic_generate_interrupt(struct efx_nic *efx); | ||
199 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); | ||
200 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); | ||
201 | extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); | ||
202 | extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); | ||
203 | extern void falcon_irq_ack_a1(struct efx_nic *efx); | ||
204 | |||
205 | #define EFX_IRQ_MOD_RESOLUTION 5 | ||
206 | |||
207 | /* Global Resources */ | ||
208 | extern int efx_nic_flush_queues(struct efx_nic *efx); | ||
209 | extern void falcon_start_nic_stats(struct efx_nic *efx); | ||
210 | extern void falcon_stop_nic_stats(struct efx_nic *efx); | ||
211 | extern void falcon_setup_xaui(struct efx_nic *efx); | ||
212 | extern int falcon_reset_xaui(struct efx_nic *efx); | ||
213 | extern void efx_nic_init_common(struct efx_nic *efx); | ||
214 | extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); | ||
215 | |||
216 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | ||
217 | unsigned int len); | ||
218 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); | ||
219 | |||
220 | /* Tests */ | ||
221 | struct efx_nic_register_test { | ||
222 | unsigned address; | ||
223 | efx_oword_t mask; | ||
224 | }; | ||
225 | extern int efx_nic_test_registers(struct efx_nic *efx, | ||
226 | const struct efx_nic_register_test *regs, | ||
227 | size_t n_regs); | ||
228 | |||
229 | extern size_t efx_nic_get_regs_len(struct efx_nic *efx); | ||
230 | extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); | ||
231 | |||
232 | /************************************************************************** | ||
233 | * | ||
234 | * Falcon MAC stats | ||
235 | * | ||
236 | ************************************************************************** | ||
237 | */ | ||
238 | |||
239 | #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) | ||
240 | #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) | ||
241 | |||
/* Retrieve statistic from statistics block.
 * Accumulates one MAC statistic from the little-endian statistics buffer
 * (efx->stats_buffer) into the matching efx->mac_stats field, dispatching
 * on the statistic's declared width (16/32/64 bits). */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
			*((__force __le16 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
			*((__force __le32 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else							\
		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
			*((__force __le64 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	} while (0)
260 | |||
261 | #define FALCON_MAC_STATS_SIZE 0x100 | ||
262 | |||
263 | #define MAC_DATA_LBN 0 | ||
264 | #define MAC_DATA_WIDTH 32 | ||
265 | |||
266 | extern void efx_nic_generate_event(struct efx_channel *channel, | ||
267 | efx_qword_t *event); | ||
268 | |||
269 | extern void falcon_poll_xmac(struct efx_nic *efx); | ||
270 | |||
271 | #endif /* EFX_NIC_H */ | ||
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h new file mode 100644 index 00000000000..11d148cd844 --- /dev/null +++ b/drivers/net/sfc/phy.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #ifndef EFX_PHY_H | ||
11 | #define EFX_PHY_H | ||
12 | |||
13 | /**************************************************************************** | ||
14 | * 10Xpress (SFX7101) PHY | ||
15 | */ | ||
16 | extern const struct efx_phy_operations falcon_sfx7101_phy_ops; | ||
17 | |||
18 | extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); | ||
19 | |||
20 | /**************************************************************************** | ||
21 | * AMCC/Quake QT202x PHYs | ||
22 | */ | ||
23 | extern const struct efx_phy_operations falcon_qt202x_phy_ops; | ||
24 | |||
25 | /* These PHYs provide various H/W control states for LEDs */ | ||
26 | #define QUAKE_LED_LINK_INVAL (0) | ||
27 | #define QUAKE_LED_LINK_STAT (1) | ||
28 | #define QUAKE_LED_LINK_ACT (2) | ||
29 | #define QUAKE_LED_LINK_ACTSTAT (3) | ||
30 | #define QUAKE_LED_OFF (4) | ||
31 | #define QUAKE_LED_ON (5) | ||
32 | #define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */ | ||
33 | /* What link the LED tracks */ | ||
34 | #define QUAKE_LED_TXLINK (0) | ||
35 | #define QUAKE_LED_RXLINK (8) | ||
36 | |||
37 | extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); | ||
38 | |||
39 | /**************************************************************************** | ||
40 | * Transwitch CX4 retimer | ||
41 | */ | ||
42 | extern const struct efx_phy_operations falcon_txc_phy_ops; | ||
43 | |||
44 | #define TXC_GPIO_DIR_INPUT 0 | ||
45 | #define TXC_GPIO_DIR_OUTPUT 1 | ||
46 | |||
47 | extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); | ||
48 | extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); | ||
49 | |||
50 | /**************************************************************************** | ||
51 | * Siena managed PHYs | ||
52 | */ | ||
53 | extern const struct efx_phy_operations efx_mcdi_phy_ops; | ||
54 | |||
55 | extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, | ||
56 | unsigned int prtad, unsigned int devad, | ||
57 | u16 addr, u16 *value_out, u32 *status_out); | ||
58 | extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, | ||
59 | unsigned int prtad, unsigned int devad, | ||
60 | u16 addr, u16 value, u32 *status_out); | ||
61 | extern void efx_mcdi_phy_decode_link(struct efx_nic *efx, | ||
62 | struct efx_link_state *link_state, | ||
63 | u32 speed, u32 flags, u32 fcntl); | ||
64 | extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx); | ||
65 | extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa); | ||
66 | |||
67 | #endif | ||
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c new file mode 100644 index 00000000000..7ad97e39740 --- /dev/null +++ b/drivers/net/sfc/qt202x_phy.c | |||
@@ -0,0 +1,462 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | /* | ||
10 | * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/timer.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include "efx.h" | ||
17 | #include "mdio_10g.h" | ||
18 | #include "phy.h" | ||
19 | #include "nic.h" | ||
20 | |||
21 | #define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \ | ||
22 | MDIO_DEVS_PMAPMD | \ | ||
23 | MDIO_DEVS_PHYXS) | ||
24 | |||
25 | #define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \ | ||
26 | (1 << LOOPBACK_PMAPMD) | \ | ||
27 | (1 << LOOPBACK_PHYXS_WS)) | ||
28 | |||
29 | /****************************************************************************/ | ||
30 | /* Quake-specific MDIO registers */ | ||
31 | #define MDIO_QUAKE_LED0_REG (0xD006) | ||
32 | |||
33 | /* QT2025C only */ | ||
34 | #define PCS_FW_HEARTBEAT_REG 0xd7ee | ||
35 | #define PCS_FW_HEARTB_LBN 0 | ||
36 | #define PCS_FW_HEARTB_WIDTH 8 | ||
37 | #define PCS_FW_PRODUCT_CODE_1 0xd7f0 | ||
38 | #define PCS_FW_VERSION_1 0xd7f3 | ||
39 | #define PCS_FW_BUILD_1 0xd7f6 | ||
40 | #define PCS_UC8051_STATUS_REG 0xd7fd | ||
41 | #define PCS_UC_STATUS_LBN 0 | ||
42 | #define PCS_UC_STATUS_WIDTH 8 | ||
43 | #define PCS_UC_STATUS_FW_SAVE 0x20 | ||
44 | #define PMA_PMD_MODE_REG 0xc301 | ||
45 | #define PMA_PMD_RXIN_SEL_LBN 6 | ||
46 | #define PMA_PMD_FTX_CTRL2_REG 0xc309 | ||
47 | #define PMA_PMD_FTX_STATIC_LBN 13 | ||
48 | #define PMA_PMD_VEND1_REG 0xc001 | ||
49 | #define PMA_PMD_VEND1_LBTXD_LBN 15 | ||
50 | #define PCS_VEND1_REG 0xc000 | ||
51 | #define PCS_VEND1_LBTXD_LBN 5 | ||
52 | |||
53 | void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode) | ||
54 | { | ||
55 | int addr = MDIO_QUAKE_LED0_REG + led; | ||
56 | efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); | ||
57 | } | ||
58 | |||
/* Per-PHY private state, hung off efx->phy_data */
struct qt202x_phy_data {
	enum efx_phy_mode phy_mode;	/* mode applied at last reconfigure */
	bool bug17190_in_bad_state;	/* bad PCS state currently observed? */
	unsigned long bug17190_timer;	/* jiffies deadline for the workaround */
	u32 firmware_ver;		/* packed firmware version (QT2025C only) */
};
65 | |||
66 | #define QT2022C2_MAX_RESET_TIME 500 | ||
67 | #define QT2022C2_RESET_WAIT 10 | ||
68 | |||
69 | #define QT2025C_MAX_HEARTB_TIME (5 * HZ) | ||
70 | #define QT2025C_HEARTB_WAIT 100 | ||
71 | #define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10) | ||
72 | #define QT2025C_FWSTART_WAIT 100 | ||
73 | |||
74 | #define BUG17190_INTERVAL (2 * HZ) | ||
75 | |||
76 | static int qt2025c_wait_heartbeat(struct efx_nic *efx) | ||
77 | { | ||
78 | unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME; | ||
79 | int reg, old_counter = 0; | ||
80 | |||
81 | /* Wait for firmware heartbeat to start */ | ||
82 | for (;;) { | ||
83 | int counter; | ||
84 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG); | ||
85 | if (reg < 0) | ||
86 | return reg; | ||
87 | counter = ((reg >> PCS_FW_HEARTB_LBN) & | ||
88 | ((1 << PCS_FW_HEARTB_WIDTH) - 1)); | ||
89 | if (old_counter == 0) | ||
90 | old_counter = counter; | ||
91 | else if (counter != old_counter) | ||
92 | break; | ||
93 | if (time_after(jiffies, timeout)) { | ||
94 | /* Some cables have EEPROMs that conflict with the | ||
95 | * PHY's on-board EEPROM so it cannot load firmware */ | ||
96 | netif_err(efx, hw, efx->net_dev, | ||
97 | "If an SFP+ direct attach cable is" | ||
98 | " connected, please check that it complies" | ||
99 | " with the SFP+ specification\n"); | ||
100 | return -ETIMEDOUT; | ||
101 | } | ||
102 | msleep(QT2025C_HEARTB_WAIT); | ||
103 | } | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static int qt2025c_wait_fw_status_good(struct efx_nic *efx) | ||
109 | { | ||
110 | unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME; | ||
111 | int reg; | ||
112 | |||
113 | /* Wait for firmware status to look good */ | ||
114 | for (;;) { | ||
115 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); | ||
116 | if (reg < 0) | ||
117 | return reg; | ||
118 | if ((reg & | ||
119 | ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >= | ||
120 | PCS_UC_STATUS_FW_SAVE) | ||
121 | break; | ||
122 | if (time_after(jiffies, timeout)) | ||
123 | return -ETIMEDOUT; | ||
124 | msleep(QT2025C_FWSTART_WAIT); | ||
125 | } | ||
126 | |||
127 | return 0; | ||
128 | } | ||
129 | |||
/* Restart microcontroller execution of firmware from RAM.
 * The two writes to vendor register 0xe854 in MMD 3 presumably toggle a
 * microcontroller run/reset control bit — the register is vendor magic
 * and undocumented here; TODO confirm against AMCC documentation.  The
 * 50ms sleep gives the firmware time to come back up. */
static void qt2025c_restart_firmware(struct efx_nic *efx)
{
	efx_mdio_write(efx, 3, 0xe854, 0x00c0);
	efx_mdio_write(efx, 3, 0xe854, 0x0040);
	msleep(50);
}
137 | |||
138 | static int qt2025c_wait_reset(struct efx_nic *efx) | ||
139 | { | ||
140 | int rc; | ||
141 | |||
142 | rc = qt2025c_wait_heartbeat(efx); | ||
143 | if (rc != 0) | ||
144 | return rc; | ||
145 | |||
146 | rc = qt2025c_wait_fw_status_good(efx); | ||
147 | if (rc == -ETIMEDOUT) { | ||
148 | /* Bug 17689: occasionally heartbeat starts but firmware status | ||
149 | * code never progresses beyond 0x00. Try again, once, after | ||
150 | * restarting execution of the firmware image. */ | ||
151 | netif_dbg(efx, hw, efx->net_dev, | ||
152 | "bashing QT2025C microcontroller\n"); | ||
153 | qt2025c_restart_firmware(efx); | ||
154 | rc = qt2025c_wait_heartbeat(efx); | ||
155 | if (rc != 0) | ||
156 | return rc; | ||
157 | rc = qt2025c_wait_fw_status_good(efx); | ||
158 | } | ||
159 | |||
160 | return rc; | ||
161 | } | ||
162 | |||
163 | static void qt2025c_firmware_id(struct efx_nic *efx) | ||
164 | { | ||
165 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
166 | u8 firmware_id[9]; | ||
167 | size_t i; | ||
168 | |||
169 | for (i = 0; i < sizeof(firmware_id); i++) | ||
170 | firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, | ||
171 | PCS_FW_PRODUCT_CODE_1 + i); | ||
172 | netif_info(efx, probe, efx->net_dev, | ||
173 | "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", | ||
174 | (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], | ||
175 | firmware_id[3] >> 4, firmware_id[3] & 0xf, | ||
176 | firmware_id[4], firmware_id[5], | ||
177 | firmware_id[6], firmware_id[7], firmware_id[8]); | ||
178 | phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | | ||
179 | ((firmware_id[3] & 0x0f) << 16) | | ||
180 | (firmware_id[4] << 8) | firmware_id[5]; | ||
181 | } | ||
182 | |||
/* Work around bug 17190: the PHY can get stuck in a state where it
 * reports PHY_XS and PMA/PMD layers up, but PCS down (no block_lock).
 * If we notice this state persisting for a couple of seconds
 * (BUG17190_INTERVAL), we switch PMA/PMD loopback briefly on and then
 * off again, which is normally sufficient to recover it.  Called from
 * qt202x_phy_poll().
 */
static void qt2025c_bug17190_workaround(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;

	/* Healthy cases: either the link is up, or PMA/PMD / PHY_XS are
	 * down too (link genuinely down, not stuck) — clear the state. */
	if (efx->link_state.up ||
	    !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
		phy_data->bug17190_in_bad_state = false;
		return;
	}

	/* First sighting of the bad state: arm the timer and wait to see
	 * whether it persists before intervening. */
	if (!phy_data->bug17190_in_bad_state) {
		phy_data->bug17190_in_bad_state = true;
		phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
		return;
	}

	if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
		netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
		/* Pulse PMA/PMD loopback on, then off, then re-arm */
		efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				  MDIO_PMA_CTRL1_LOOPBACK, true);
		msleep(100);
		efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				  MDIO_PMA_CTRL1_LOOPBACK, false);
		phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
	}
}
215 | |||
/* Choose between "SFP+ Self-Configure" mode (0x0038) and a fixed mode
 * (0x0020, needed for loopbacks) and, if a change is required, replay
 * the boot-EEPROM configuration sequence with the new mode and restart
 * the PHY firmware.  Returns 0 on success (including nothing-to-do) or
 * a negative error code.  The register writes are vendor-specific magic
 * replicated from the boot EEPROM — do not reorder them.
 */
static int qt2025c_select_phy_mode(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;
	struct falcon_board *board = falcon_board(efx);
	int reg, rc, i;
	uint16_t phy_op_mode;

	/* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
	 * Self-Configure mode.  Don't attempt any switching if we encounter
	 * older firmware. */
	if (phy_data->firmware_ver < 0x02000100)
		return 0;

	/* In general we will get optimal behaviour in "SFP+ Self-Configure"
	 * mode; however, that powers down most of the PHY when no module is
	 * present, so we must use a different mode (any fixed mode will do)
	 * to be sure that loopbacks will work. */
	phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;

	/* Only change mode if really necessary */
	reg = efx_mdio_read(efx, 1, 0xc319);
	if ((reg & 0x0038) == phy_op_mode)
		return 0;
	netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
		  phy_op_mode);

	/* This sequence replicates the register writes configured in the boot
	 * EEPROM (including the differences between board revisions), except
	 * that the operating mode is changed, and the PHY is prevented from
	 * unnecessarily reloading the main firmware image again. */
	efx_mdio_write(efx, 1, 0xc300, 0x0000);
	/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
	 * STOPs onto the firmware/module I2C bus to reset it, varies across
	 * board revisions, as the bus is connected to different GPIO/LED
	 * outputs on the PHY.) */
	if (board->major == 0 && board->minor < 2) {
		efx_mdio_write(efx, 1, 0xc303, 0x4498);
		for (i = 0; i < 9; i++) {
			efx_mdio_write(efx, 1, 0xc303, 0x4488);
			efx_mdio_write(efx, 1, 0xc303, 0x4480);
			efx_mdio_write(efx, 1, 0xc303, 0x4490);
			efx_mdio_write(efx, 1, 0xc303, 0x4498);
		}
	} else {
		efx_mdio_write(efx, 1, 0xc303, 0x0920);
		efx_mdio_write(efx, 1, 0xd008, 0x0004);
		for (i = 0; i < 9; i++) {
			efx_mdio_write(efx, 1, 0xc303, 0x0900);
			efx_mdio_write(efx, 1, 0xd008, 0x0005);
			efx_mdio_write(efx, 1, 0xc303, 0x0920);
			efx_mdio_write(efx, 1, 0xd008, 0x0004);
		}
		efx_mdio_write(efx, 1, 0xc303, 0x4900);
	}
	/* NOTE(review): this write repeats the last write of the else-branch
	 * above; the duplication follows the boot EEPROM sequence and is
	 * intentional. */
	efx_mdio_write(efx, 1, 0xc303, 0x4900);
	efx_mdio_write(efx, 1, 0xc302, 0x0004);
	efx_mdio_write(efx, 1, 0xc316, 0x0013);
	efx_mdio_write(efx, 1, 0xc318, 0x0054);
	efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
	efx_mdio_write(efx, 1, 0xc31a, 0x0098);
	efx_mdio_write(efx, 3, 0x0026, 0x0e00);
	efx_mdio_write(efx, 3, 0x0027, 0x0013);
	efx_mdio_write(efx, 3, 0x0028, 0xa528);
	efx_mdio_write(efx, 1, 0xd006, 0x000a);
	efx_mdio_write(efx, 1, 0xd007, 0x0009);
	efx_mdio_write(efx, 1, 0xd008, 0x0004);
	/* This additional write is not present in the boot EEPROM.  It
	 * prevents the PHY's internal boot ROM doing another pointless (and
	 * slow) reload of the firmware image (the microcontroller's code
	 * memory is not affected by the microcontroller reset). */
	efx_mdio_write(efx, 1, 0xc317, 0x00ff);
	/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
	 * restart doesn't reset it.  We need to do that ourselves. */
	efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
			  1 << PMA_PMD_RXIN_SEL_LBN, false);
	efx_mdio_write(efx, 1, 0xc300, 0x0002);
	msleep(20);

	/* Restart microcontroller execution of firmware from RAM */
	qt2025c_restart_firmware(efx);

	/* Wait for the microcontroller to be ready again */
	rc = qt2025c_wait_reset(efx);
	if (rc < 0) {
		netif_err(efx, hw, efx->net_dev,
			  "PHY microcontroller reset during mode switch "
			  "timed out\n");
		return rc;
	}

	return 0;
}
308 | |||
309 | static int qt202x_reset_phy(struct efx_nic *efx) | ||
310 | { | ||
311 | int rc; | ||
312 | |||
313 | if (efx->phy_type == PHY_TYPE_QT2025C) { | ||
314 | /* Wait for the reset triggered by falcon_reset_hw() | ||
315 | * to complete */ | ||
316 | rc = qt2025c_wait_reset(efx); | ||
317 | if (rc < 0) | ||
318 | goto fail; | ||
319 | } else { | ||
320 | /* Reset the PHYXS MMD. This is documented as doing | ||
321 | * a complete soft reset. */ | ||
322 | rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, | ||
323 | QT2022C2_MAX_RESET_TIME / | ||
324 | QT2022C2_RESET_WAIT, | ||
325 | QT2022C2_RESET_WAIT); | ||
326 | if (rc < 0) | ||
327 | goto fail; | ||
328 | } | ||
329 | |||
330 | /* Wait 250ms for the PHY to complete bootup */ | ||
331 | msleep(250); | ||
332 | |||
333 | falcon_board(efx)->type->init_phy(efx); | ||
334 | |||
335 | return 0; | ||
336 | |||
337 | fail: | ||
338 | netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n"); | ||
339 | return rc; | ||
340 | } | ||
341 | |||
342 | static int qt202x_phy_probe(struct efx_nic *efx) | ||
343 | { | ||
344 | struct qt202x_phy_data *phy_data; | ||
345 | |||
346 | phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); | ||
347 | if (!phy_data) | ||
348 | return -ENOMEM; | ||
349 | efx->phy_data = phy_data; | ||
350 | phy_data->phy_mode = efx->phy_mode; | ||
351 | phy_data->bug17190_in_bad_state = false; | ||
352 | phy_data->bug17190_timer = 0; | ||
353 | |||
354 | efx->mdio.mmds = QT202X_REQUIRED_DEVS; | ||
355 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
356 | efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | static int qt202x_phy_init(struct efx_nic *efx) | ||
361 | { | ||
362 | u32 devid; | ||
363 | int rc; | ||
364 | |||
365 | rc = qt202x_reset_phy(efx); | ||
366 | if (rc) { | ||
367 | netif_err(efx, probe, efx->net_dev, "PHY init failed\n"); | ||
368 | return rc; | ||
369 | } | ||
370 | |||
371 | devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); | ||
372 | netif_info(efx, probe, efx->net_dev, | ||
373 | "PHY ID reg %x (OUI %06x model %02x revision %x)\n", | ||
374 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), | ||
375 | efx_mdio_id_rev(devid)); | ||
376 | |||
377 | if (efx->phy_type == PHY_TYPE_QT2025C) | ||
378 | qt2025c_firmware_id(efx); | ||
379 | |||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int qt202x_link_ok(struct efx_nic *efx) | ||
384 | { | ||
385 | return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS); | ||
386 | } | ||
387 | |||
388 | static bool qt202x_phy_poll(struct efx_nic *efx) | ||
389 | { | ||
390 | bool was_up = efx->link_state.up; | ||
391 | |||
392 | efx->link_state.up = qt202x_link_ok(efx); | ||
393 | efx->link_state.speed = 10000; | ||
394 | efx->link_state.fd = true; | ||
395 | efx->link_state.fc = efx->wanted_fc; | ||
396 | |||
397 | if (efx->phy_type == PHY_TYPE_QT2025C) | ||
398 | qt2025c_bug17190_workaround(efx); | ||
399 | |||
400 | return efx->link_state.up != was_up; | ||
401 | } | ||
402 | |||
/* Apply the current PHY mode and loopback settings to the hardware.
 * On the QT2025C this may switch the PHY operating mode (restarting its
 * firmware); on the QT2022C2 it may reset the PHY when TX is being
 * re-enabled.  Returns 0 on success or a negative error code.
 */
static int qt202x_phy_reconfigure(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;

	if (efx->phy_type == PHY_TYPE_QT2025C) {
		int rc = qt2025c_select_phy_mode(efx);
		if (rc)
			return rc;

		/* There are several different register bits which can
		 * disable TX (and save power) on direct-attach cables
		 * or optical transceivers, varying somewhat between
		 * firmware versions.  Only 'static mode' appears to
		 * cover everything. */
		mdio_set_flag(
			&efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
			PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
			efx->phy_mode & PHY_MODE_TX_DISABLED ||
			efx->phy_mode & PHY_MODE_LOW_POWER ||
			efx->loopback_mode == LOOPBACK_PCS ||
			efx->loopback_mode == LOOPBACK_PMAPMD);
	} else {
		/* Reset the PHY when moving from tx off to tx on */
		if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
		    (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
			qt202x_reset_phy(efx);

		efx_mdio_transmit_disable(efx);
	}

	efx_mdio_phy_reconfigure(efx);

	/* Remember the applied mode so the next call can detect the
	 * TX-disabled -> TX-enabled transition above */
	phy_data->phy_mode = efx->phy_mode;

	return 0;
}
439 | |||
/* Fill in ethtool settings from the clause-45 MDIO registers */
static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	mdio45_ethtool_gset(&efx->mdio, ecmd);
}
444 | |||
445 | static void qt202x_phy_remove(struct efx_nic *efx) | ||
446 | { | ||
447 | /* Free the context block */ | ||
448 | kfree(efx->phy_data); | ||
449 | efx->phy_data = NULL; | ||
450 | } | ||
451 | |||
/* PHY operations shared by the QT2022C2 and QT2025C; per-variant
 * behaviour is selected inside the methods via efx->phy_type. */
const struct efx_phy_operations falcon_qt202x_phy_ops = {
	.probe		 = qt202x_phy_probe,
	.init		 = qt202x_phy_init,
	.reconfigure	 = qt202x_phy_reconfigure,
	.poll		 = qt202x_phy_poll,
	.fini		 = efx_port_dummy_op_void,
	.remove		 = qt202x_phy_remove,
	.get_settings	 = qt202x_phy_get_settings,
	.set_settings	 = efx_mdio_set_settings,
	.test_alive	 = efx_mdio_test_alive,
};
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h new file mode 100644 index 00000000000..cc2c86b76a7 --- /dev/null +++ b/drivers/net/sfc/regs.h | |||
@@ -0,0 +1,3188 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_REGS_H | ||
12 | #define EFX_REGS_H | ||
13 | |||
14 | /* | ||
15 | * Falcon hardware architecture definitions have a name prefix following | ||
16 | * the format: | ||
17 | * | ||
18 | * F<type>_<min-rev><max-rev>_ | ||
19 | * | ||
20 | * The following <type> strings are used: | ||
21 | * | ||
22 | * MMIO register MC register Host memory structure | ||
23 | * ------------------------------------------------------------- | ||
24 | * Address R MCR | ||
25 | * Bitfield RF MCRF SF | ||
26 | * Enumerator FE MCFE SE | ||
27 | * | ||
28 | * <min-rev> is the first revision to which the definition applies: | ||
29 | * | ||
30 | * A: Falcon A1 (SFC4000AB) | ||
31 | * B: Falcon B0 (SFC4000BA) | ||
32 | * C: Siena A0 (SFL9021AA) | ||
33 | * | ||
34 | * If the definition has been changed or removed in later revisions | ||
35 | * then <max-rev> is the last revision to which the definition applies; | ||
36 | * otherwise it is "Z". | ||
37 | */ | ||
38 | |||
39 | /************************************************************************** | ||
40 | * | ||
41 | * Falcon/Siena registers and descriptors | ||
42 | * | ||
43 | ************************************************************************** | ||
44 | */ | ||
45 | |||
46 | /* ADR_REGION_REG: Address region register */ | ||
47 | #define FR_AZ_ADR_REGION 0x00000000 | ||
48 | #define FRF_AZ_ADR_REGION3_LBN 96 | ||
49 | #define FRF_AZ_ADR_REGION3_WIDTH 18 | ||
50 | #define FRF_AZ_ADR_REGION2_LBN 64 | ||
51 | #define FRF_AZ_ADR_REGION2_WIDTH 18 | ||
52 | #define FRF_AZ_ADR_REGION1_LBN 32 | ||
53 | #define FRF_AZ_ADR_REGION1_WIDTH 18 | ||
54 | #define FRF_AZ_ADR_REGION0_LBN 0 | ||
55 | #define FRF_AZ_ADR_REGION0_WIDTH 18 | ||
56 | |||
57 | /* INT_EN_REG_KER: Kernel driver Interrupt enable register */ | ||
58 | #define FR_AZ_INT_EN_KER 0x00000010 | ||
59 | #define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 | ||
60 | #define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 | ||
61 | #define FRF_AZ_KER_INT_CHAR_LBN 4 | ||
62 | #define FRF_AZ_KER_INT_CHAR_WIDTH 1 | ||
63 | #define FRF_AZ_KER_INT_KER_LBN 3 | ||
64 | #define FRF_AZ_KER_INT_KER_WIDTH 1 | ||
65 | #define FRF_AZ_DRV_INT_EN_KER_LBN 0 | ||
66 | #define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 | ||
67 | |||
68 | /* INT_EN_REG_CHAR: Char Driver interrupt enable register */ | ||
69 | #define FR_BZ_INT_EN_CHAR 0x00000020 | ||
70 | #define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 | ||
71 | #define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 | ||
72 | #define FRF_BZ_CHAR_INT_CHAR_LBN 4 | ||
73 | #define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 | ||
74 | #define FRF_BZ_CHAR_INT_KER_LBN 3 | ||
75 | #define FRF_BZ_CHAR_INT_KER_WIDTH 1 | ||
76 | #define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 | ||
77 | #define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 | ||
78 | |||
79 | /* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ | ||
80 | #define FR_AZ_INT_ADR_KER 0x00000030 | ||
81 | #define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 | ||
82 | #define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 | ||
83 | #define FRF_AZ_INT_ADR_KER_LBN 0 | ||
84 | #define FRF_AZ_INT_ADR_KER_WIDTH 64 | ||
85 | |||
86 | /* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ | ||
87 | #define FR_BZ_INT_ADR_CHAR 0x00000040 | ||
88 | #define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 | ||
89 | #define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 | ||
90 | #define FRF_BZ_INT_ADR_CHAR_LBN 0 | ||
91 | #define FRF_BZ_INT_ADR_CHAR_WIDTH 64 | ||
92 | |||
93 | /* INT_ACK_KER: Kernel interrupt acknowledge register */ | ||
94 | #define FR_AA_INT_ACK_KER 0x00000050 | ||
95 | #define FRF_AA_INT_ACK_KER_FIELD_LBN 0 | ||
96 | #define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 | ||
97 | |||
98 | /* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ | ||
99 | #define FR_BZ_INT_ISR0 0x00000090 | ||
100 | #define FRF_BZ_INT_ISR_REG_LBN 0 | ||
101 | #define FRF_BZ_INT_ISR_REG_WIDTH 64 | ||
102 | |||
103 | /* HW_INIT_REG: Hardware initialization register */ | ||
104 | #define FR_AZ_HW_INIT 0x000000c0 | ||
105 | #define FRF_BB_BDMRD_CPLF_FULL_LBN 124 | ||
106 | #define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 | ||
107 | #define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 | ||
108 | #define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 | ||
109 | #define FRF_CZ_TX_MRG_TAGS_LBN 120 | ||
110 | #define FRF_CZ_TX_MRG_TAGS_WIDTH 1 | ||
111 | #define FRF_AB_TRGT_MASK_ALL_LBN 100 | ||
112 | #define FRF_AB_TRGT_MASK_ALL_WIDTH 1 | ||
113 | #define FRF_AZ_DOORBELL_DROP_LBN 92 | ||
114 | #define FRF_AZ_DOORBELL_DROP_WIDTH 8 | ||
115 | #define FRF_AB_TX_RREQ_MASK_EN_LBN 76 | ||
116 | #define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 | ||
117 | #define FRF_AB_PE_EIDLE_DIS_LBN 75 | ||
118 | #define FRF_AB_PE_EIDLE_DIS_WIDTH 1 | ||
119 | #define FRF_AA_FC_BLOCKING_EN_LBN 45 | ||
120 | #define FRF_AA_FC_BLOCKING_EN_WIDTH 1 | ||
121 | #define FRF_BZ_B2B_REQ_EN_LBN 45 | ||
122 | #define FRF_BZ_B2B_REQ_EN_WIDTH 1 | ||
123 | #define FRF_AA_B2B_REQ_EN_LBN 44 | ||
124 | #define FRF_AA_B2B_REQ_EN_WIDTH 1 | ||
125 | #define FRF_BB_FC_BLOCKING_EN_LBN 44 | ||
126 | #define FRF_BB_FC_BLOCKING_EN_WIDTH 1 | ||
127 | #define FRF_AZ_POST_WR_MASK_LBN 40 | ||
128 | #define FRF_AZ_POST_WR_MASK_WIDTH 4 | ||
129 | #define FRF_AZ_TLP_TC_LBN 34 | ||
130 | #define FRF_AZ_TLP_TC_WIDTH 3 | ||
131 | #define FRF_AZ_TLP_ATTR_LBN 32 | ||
132 | #define FRF_AZ_TLP_ATTR_WIDTH 2 | ||
133 | #define FRF_AB_INTB_VEC_LBN 24 | ||
134 | #define FRF_AB_INTB_VEC_WIDTH 5 | ||
135 | #define FRF_AB_INTA_VEC_LBN 16 | ||
136 | #define FRF_AB_INTA_VEC_WIDTH 5 | ||
137 | #define FRF_AZ_WD_TIMER_LBN 8 | ||
138 | #define FRF_AZ_WD_TIMER_WIDTH 8 | ||
139 | #define FRF_AZ_US_DISABLE_LBN 5 | ||
140 | #define FRF_AZ_US_DISABLE_WIDTH 1 | ||
141 | #define FRF_AZ_TLP_EP_LBN 4 | ||
142 | #define FRF_AZ_TLP_EP_WIDTH 1 | ||
143 | #define FRF_AZ_ATTR_SEL_LBN 3 | ||
144 | #define FRF_AZ_ATTR_SEL_WIDTH 1 | ||
145 | #define FRF_AZ_TD_SEL_LBN 1 | ||
146 | #define FRF_AZ_TD_SEL_WIDTH 1 | ||
147 | #define FRF_AZ_TLP_TD_LBN 0 | ||
148 | #define FRF_AZ_TLP_TD_WIDTH 1 | ||
149 | |||
150 | /* EE_SPI_HCMD_REG: SPI host command register */ | ||
151 | #define FR_AB_EE_SPI_HCMD 0x00000100 | ||
152 | #define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 | ||
153 | #define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 | ||
154 | #define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 | ||
155 | #define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 | ||
156 | #define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 | ||
157 | #define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 | ||
158 | #define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 | ||
159 | #define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 | ||
160 | #define FRF_AB_EE_SPI_HCMD_READ_LBN 15 | ||
161 | #define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 | ||
162 | #define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 | ||
163 | #define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 | ||
164 | #define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 | ||
165 | #define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 | ||
166 | #define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 | ||
167 | #define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 | ||
168 | |||
169 | /* USR_EV_CFG: User Level Event Configuration register */ | ||
170 | #define FR_CZ_USR_EV_CFG 0x00000100 | ||
171 | #define FRF_CZ_USREV_DIS_LBN 16 | ||
172 | #define FRF_CZ_USREV_DIS_WIDTH 1 | ||
173 | #define FRF_CZ_DFLT_EVQ_LBN 0 | ||
174 | #define FRF_CZ_DFLT_EVQ_WIDTH 10 | ||
175 | |||
176 | /* EE_SPI_HADR_REG: SPI host address register */ | ||
177 | #define FR_AB_EE_SPI_HADR 0x00000110 | ||
178 | #define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 | ||
179 | #define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 | ||
180 | #define FRF_AB_EE_SPI_HADR_ADR_LBN 0 | ||
181 | #define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 | ||
182 | |||
183 | /* EE_SPI_HDATA_REG: SPI host data register */ | ||
184 | #define FR_AB_EE_SPI_HDATA 0x00000120 | ||
185 | #define FRF_AB_EE_SPI_HDATA3_LBN 96 | ||
186 | #define FRF_AB_EE_SPI_HDATA3_WIDTH 32 | ||
187 | #define FRF_AB_EE_SPI_HDATA2_LBN 64 | ||
188 | #define FRF_AB_EE_SPI_HDATA2_WIDTH 32 | ||
189 | #define FRF_AB_EE_SPI_HDATA1_LBN 32 | ||
190 | #define FRF_AB_EE_SPI_HDATA1_WIDTH 32 | ||
191 | #define FRF_AB_EE_SPI_HDATA0_LBN 0 | ||
192 | #define FRF_AB_EE_SPI_HDATA0_WIDTH 32 | ||
193 | |||
194 | /* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ | ||
195 | #define FR_AB_EE_BASE_PAGE 0x00000130 | ||
196 | #define FRF_AB_EE_EXPROM_MASK_LBN 16 | ||
197 | #define FRF_AB_EE_EXPROM_MASK_WIDTH 13 | ||
198 | #define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 | ||
199 | #define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 | ||
200 | |||
201 | /* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ | ||
202 | #define FR_AB_EE_VPD_CFG0 0x00000140 | ||
203 | #define FRF_AB_EE_SF_FASTRD_EN_LBN 127 | ||
204 | #define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 | ||
205 | #define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 | ||
206 | #define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 | ||
207 | #define FRF_AB_EE_VPD_WIP_POLL_LBN 119 | ||
208 | #define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 | ||
209 | #define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 | ||
210 | #define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 | ||
211 | #define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 | ||
212 | #define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 | ||
213 | #define FRF_AB_EE_VPDW_LENGTH_LBN 80 | ||
214 | #define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 | ||
215 | #define FRF_AB_EE_VPDW_BASE_LBN 64 | ||
216 | #define FRF_AB_EE_VPDW_BASE_WIDTH 15 | ||
217 | #define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 | ||
218 | #define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 | ||
219 | #define FRF_AB_EE_VPD_BASE_LBN 32 | ||
220 | #define FRF_AB_EE_VPD_BASE_WIDTH 24 | ||
221 | #define FRF_AB_EE_VPD_LENGTH_LBN 16 | ||
222 | #define FRF_AB_EE_VPD_LENGTH_WIDTH 15 | ||
223 | #define FRF_AB_EE_VPD_AD_SIZE_LBN 8 | ||
224 | #define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 | ||
225 | #define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 | ||
226 | #define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 | ||
227 | #define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 | ||
228 | #define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 | ||
229 | #define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 | ||
230 | #define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 | ||
231 | #define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 | ||
232 | #define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 | ||
233 | #define FRF_AB_EE_VPD_EN_LBN 0 | ||
234 | #define FRF_AB_EE_VPD_EN_WIDTH 1 | ||
235 | |||
236 | /* EE_VPD_SW_CNTL_REG: VPD access SW control register */ | ||
237 | #define FR_AB_EE_VPD_SW_CNTL 0x00000150 | ||
238 | #define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 | ||
239 | #define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 | ||
240 | #define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 | ||
241 | #define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 | ||
242 | #define FRF_AB_EE_VPD_CYC_ADR_LBN 0 | ||
243 | #define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 | ||
244 | |||
245 | /* EE_VPD_SW_DATA_REG: VPD access SW data register */ | ||
246 | #define FR_AB_EE_VPD_SW_DATA 0x00000160 | ||
247 | #define FRF_AB_EE_VPD_CYC_DAT_LBN 0 | ||
248 | #define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 | ||
249 | |||
250 | /* PBMX_DBG_IADDR_REG: Capture Module address register */ | ||
251 | #define FR_CZ_PBMX_DBG_IADDR 0x000001f0 | ||
252 | #define FRF_CZ_PBMX_DBG_IADDR_LBN 0 | ||
253 | #define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 | ||
254 | |||
255 | /* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ | ||
256 | #define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 | ||
257 | #define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 | ||
258 | #define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 | ||
259 | #define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 | ||
260 | #define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 | ||
261 | #define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 | ||
262 | #define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 | ||
263 | |||
264 | /* PBMX_DBG_IDATA_REG: Capture Module data register */ | ||
265 | #define FR_CZ_PBMX_DBG_IDATA 0x000001f8 | ||
266 | #define FRF_CZ_PBMX_DBG_IDATA_LBN 0 | ||
267 | #define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 | ||
268 | |||
269 | /* NIC_STAT_REG: NIC status register */ | ||
270 | #define FR_AB_NIC_STAT 0x00000200 | ||
271 | #define FRF_BB_AER_DIS_LBN 34 | ||
272 | #define FRF_BB_AER_DIS_WIDTH 1 | ||
273 | #define FRF_BB_EE_STRAP_EN_LBN 31 | ||
274 | #define FRF_BB_EE_STRAP_EN_WIDTH 1 | ||
275 | #define FRF_BB_EE_STRAP_LBN 24 | ||
276 | #define FRF_BB_EE_STRAP_WIDTH 4 | ||
277 | #define FRF_BB_REVISION_ID_LBN 17 | ||
278 | #define FRF_BB_REVISION_ID_WIDTH 7 | ||
279 | #define FRF_AB_ONCHIP_SRAM_LBN 16 | ||
280 | #define FRF_AB_ONCHIP_SRAM_WIDTH 1 | ||
281 | #define FRF_AB_SF_PRST_LBN 9 | ||
282 | #define FRF_AB_SF_PRST_WIDTH 1 | ||
283 | #define FRF_AB_EE_PRST_LBN 8 | ||
284 | #define FRF_AB_EE_PRST_WIDTH 1 | ||
285 | #define FRF_AB_ATE_MODE_LBN 3 | ||
286 | #define FRF_AB_ATE_MODE_WIDTH 1 | ||
287 | #define FRF_AB_STRAP_PINS_LBN 0 | ||
288 | #define FRF_AB_STRAP_PINS_WIDTH 3 | ||
289 | |||
290 | /* GPIO_CTL_REG: GPIO control register */ | ||
291 | #define FR_AB_GPIO_CTL 0x00000210 | ||
292 | #define FRF_AB_GPIO_OUT3_LBN 112 | ||
293 | #define FRF_AB_GPIO_OUT3_WIDTH 16 | ||
294 | #define FRF_AB_GPIO_IN3_LBN 104 | ||
295 | #define FRF_AB_GPIO_IN3_WIDTH 8 | ||
296 | #define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 | ||
297 | #define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 | ||
298 | #define FRF_AB_GPIO_OUT2_LBN 80 | ||
299 | #define FRF_AB_GPIO_OUT2_WIDTH 16 | ||
300 | #define FRF_AB_GPIO_IN2_LBN 72 | ||
301 | #define FRF_AB_GPIO_IN2_WIDTH 8 | ||
302 | #define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 | ||
303 | #define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 | ||
304 | #define FRF_AB_GPIO15_OEN_LBN 63 | ||
305 | #define FRF_AB_GPIO15_OEN_WIDTH 1 | ||
306 | #define FRF_AB_GPIO14_OEN_LBN 62 | ||
307 | #define FRF_AB_GPIO14_OEN_WIDTH 1 | ||
308 | #define FRF_AB_GPIO13_OEN_LBN 61 | ||
309 | #define FRF_AB_GPIO13_OEN_WIDTH 1 | ||
310 | #define FRF_AB_GPIO12_OEN_LBN 60 | ||
311 | #define FRF_AB_GPIO12_OEN_WIDTH 1 | ||
312 | #define FRF_AB_GPIO11_OEN_LBN 59 | ||
313 | #define FRF_AB_GPIO11_OEN_WIDTH 1 | ||
314 | #define FRF_AB_GPIO10_OEN_LBN 58 | ||
315 | #define FRF_AB_GPIO10_OEN_WIDTH 1 | ||
316 | #define FRF_AB_GPIO9_OEN_LBN 57 | ||
317 | #define FRF_AB_GPIO9_OEN_WIDTH 1 | ||
318 | #define FRF_AB_GPIO8_OEN_LBN 56 | ||
319 | #define FRF_AB_GPIO8_OEN_WIDTH 1 | ||
320 | #define FRF_AB_GPIO15_OUT_LBN 55 | ||
321 | #define FRF_AB_GPIO15_OUT_WIDTH 1 | ||
322 | #define FRF_AB_GPIO14_OUT_LBN 54 | ||
323 | #define FRF_AB_GPIO14_OUT_WIDTH 1 | ||
324 | #define FRF_AB_GPIO13_OUT_LBN 53 | ||
325 | #define FRF_AB_GPIO13_OUT_WIDTH 1 | ||
326 | #define FRF_AB_GPIO12_OUT_LBN 52 | ||
327 | #define FRF_AB_GPIO12_OUT_WIDTH 1 | ||
328 | #define FRF_AB_GPIO11_OUT_LBN 51 | ||
329 | #define FRF_AB_GPIO11_OUT_WIDTH 1 | ||
330 | #define FRF_AB_GPIO10_OUT_LBN 50 | ||
331 | #define FRF_AB_GPIO10_OUT_WIDTH 1 | ||
332 | #define FRF_AB_GPIO9_OUT_LBN 49 | ||
333 | #define FRF_AB_GPIO9_OUT_WIDTH 1 | ||
334 | #define FRF_AB_GPIO8_OUT_LBN 48 | ||
335 | #define FRF_AB_GPIO8_OUT_WIDTH 1 | ||
336 | #define FRF_AB_GPIO15_IN_LBN 47 | ||
337 | #define FRF_AB_GPIO15_IN_WIDTH 1 | ||
338 | #define FRF_AB_GPIO14_IN_LBN 46 | ||
339 | #define FRF_AB_GPIO14_IN_WIDTH 1 | ||
340 | #define FRF_AB_GPIO13_IN_LBN 45 | ||
341 | #define FRF_AB_GPIO13_IN_WIDTH 1 | ||
342 | #define FRF_AB_GPIO12_IN_LBN 44 | ||
343 | #define FRF_AB_GPIO12_IN_WIDTH 1 | ||
344 | #define FRF_AB_GPIO11_IN_LBN 43 | ||
345 | #define FRF_AB_GPIO11_IN_WIDTH 1 | ||
346 | #define FRF_AB_GPIO10_IN_LBN 42 | ||
347 | #define FRF_AB_GPIO10_IN_WIDTH 1 | ||
348 | #define FRF_AB_GPIO9_IN_LBN 41 | ||
349 | #define FRF_AB_GPIO9_IN_WIDTH 1 | ||
350 | #define FRF_AB_GPIO8_IN_LBN 40 | ||
351 | #define FRF_AB_GPIO8_IN_WIDTH 1 | ||
352 | #define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 | ||
353 | #define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 | ||
354 | #define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 | ||
355 | #define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 | ||
356 | #define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 | ||
357 | #define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 | ||
358 | #define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 | ||
359 | #define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 | ||
360 | #define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 | ||
361 | #define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 | ||
362 | #define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 | ||
363 | #define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 | ||
364 | #define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 | ||
365 | #define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 | ||
366 | #define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 | ||
367 | #define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 | ||
368 | #define FRF_AB_CLK156_OUT_EN_LBN 31 | ||
369 | #define FRF_AB_CLK156_OUT_EN_WIDTH 1 | ||
370 | #define FRF_AB_USE_NIC_CLK_LBN 30 | ||
371 | #define FRF_AB_USE_NIC_CLK_WIDTH 1 | ||
372 | #define FRF_AB_GPIO5_OEN_LBN 29 | ||
373 | #define FRF_AB_GPIO5_OEN_WIDTH 1 | ||
374 | #define FRF_AB_GPIO4_OEN_LBN 28 | ||
375 | #define FRF_AB_GPIO4_OEN_WIDTH 1 | ||
376 | #define FRF_AB_GPIO3_OEN_LBN 27 | ||
377 | #define FRF_AB_GPIO3_OEN_WIDTH 1 | ||
378 | #define FRF_AB_GPIO2_OEN_LBN 26 | ||
379 | #define FRF_AB_GPIO2_OEN_WIDTH 1 | ||
380 | #define FRF_AB_GPIO1_OEN_LBN 25 | ||
381 | #define FRF_AB_GPIO1_OEN_WIDTH 1 | ||
382 | #define FRF_AB_GPIO0_OEN_LBN 24 | ||
383 | #define FRF_AB_GPIO0_OEN_WIDTH 1 | ||
384 | #define FRF_AB_GPIO7_OUT_LBN 23 | ||
385 | #define FRF_AB_GPIO7_OUT_WIDTH 1 | ||
386 | #define FRF_AB_GPIO6_OUT_LBN 22 | ||
387 | #define FRF_AB_GPIO6_OUT_WIDTH 1 | ||
388 | #define FRF_AB_GPIO5_OUT_LBN 21 | ||
389 | #define FRF_AB_GPIO5_OUT_WIDTH 1 | ||
390 | #define FRF_AB_GPIO4_OUT_LBN 20 | ||
391 | #define FRF_AB_GPIO4_OUT_WIDTH 1 | ||
392 | #define FRF_AB_GPIO3_OUT_LBN 19 | ||
393 | #define FRF_AB_GPIO3_OUT_WIDTH 1 | ||
394 | #define FRF_AB_GPIO2_OUT_LBN 18 | ||
395 | #define FRF_AB_GPIO2_OUT_WIDTH 1 | ||
396 | #define FRF_AB_GPIO1_OUT_LBN 17 | ||
397 | #define FRF_AB_GPIO1_OUT_WIDTH 1 | ||
398 | #define FRF_AB_GPIO0_OUT_LBN 16 | ||
399 | #define FRF_AB_GPIO0_OUT_WIDTH 1 | ||
400 | #define FRF_AB_GPIO7_IN_LBN 15 | ||
401 | #define FRF_AB_GPIO7_IN_WIDTH 1 | ||
402 | #define FRF_AB_GPIO6_IN_LBN 14 | ||
403 | #define FRF_AB_GPIO6_IN_WIDTH 1 | ||
404 | #define FRF_AB_GPIO5_IN_LBN 13 | ||
405 | #define FRF_AB_GPIO5_IN_WIDTH 1 | ||
406 | #define FRF_AB_GPIO4_IN_LBN 12 | ||
407 | #define FRF_AB_GPIO4_IN_WIDTH 1 | ||
408 | #define FRF_AB_GPIO3_IN_LBN 11 | ||
409 | #define FRF_AB_GPIO3_IN_WIDTH 1 | ||
410 | #define FRF_AB_GPIO2_IN_LBN 10 | ||
411 | #define FRF_AB_GPIO2_IN_WIDTH 1 | ||
412 | #define FRF_AB_GPIO1_IN_LBN 9 | ||
413 | #define FRF_AB_GPIO1_IN_WIDTH 1 | ||
414 | #define FRF_AB_GPIO0_IN_LBN 8 | ||
415 | #define FRF_AB_GPIO0_IN_WIDTH 1 | ||
416 | #define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 | ||
417 | #define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 | ||
418 | #define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 | ||
419 | #define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 | ||
420 | #define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 | ||
421 | #define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 | ||
422 | #define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 | ||
423 | #define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 | ||
424 | #define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 | ||
425 | #define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 | ||
426 | #define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 | ||
427 | #define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 | ||
428 | #define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 | ||
429 | #define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 | ||
430 | #define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 | ||
431 | #define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 | ||
432 | |||
433 | /* GLB_CTL_REG: Global control register */ | ||
434 | #define FR_AB_GLB_CTL 0x00000220 | ||
435 | #define FRF_AB_EXT_PHY_RST_CTL_LBN 63 | ||
436 | #define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 | ||
437 | #define FRF_AB_XAUI_SD_RST_CTL_LBN 62 | ||
438 | #define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 | ||
439 | #define FRF_AB_PCIE_SD_RST_CTL_LBN 61 | ||
440 | #define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 | ||
441 | #define FRF_AA_PCIX_RST_CTL_LBN 60 | ||
442 | #define FRF_AA_PCIX_RST_CTL_WIDTH 1 | ||
443 | #define FRF_BB_BIU_RST_CTL_LBN 60 | ||
444 | #define FRF_BB_BIU_RST_CTL_WIDTH 1 | ||
445 | #define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 | ||
446 | #define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 | ||
447 | #define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 | ||
448 | #define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 | ||
449 | #define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 | ||
450 | #define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 | ||
451 | #define FRF_AB_XGRX_RST_CTL_LBN 56 | ||
452 | #define FRF_AB_XGRX_RST_CTL_WIDTH 1 | ||
453 | #define FRF_AB_XGTX_RST_CTL_LBN 55 | ||
454 | #define FRF_AB_XGTX_RST_CTL_WIDTH 1 | ||
455 | #define FRF_AB_EM_RST_CTL_LBN 54 | ||
456 | #define FRF_AB_EM_RST_CTL_WIDTH 1 | ||
457 | #define FRF_AB_EV_RST_CTL_LBN 53 | ||
458 | #define FRF_AB_EV_RST_CTL_WIDTH 1 | ||
459 | #define FRF_AB_SR_RST_CTL_LBN 52 | ||
460 | #define FRF_AB_SR_RST_CTL_WIDTH 1 | ||
461 | #define FRF_AB_RX_RST_CTL_LBN 51 | ||
462 | #define FRF_AB_RX_RST_CTL_WIDTH 1 | ||
463 | #define FRF_AB_TX_RST_CTL_LBN 50 | ||
464 | #define FRF_AB_TX_RST_CTL_WIDTH 1 | ||
465 | #define FRF_AB_EE_RST_CTL_LBN 49 | ||
466 | #define FRF_AB_EE_RST_CTL_WIDTH 1 | ||
467 | #define FRF_AB_CS_RST_CTL_LBN 48 | ||
468 | #define FRF_AB_CS_RST_CTL_WIDTH 1 | ||
469 | #define FRF_AB_HOT_RST_CTL_LBN 40 | ||
470 | #define FRF_AB_HOT_RST_CTL_WIDTH 2 | ||
471 | #define FRF_AB_RST_EXT_PHY_LBN 31 | ||
472 | #define FRF_AB_RST_EXT_PHY_WIDTH 1 | ||
473 | #define FRF_AB_RST_XAUI_SD_LBN 30 | ||
474 | #define FRF_AB_RST_XAUI_SD_WIDTH 1 | ||
475 | #define FRF_AB_RST_PCIE_SD_LBN 29 | ||
476 | #define FRF_AB_RST_PCIE_SD_WIDTH 1 | ||
477 | #define FRF_AA_RST_PCIX_LBN 28 | ||
478 | #define FRF_AA_RST_PCIX_WIDTH 1 | ||
479 | #define FRF_BB_RST_BIU_LBN 28 | ||
480 | #define FRF_BB_RST_BIU_WIDTH 1 | ||
481 | #define FRF_AB_RST_PCIE_STKY_LBN 27 | ||
482 | #define FRF_AB_RST_PCIE_STKY_WIDTH 1 | ||
483 | #define FRF_AB_RST_PCIE_NSTKY_LBN 26 | ||
484 | #define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 | ||
485 | #define FRF_AB_RST_PCIE_CORE_LBN 25 | ||
486 | #define FRF_AB_RST_PCIE_CORE_WIDTH 1 | ||
487 | #define FRF_AB_RST_XGRX_LBN 24 | ||
488 | #define FRF_AB_RST_XGRX_WIDTH 1 | ||
489 | #define FRF_AB_RST_XGTX_LBN 23 | ||
490 | #define FRF_AB_RST_XGTX_WIDTH 1 | ||
491 | #define FRF_AB_RST_EM_LBN 22 | ||
492 | #define FRF_AB_RST_EM_WIDTH 1 | ||
493 | #define FRF_AB_RST_EV_LBN 21 | ||
494 | #define FRF_AB_RST_EV_WIDTH 1 | ||
495 | #define FRF_AB_RST_SR_LBN 20 | ||
496 | #define FRF_AB_RST_SR_WIDTH 1 | ||
497 | #define FRF_AB_RST_RX_LBN 19 | ||
498 | #define FRF_AB_RST_RX_WIDTH 1 | ||
499 | #define FRF_AB_RST_TX_LBN 18 | ||
500 | #define FRF_AB_RST_TX_WIDTH 1 | ||
501 | #define FRF_AB_RST_SF_LBN 17 | ||
502 | #define FRF_AB_RST_SF_WIDTH 1 | ||
503 | #define FRF_AB_RST_CS_LBN 16 | ||
504 | #define FRF_AB_RST_CS_WIDTH 1 | ||
505 | #define FRF_AB_INT_RST_DUR_LBN 4 | ||
506 | #define FRF_AB_INT_RST_DUR_WIDTH 3 | ||
507 | #define FRF_AB_EXT_PHY_RST_DUR_LBN 1 | ||
508 | #define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 | ||
509 | #define FFE_AB_EXT_PHY_RST_DUR_10240US 7 | ||
510 | #define FFE_AB_EXT_PHY_RST_DUR_5120US 6 | ||
511 | #define FFE_AB_EXT_PHY_RST_DUR_2560US 5 | ||
512 | #define FFE_AB_EXT_PHY_RST_DUR_1280US 4 | ||
513 | #define FFE_AB_EXT_PHY_RST_DUR_640US 3 | ||
514 | #define FFE_AB_EXT_PHY_RST_DUR_320US 2 | ||
515 | #define FFE_AB_EXT_PHY_RST_DUR_160US 1 | ||
516 | #define FFE_AB_EXT_PHY_RST_DUR_80US 0 | ||
517 | #define FRF_AB_SWRST_LBN 0 | ||
518 | #define FRF_AB_SWRST_WIDTH 1 | ||
519 | |||
520 | /* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ | ||
521 | #define FR_AZ_FATAL_INTR_KER 0x00000230 | ||
522 | #define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 | ||
523 | #define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 | ||
524 | #define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 | ||
525 | #define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 | ||
526 | #define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 | ||
527 | #define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 | ||
528 | #define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 | ||
529 | #define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 | ||
530 | #define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 | ||
531 | #define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 | ||
532 | #define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 | ||
533 | #define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 | ||
534 | #define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 | ||
535 | #define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 | ||
536 | #define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 | ||
537 | #define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 | ||
538 | #define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 | ||
539 | #define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 | ||
540 | #define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 | ||
541 | #define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 | ||
542 | #define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 | ||
543 | #define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 | ||
544 | #define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 | ||
545 | #define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 | ||
546 | #define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 | ||
547 | #define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 | ||
548 | #define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 | ||
549 | #define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 | ||
550 | #define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 | ||
551 | #define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 | ||
552 | #define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 | ||
553 | #define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 | ||
554 | #define FRF_CZ_MBU_PERR_INT_KER_LBN 11 | ||
555 | #define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 | ||
556 | #define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 | ||
557 | #define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 | ||
558 | #define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 | ||
559 | #define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 | ||
560 | #define FRF_AZ_MEM_PERR_INT_KER_LBN 8 | ||
561 | #define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 | ||
562 | #define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 | ||
563 | #define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 | ||
564 | #define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 | ||
565 | #define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 | ||
566 | #define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 | ||
567 | #define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 | ||
568 | #define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 | ||
569 | #define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 | ||
570 | #define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 | ||
571 | #define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 | ||
572 | #define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 | ||
573 | #define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 | ||
574 | #define FRF_AZ_ILL_ADR_INT_KER_LBN 1 | ||
575 | #define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 | ||
576 | #define FRF_AZ_SRM_PERR_INT_KER_LBN 0 | ||
577 | #define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 | ||
578 | |||
579 | /* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ | ||
580 | #define FR_BZ_FATAL_INTR_CHAR 0x00000240 | ||
581 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 | ||
582 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 | ||
583 | #define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 | ||
584 | #define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 | ||
585 | #define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 | ||
586 | #define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 | ||
587 | #define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 | ||
588 | #define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 | ||
589 | #define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 | ||
590 | #define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 | ||
591 | #define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 | ||
592 | #define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 | ||
593 | #define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 | ||
594 | #define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 | ||
595 | #define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 | ||
596 | #define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 | ||
597 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 | ||
598 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 | ||
599 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 | ||
600 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 | ||
601 | #define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 | ||
602 | #define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 | ||
603 | #define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 | ||
604 | #define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 | ||
605 | #define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 | ||
606 | #define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 | ||
607 | #define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 | ||
608 | #define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 | ||
609 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 | ||
610 | #define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 | ||
611 | #define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11 | ||
612 | #define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 | ||
613 | #define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 | ||
614 | #define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 | ||
615 | #define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 | ||
616 | #define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 | ||
617 | #define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 | ||
618 | #define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 | ||
619 | #define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 | ||
620 | #define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 | ||
621 | #define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 | ||
622 | #define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 | ||
623 | #define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 | ||
624 | #define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 | ||
625 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 | ||
626 | #define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 | ||
627 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 | ||
628 | #define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 | ||
629 | #define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 | ||
630 | #define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 | ||
631 | #define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 | ||
632 | #define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 | ||
633 | #define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 | ||
634 | #define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 | ||
635 | #define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 | ||
636 | #define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 | ||
637 | |||
638 | /* DP_CTRL_REG: Datapath control register */ | ||
639 | #define FR_BZ_DP_CTRL 0x00000250 | ||
640 | #define FRF_BZ_FLS_EVQ_ID_LBN 0 | ||
641 | #define FRF_BZ_FLS_EVQ_ID_WIDTH 12 | ||
642 | |||
643 | /* MEM_STAT_REG: Memory status register */ | ||
644 | #define FR_AZ_MEM_STAT 0x00000260 | ||
645 | #define FRF_AB_MEM_PERR_VEC_LBN 53 | ||
646 | #define FRF_AB_MEM_PERR_VEC_WIDTH 38 | ||
647 | #define FRF_AB_MBIST_CORR_LBN 38 | ||
648 | #define FRF_AB_MBIST_CORR_WIDTH 15 | ||
649 | #define FRF_AB_MBIST_ERR_LBN 0 | ||
650 | #define FRF_AB_MBIST_ERR_WIDTH 40 | ||
651 | #define FRF_CZ_MEM_PERR_VEC_LBN 0 | ||
652 | #define FRF_CZ_MEM_PERR_VEC_WIDTH 35 | ||
653 | |||
654 | /* CS_DEBUG_REG: Debug register */ | ||
655 | #define FR_AZ_CS_DEBUG 0x00000270 | ||
656 | #define FRF_AB_GLB_DEBUG2_SEL_LBN 50 | ||
657 | #define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 | ||
658 | #define FRF_AB_DEBUG_BLK_SEL2_LBN 47 | ||
659 | #define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 | ||
660 | #define FRF_AB_DEBUG_BLK_SEL1_LBN 44 | ||
661 | #define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 | ||
662 | #define FRF_AB_DEBUG_BLK_SEL0_LBN 41 | ||
663 | #define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 | ||
664 | #define FRF_CZ_CS_PORT_NUM_LBN 40 | ||
665 | #define FRF_CZ_CS_PORT_NUM_WIDTH 2 | ||
666 | #define FRF_AB_MISC_DEBUG_ADDR_LBN 36 | ||
667 | #define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 | ||
668 | #define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 | ||
669 | #define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 | ||
670 | #define FRF_CZ_CS_PORT_FPE_LBN 1 | ||
671 | #define FRF_CZ_CS_PORT_FPE_WIDTH 35 | ||
672 | #define FRF_AB_EM_DEBUG_ADDR_LBN 26 | ||
673 | #define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 | ||
674 | #define FRF_AB_SR_DEBUG_ADDR_LBN 21 | ||
675 | #define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 | ||
676 | #define FRF_AB_EV_DEBUG_ADDR_LBN 16 | ||
677 | #define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 | ||
678 | #define FRF_AB_RX_DEBUG_ADDR_LBN 11 | ||
679 | #define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 | ||
680 | #define FRF_AB_TX_DEBUG_ADDR_LBN 6 | ||
681 | #define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 | ||
682 | #define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 | ||
683 | #define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 | ||
684 | #define FRF_AZ_CS_DEBUG_EN_LBN 0 | ||
685 | #define FRF_AZ_CS_DEBUG_EN_WIDTH 1 | ||
686 | |||
687 | /* DRIVER_REG: Driver scratch register [0-7] */ | ||
688 | #define FR_AZ_DRIVER 0x00000280 | ||
689 | #define FR_AZ_DRIVER_STEP 16 | ||
690 | #define FR_AZ_DRIVER_ROWS 8 | ||
691 | #define FRF_AZ_DRIVER_DW0_LBN 0 | ||
692 | #define FRF_AZ_DRIVER_DW0_WIDTH 32 | ||
693 | |||
694 | /* ALTERA_BUILD_REG: Altera build register */ | ||
695 | #define FR_AZ_ALTERA_BUILD 0x00000300 | ||
696 | #define FRF_AZ_ALTERA_BUILD_VER_LBN 0 | ||
697 | #define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 | ||
698 | |||
699 | /* CSR_SPARE_REG: Spare register */ | ||
700 | #define FR_AZ_CSR_SPARE 0x00000310 | ||
701 | #define FRF_AB_MEM_PERR_EN_LBN 64 | ||
702 | #define FRF_AB_MEM_PERR_EN_WIDTH 38 | ||
703 | #define FRF_CZ_MEM_PERR_EN_LBN 64 | ||
704 | #define FRF_CZ_MEM_PERR_EN_WIDTH 35 | ||
705 | #define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 | ||
706 | #define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 | ||
707 | #define FRF_AZ_CSR_SPARE_BITS_LBN 0 | ||
708 | #define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 | ||
709 | |||
710 | /* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ | ||
711 | #define FR_AB_PCIE_SD_CTL0123 0x00000320 | ||
712 | #define FRF_AB_PCIE_TESTSIG_H_LBN 96 | ||
713 | #define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 | ||
714 | #define FRF_AB_PCIE_TESTSIG_L_LBN 64 | ||
715 | #define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 | ||
716 | #define FRF_AB_PCIE_OFFSET_LBN 56 | ||
717 | #define FRF_AB_PCIE_OFFSET_WIDTH 8 | ||
718 | #define FRF_AB_PCIE_OFFSETEN_H_LBN 55 | ||
719 | #define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 | ||
720 | #define FRF_AB_PCIE_OFFSETEN_L_LBN 54 | ||
721 | #define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 | ||
722 | #define FRF_AB_PCIE_HIVMODE_H_LBN 53 | ||
723 | #define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 | ||
724 | #define FRF_AB_PCIE_HIVMODE_L_LBN 52 | ||
725 | #define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 | ||
726 | #define FRF_AB_PCIE_PARRESET_H_LBN 51 | ||
727 | #define FRF_AB_PCIE_PARRESET_H_WIDTH 1 | ||
728 | #define FRF_AB_PCIE_PARRESET_L_LBN 50 | ||
729 | #define FRF_AB_PCIE_PARRESET_L_WIDTH 1 | ||
730 | #define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 | ||
731 | #define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 | ||
732 | #define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 | ||
733 | #define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 | ||
734 | #define FRF_AB_PCIE_LPBK_LBN 40 | ||
735 | #define FRF_AB_PCIE_LPBK_WIDTH 8 | ||
736 | #define FRF_AB_PCIE_PARLPBK_LBN 32 | ||
737 | #define FRF_AB_PCIE_PARLPBK_WIDTH 8 | ||
738 | #define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 | ||
739 | #define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 | ||
740 | #define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 | ||
741 | #define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 | ||
742 | #define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 | ||
743 | #define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 | ||
744 | #define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 | ||
745 | #define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 | ||
746 | #define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 | ||
747 | #define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 | ||
748 | #define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 | ||
749 | #define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 | ||
750 | #define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 | ||
751 | #define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 | ||
752 | #define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 | ||
753 | #define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 | ||
754 | #define FRF_AB_PCIE_RXEQCTL_H_LBN 18 | ||
755 | #define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 | ||
756 | #define FRF_AB_PCIE_RXEQCTL_L_LBN 16 | ||
757 | #define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 | ||
758 | #define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 | ||
759 | #define FFE_AB_PCIE_RXEQCTL_OFF 2 | ||
760 | #define FFE_AB_PCIE_RXEQCTL_MIN 1 | ||
761 | #define FFE_AB_PCIE_RXEQCTL_MAX 0 | ||
762 | #define FRF_AB_PCIE_HIDRV_LBN 8 | ||
763 | #define FRF_AB_PCIE_HIDRV_WIDTH 8 | ||
764 | #define FRF_AB_PCIE_LODRV_LBN 0 | ||
765 | #define FRF_AB_PCIE_LODRV_WIDTH 8 | ||
766 | |||
767 | /* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ | ||
768 | #define FR_AB_PCIE_SD_CTL45 0x00000330 | ||
769 | #define FRF_AB_PCIE_DTX7_LBN 60 | ||
770 | #define FRF_AB_PCIE_DTX7_WIDTH 4 | ||
771 | #define FRF_AB_PCIE_DTX6_LBN 56 | ||
772 | #define FRF_AB_PCIE_DTX6_WIDTH 4 | ||
773 | #define FRF_AB_PCIE_DTX5_LBN 52 | ||
774 | #define FRF_AB_PCIE_DTX5_WIDTH 4 | ||
775 | #define FRF_AB_PCIE_DTX4_LBN 48 | ||
776 | #define FRF_AB_PCIE_DTX4_WIDTH 4 | ||
777 | #define FRF_AB_PCIE_DTX3_LBN 44 | ||
778 | #define FRF_AB_PCIE_DTX3_WIDTH 4 | ||
779 | #define FRF_AB_PCIE_DTX2_LBN 40 | ||
780 | #define FRF_AB_PCIE_DTX2_WIDTH 4 | ||
781 | #define FRF_AB_PCIE_DTX1_LBN 36 | ||
782 | #define FRF_AB_PCIE_DTX1_WIDTH 4 | ||
783 | #define FRF_AB_PCIE_DTX0_LBN 32 | ||
784 | #define FRF_AB_PCIE_DTX0_WIDTH 4 | ||
785 | #define FRF_AB_PCIE_DEQ7_LBN 28 | ||
786 | #define FRF_AB_PCIE_DEQ7_WIDTH 4 | ||
787 | #define FRF_AB_PCIE_DEQ6_LBN 24 | ||
788 | #define FRF_AB_PCIE_DEQ6_WIDTH 4 | ||
789 | #define FRF_AB_PCIE_DEQ5_LBN 20 | ||
790 | #define FRF_AB_PCIE_DEQ5_WIDTH 4 | ||
791 | #define FRF_AB_PCIE_DEQ4_LBN 16 | ||
792 | #define FRF_AB_PCIE_DEQ4_WIDTH 4 | ||
793 | #define FRF_AB_PCIE_DEQ3_LBN 12 | ||
794 | #define FRF_AB_PCIE_DEQ3_WIDTH 4 | ||
795 | #define FRF_AB_PCIE_DEQ2_LBN 8 | ||
796 | #define FRF_AB_PCIE_DEQ2_WIDTH 4 | ||
797 | #define FRF_AB_PCIE_DEQ1_LBN 4 | ||
798 | #define FRF_AB_PCIE_DEQ1_WIDTH 4 | ||
799 | #define FRF_AB_PCIE_DEQ0_LBN 0 | ||
800 | #define FRF_AB_PCIE_DEQ0_WIDTH 4 | ||
801 | |||
802 | /* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ | ||
803 | #define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 | ||
804 | #define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 | ||
805 | #define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 | ||
806 | #define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 | ||
807 | #define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 | ||
808 | #define FRF_AB_PCIE_PRBSERR_LBN 40 | ||
809 | #define FRF_AB_PCIE_PRBSERR_WIDTH 8 | ||
810 | #define FRF_AB_PCIE_PRBSERRH0_LBN 32 | ||
811 | #define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 | ||
812 | #define FRF_AB_PCIE_FASTINIT_H_LBN 15 | ||
813 | #define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 | ||
814 | #define FRF_AB_PCIE_FASTINIT_L_LBN 14 | ||
815 | #define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 | ||
816 | #define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 | ||
817 | #define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 | ||
818 | #define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 | ||
819 | #define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 | ||
820 | #define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 | ||
821 | #define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 | ||
822 | #define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 | ||
823 | #define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 | ||
824 | #define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 | ||
825 | #define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 | ||
826 | #define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 | ||
827 | #define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 | ||
828 | #define FRF_AB_PCIE_PRBSSEL_LBN 0 | ||
829 | #define FRF_AB_PCIE_PRBSSEL_WIDTH 8 | ||
830 | |||
831 | /* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ | ||
832 | #define FR_BB_DEBUG_DATA_OUT 0x00000350 | ||
833 | #define FRF_BB_DEBUG2_PORT_LBN 25 | ||
834 | #define FRF_BB_DEBUG2_PORT_WIDTH 15 | ||
835 | #define FRF_BB_DEBUG1_PORT_LBN 0 | ||
836 | #define FRF_BB_DEBUG1_PORT_WIDTH 25 | ||
837 | |||
838 | /* EVQ_RPTR_REGP0: Event queue read pointer register */ | ||
839 | #define FR_BZ_EVQ_RPTR_P0 0x00000400 | ||
840 | #define FR_BZ_EVQ_RPTR_P0_STEP 8192 | ||
841 | #define FR_BZ_EVQ_RPTR_P0_ROWS 1024 | ||
842 | /* EVQ_RPTR_REG_KER: Event queue read pointer register */ | ||
843 | #define FR_AA_EVQ_RPTR_KER 0x00011b00 | ||
844 | #define FR_AA_EVQ_RPTR_KER_STEP 4 | ||
845 | #define FR_AA_EVQ_RPTR_KER_ROWS 4 | ||
846 | /* EVQ_RPTR_REG: Event queue read pointer register */ | ||
847 | #define FR_BZ_EVQ_RPTR 0x00fa0000 | ||
848 | #define FR_BZ_EVQ_RPTR_STEP 16 | ||
849 | #define FR_BB_EVQ_RPTR_ROWS 4096 | ||
850 | #define FR_CZ_EVQ_RPTR_ROWS 1024 | ||
851 | /* EVQ_RPTR_REGP123: Event queue read pointer register */ | ||
852 | #define FR_BB_EVQ_RPTR_P123 0x01000400 | ||
853 | #define FR_BB_EVQ_RPTR_P123_STEP 8192 | ||
854 | #define FR_BB_EVQ_RPTR_P123_ROWS 3072 | ||
855 | #define FRF_AZ_EVQ_RPTR_VLD_LBN 15 | ||
856 | #define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 | ||
857 | #define FRF_AZ_EVQ_RPTR_LBN 0 | ||
858 | #define FRF_AZ_EVQ_RPTR_WIDTH 15 | ||
859 | |||
860 | /* TIMER_COMMAND_REGP0: Timer Command Registers */ | ||
861 | #define FR_BZ_TIMER_COMMAND_P0 0x00000420 | ||
862 | #define FR_BZ_TIMER_COMMAND_P0_STEP 8192 | ||
863 | #define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 | ||
864 | /* TIMER_COMMAND_REG_KER: Timer Command Registers */ | ||
865 | #define FR_AA_TIMER_COMMAND_KER 0x00000420 | ||
866 | #define FR_AA_TIMER_COMMAND_KER_STEP 8192 | ||
867 | #define FR_AA_TIMER_COMMAND_KER_ROWS 4 | ||
868 | /* TIMER_COMMAND_REGP123: Timer Command Registers */ | ||
869 | #define FR_BB_TIMER_COMMAND_P123 0x01000420 | ||
870 | #define FR_BB_TIMER_COMMAND_P123_STEP 8192 | ||
871 | #define FR_BB_TIMER_COMMAND_P123_ROWS 3072 | ||
872 | #define FRF_CZ_TC_TIMER_MODE_LBN 14 | ||
873 | #define FRF_CZ_TC_TIMER_MODE_WIDTH 2 | ||
874 | #define FRF_AB_TC_TIMER_MODE_LBN 12 | ||
875 | #define FRF_AB_TC_TIMER_MODE_WIDTH 2 | ||
876 | #define FRF_CZ_TC_TIMER_VAL_LBN 0 | ||
877 | #define FRF_CZ_TC_TIMER_VAL_WIDTH 14 | ||
878 | #define FRF_AB_TC_TIMER_VAL_LBN 0 | ||
879 | #define FRF_AB_TC_TIMER_VAL_WIDTH 12 | ||
880 | |||
881 | /* DRV_EV_REG: Driver generated event register */ | ||
882 | #define FR_AZ_DRV_EV 0x00000440 | ||
883 | #define FRF_AZ_DRV_EV_QID_LBN 64 | ||
884 | #define FRF_AZ_DRV_EV_QID_WIDTH 12 | ||
885 | #define FRF_AZ_DRV_EV_DATA_LBN 0 | ||
886 | #define FRF_AZ_DRV_EV_DATA_WIDTH 64 | ||
887 | |||
888 | /* EVQ_CTL_REG: Event queue control register */ | ||
889 | #define FR_AZ_EVQ_CTL 0x00000450 | ||
890 | #define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 | ||
891 | #define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 | ||
892 | #define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 | ||
893 | #define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 | ||
894 | #define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 | ||
895 | #define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 | ||
896 | #define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 | ||
897 | #define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 | ||
898 | #define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 | ||
899 | #define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 | ||
900 | |||
901 | /* EVQ_CNT1_REG: Event counter 1 register */ | ||
902 | #define FR_AZ_EVQ_CNT1 0x00000460 | ||
903 | #define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 | ||
904 | #define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 | ||
905 | #define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 | ||
906 | #define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 | ||
907 | #define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 | ||
908 | #define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 | ||
909 | #define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 | ||
910 | #define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 | ||
911 | #define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 | ||
912 | #define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 | ||
913 | #define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 | ||
914 | #define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 | ||
915 | #define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 | ||
916 | #define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 | ||
917 | |||
918 | /* EVQ_CNT2_REG: Event counter 2 register */ | ||
919 | #define FR_AZ_EVQ_CNT2 0x00000470 | ||
920 | #define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 | ||
921 | #define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 | ||
922 | #define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 | ||
923 | #define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 | ||
924 | #define FRF_AZ_EVQ_RDY_CNT_LBN 80 | ||
925 | #define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 | ||
926 | #define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 | ||
927 | #define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 | ||
928 | #define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 | ||
929 | #define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 | ||
930 | #define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 | ||
931 | #define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 | ||
932 | #define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 | ||
933 | #define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 | ||
934 | |||
935 | /* USR_EV_REG: Event mailbox register */ | ||
936 | #define FR_CZ_USR_EV 0x00000540 | ||
937 | #define FR_CZ_USR_EV_STEP 8192 | ||
938 | #define FR_CZ_USR_EV_ROWS 1024 | ||
939 | #define FRF_CZ_USR_EV_DATA_LBN 0 | ||
940 | #define FRF_CZ_USR_EV_DATA_WIDTH 32 | ||
941 | |||
942 | /* BUF_TBL_CFG_REG: Buffer table configuration register */ | ||
943 | #define FR_AZ_BUF_TBL_CFG 0x00000600 | ||
944 | #define FRF_AZ_BUF_TBL_MODE_LBN 3 | ||
945 | #define FRF_AZ_BUF_TBL_MODE_WIDTH 1 | ||
946 | |||
947 | /* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ | ||
948 | #define FR_AZ_SRM_RX_DC_CFG 0x00000610 | ||
949 | #define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 | ||
950 | #define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 | ||
951 | #define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 | ||
952 | #define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 | ||
953 | |||
954 | /* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ | ||
955 | #define FR_AZ_SRM_TX_DC_CFG 0x00000620 | ||
956 | #define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 | ||
957 | #define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 | ||
958 | |||
959 | /* SRM_CFG_REG: SRAM configuration register */ | ||
960 | #define FR_AZ_SRM_CFG 0x00000630 | ||
961 | #define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 | ||
962 | #define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 | ||
963 | #define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 | ||
964 | #define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 | ||
965 | #define FRF_AZ_SRM_INIT_EN_LBN 3 | ||
966 | #define FRF_AZ_SRM_INIT_EN_WIDTH 1 | ||
967 | #define FRF_AZ_SRM_NUM_BANK_LBN 2 | ||
968 | #define FRF_AZ_SRM_NUM_BANK_WIDTH 1 | ||
969 | #define FRF_AZ_SRM_BANK_SIZE_LBN 0 | ||
970 | #define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 | ||
971 | |||
972 | /* BUF_TBL_UPD_REG: Buffer table update register */ | ||
973 | #define FR_AZ_BUF_TBL_UPD 0x00000650 | ||
974 | #define FRF_AZ_BUF_UPD_CMD_LBN 63 | ||
975 | #define FRF_AZ_BUF_UPD_CMD_WIDTH 1 | ||
976 | #define FRF_AZ_BUF_CLR_CMD_LBN 62 | ||
977 | #define FRF_AZ_BUF_CLR_CMD_WIDTH 1 | ||
978 | #define FRF_AZ_BUF_CLR_END_ID_LBN 32 | ||
979 | #define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 | ||
980 | #define FRF_AZ_BUF_CLR_START_ID_LBN 0 | ||
981 | #define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 | ||
982 | |||
983 | /* SRM_UPD_EVQ_REG: Buffer table update register */ | ||
984 | #define FR_AZ_SRM_UPD_EVQ 0x00000660 | ||
985 | #define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 | ||
986 | #define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 | ||
987 | |||
988 | /* SRAM_PARITY_REG: SRAM parity register. */ | ||
989 | #define FR_AZ_SRAM_PARITY 0x00000670 | ||
990 | #define FRF_CZ_BYPASS_ECC_LBN 3 | ||
991 | #define FRF_CZ_BYPASS_ECC_WIDTH 1 | ||
992 | #define FRF_CZ_SEC_INT_LBN 2 | ||
993 | #define FRF_CZ_SEC_INT_WIDTH 1 | ||
994 | #define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 | ||
995 | #define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 | ||
996 | #define FRF_AB_FORCE_SRAM_PERR_LBN 0 | ||
997 | #define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 | ||
998 | #define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 | ||
999 | #define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 | ||
1000 | |||
1001 | /* RX_CFG_REG: Receive configuration register */ | ||
1002 | #define FR_AZ_RX_CFG 0x00000800 | ||
1003 | #define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 | ||
1004 | #define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 | ||
1005 | #define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 | ||
1006 | #define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 | ||
1007 | #define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 | ||
1008 | #define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 | ||
1009 | #define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 | ||
1010 | #define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 | ||
1011 | #define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 | ||
1012 | #define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 | ||
1013 | #define FRF_BZ_RX_TCP_SUP_LBN 48 | ||
1014 | #define FRF_BZ_RX_TCP_SUP_WIDTH 1 | ||
1015 | #define FRF_BZ_RX_INGR_EN_LBN 47 | ||
1016 | #define FRF_BZ_RX_INGR_EN_WIDTH 1 | ||
1017 | #define FRF_BZ_RX_IP_HASH_LBN 46 | ||
1018 | #define FRF_BZ_RX_IP_HASH_WIDTH 1 | ||
1019 | #define FRF_BZ_RX_HASH_ALG_LBN 45 | ||
1020 | #define FRF_BZ_RX_HASH_ALG_WIDTH 1 | ||
1021 | #define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 | ||
1022 | #define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 | ||
1023 | #define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 | ||
1024 | #define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 | ||
1025 | #define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 | ||
1026 | #define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 | ||
1027 | #define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 | ||
1028 | #define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 | ||
1029 | #define FRF_BZ_RX_OWNERR_CTL_LBN 38 | ||
1030 | #define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 | ||
1031 | #define FRF_BZ_RX_XON_TX_TH_LBN 33 | ||
1032 | #define FRF_BZ_RX_XON_TX_TH_WIDTH 5 | ||
1033 | #define FRF_AA_RX_DESC_PUSH_EN_LBN 35 | ||
1034 | #define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 | ||
1035 | #define FRF_AA_RX_RDW_PATCH_EN_LBN 34 | ||
1036 | #define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 | ||
1037 | #define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 | ||
1038 | #define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 | ||
1039 | #define FRF_BZ_RX_XOFF_TX_TH_LBN 28 | ||
1040 | #define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 | ||
1041 | #define FRF_AA_RX_OWNERR_CTL_LBN 30 | ||
1042 | #define FRF_AA_RX_OWNERR_CTL_WIDTH 1 | ||
1043 | #define FRF_AA_RX_XON_TX_TH_LBN 25 | ||
1044 | #define FRF_AA_RX_XON_TX_TH_WIDTH 5 | ||
1045 | #define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 | ||
1046 | #define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 | ||
1047 | #define FRF_AA_RX_XOFF_TX_TH_LBN 20 | ||
1048 | #define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 | ||
1049 | #define FRF_AA_RX_USR_BUF_SIZE_LBN 11 | ||
1050 | #define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 | ||
1051 | #define FRF_BZ_RX_XON_MAC_TH_LBN 10 | ||
1052 | #define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 | ||
1053 | #define FRF_AA_RX_XON_MAC_TH_LBN 6 | ||
1054 | #define FRF_AA_RX_XON_MAC_TH_WIDTH 5 | ||
1055 | #define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 | ||
1056 | #define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 | ||
1057 | #define FRF_AA_RX_XOFF_MAC_TH_LBN 1 | ||
1058 | #define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 | ||
1059 | #define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 | ||
1060 | #define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 | ||
1061 | |||
1062 | /* RX_FILTER_CTL_REG: Receive filter control registers */ | ||
1063 | #define FR_BZ_RX_FILTER_CTL 0x00000810 | ||
1064 | #define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 | ||
1065 | #define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 | ||
1066 | #define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 | ||
1067 | #define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 | ||
1068 | #define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 | ||
1069 | #define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 | ||
1070 | #define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 | ||
1071 | #define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 | ||
1072 | #define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 | ||
1073 | #define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 | ||
1074 | #define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 | ||
1075 | #define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 | ||
1076 | #define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 | ||
1077 | #define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 | ||
1078 | #define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 | ||
1079 | #define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 | ||
1080 | #define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 | ||
1081 | #define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 | ||
1082 | #define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 | ||
1083 | #define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 | ||
1084 | #define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 | ||
1085 | #define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 | ||
1086 | #define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 | ||
1087 | #define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 | ||
1088 | #define FRF_BZ_NUM_KER_LBN 24 | ||
1089 | #define FRF_BZ_NUM_KER_WIDTH 2 | ||
1090 | #define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 | ||
1091 | #define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 | ||
1092 | #define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 | ||
1093 | #define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 | ||
1094 | #define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 | ||
1095 | #define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 | ||
1096 | |||
1097 | /* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ | ||
1098 | #define FR_AZ_RX_FLUSH_DESCQ 0x00000820 | ||
1099 | #define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 | ||
1100 | #define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
1101 | #define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 | ||
1102 | #define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 | ||
1103 | |||
1104 | /* RX_DESC_UPD_REGP0: Receive descriptor update register. */ | ||
1105 | #define FR_BZ_RX_DESC_UPD_P0 0x00000830 | ||
1106 | #define FR_BZ_RX_DESC_UPD_P0_STEP 8192 | ||
1107 | #define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 | ||
1108 | /* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ | ||
1109 | #define FR_AA_RX_DESC_UPD_KER 0x00000830 | ||
1110 | #define FR_AA_RX_DESC_UPD_KER_STEP 8192 | ||
1111 | #define FR_AA_RX_DESC_UPD_KER_ROWS 4 | ||
1112 | /* RX_DESC_UPD_REGP123: Receive descriptor update register. */ | ||
1113 | #define FR_BB_RX_DESC_UPD_P123 0x01000830 | ||
1114 | #define FR_BB_RX_DESC_UPD_P123_STEP 8192 | ||
1115 | #define FR_BB_RX_DESC_UPD_P123_ROWS 3072 | ||
1116 | #define FRF_AZ_RX_DESC_WPTR_LBN 96 | ||
1117 | #define FRF_AZ_RX_DESC_WPTR_WIDTH 12 | ||
1118 | #define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 | ||
1119 | #define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 | ||
1120 | #define FRF_AZ_RX_DESC_LBN 0 | ||
1121 | #define FRF_AZ_RX_DESC_WIDTH 64 | ||
1122 | |||
1123 | /* RX_DC_CFG_REG: Receive descriptor cache configuration register */ | ||
1124 | #define FR_AZ_RX_DC_CFG 0x00000840 | ||
1125 | #define FRF_AB_RX_MAX_PF_LBN 2 | ||
1126 | #define FRF_AB_RX_MAX_PF_WIDTH 2 | ||
1127 | #define FRF_AZ_RX_DC_SIZE_LBN 0 | ||
1128 | #define FRF_AZ_RX_DC_SIZE_WIDTH 2 | ||
1129 | #define FFE_AZ_RX_DC_SIZE_64 3 | ||
1130 | #define FFE_AZ_RX_DC_SIZE_32 2 | ||
1131 | #define FFE_AZ_RX_DC_SIZE_16 1 | ||
1132 | #define FFE_AZ_RX_DC_SIZE_8 0 | ||
1133 | |||
1134 | /* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ | ||
1135 | #define FR_AZ_RX_DC_PF_WM 0x00000850 | ||
1136 | #define FRF_AZ_RX_DC_PF_HWM_LBN 6 | ||
1137 | #define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 | ||
1138 | #define FRF_AZ_RX_DC_PF_LWM_LBN 0 | ||
1139 | #define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 | ||
1140 | |||
1141 | /* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ | ||
1142 | #define FR_BZ_RX_RSS_TKEY 0x00000860 | ||
1143 | #define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 | ||
1144 | #define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 | ||
1145 | #define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 | ||
1146 | #define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 | ||
1147 | |||
1148 | /* RX_NODESC_DROP_REG: Receive dropped packet counter register */ | ||
1149 | #define FR_AZ_RX_NODESC_DROP 0x00000880 | ||
1150 | #define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 | ||
1151 | #define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 | ||
1152 | #define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 | ||
1153 | #define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 | ||
1154 | |||
1155 | /* RX_SELF_RST_REG: Receive self reset register */ | ||
1156 | #define FR_AA_RX_SELF_RST 0x00000890 | ||
1157 | #define FRF_AA_RX_ISCSI_DIS_LBN 17 | ||
1158 | #define FRF_AA_RX_ISCSI_DIS_WIDTH 1 | ||
1159 | #define FRF_AA_RX_SW_RST_REG_LBN 16 | ||
1160 | #define FRF_AA_RX_SW_RST_REG_WIDTH 1 | ||
1161 | #define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 | ||
1162 | #define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 | ||
1163 | #define FRF_AA_RX_SELF_RST_EN_LBN 8 | ||
1164 | #define FRF_AA_RX_SELF_RST_EN_WIDTH 1 | ||
1165 | #define FRF_AA_RX_MAX_PF_LAT_LBN 4 | ||
1166 | #define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 | ||
1167 | #define FRF_AA_RX_MAX_LU_LAT_LBN 0 | ||
1168 | #define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 | ||
1169 | |||
1170 | /* RX_DEBUG_REG: undocumented register */ | ||
1171 | #define FR_AZ_RX_DEBUG 0x000008a0 | ||
1172 | #define FRF_AZ_RX_DEBUG_LBN 0 | ||
1173 | #define FRF_AZ_RX_DEBUG_WIDTH 64 | ||
1174 | |||
1175 | /* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ | ||
1176 | #define FR_AZ_RX_PUSH_DROP 0x000008b0 | ||
1177 | #define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 | ||
1178 | #define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 | ||
1179 | |||
1180 | /* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ | ||
1181 | #define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 | ||
1182 | #define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 | ||
1183 | #define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 | ||
1184 | |||
1185 | /* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ | ||
1186 | #define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 | ||
1187 | #define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 | ||
1188 | #define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 | ||
1189 | |||
1190 | /* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ | ||
1191 | #define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 | ||
1192 | #define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 | ||
1193 | #define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 | ||
1194 | #define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 | ||
1195 | #define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 | ||
1196 | #define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 | ||
1197 | #define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 | ||
1198 | #define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 | ||
1199 | #define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 | ||
1200 | |||
1201 | /* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ | ||
1202 | #define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 | ||
1203 | #define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 | ||
1204 | #define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
1205 | #define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 | ||
1206 | #define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 | ||
1207 | |||
1208 | /* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ | ||
1209 | #define FR_BZ_TX_DESC_UPD_P0 0x00000a10 | ||
1210 | #define FR_BZ_TX_DESC_UPD_P0_STEP 8192 | ||
1211 | #define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 | ||
1212 | /* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ | ||
1213 | #define FR_AA_TX_DESC_UPD_KER 0x00000a10 | ||
1214 | #define FR_AA_TX_DESC_UPD_KER_STEP 8192 | ||
1215 | #define FR_AA_TX_DESC_UPD_KER_ROWS 8 | ||
1216 | /* TX_DESC_UPD_REGP123: Transmit descriptor update register. */ | ||
1217 | #define FR_BB_TX_DESC_UPD_P123 0x01000a10 | ||
1218 | #define FR_BB_TX_DESC_UPD_P123_STEP 8192 | ||
1219 | #define FR_BB_TX_DESC_UPD_P123_ROWS 3072 | ||
1220 | #define FRF_AZ_TX_DESC_WPTR_LBN 96 | ||
1221 | #define FRF_AZ_TX_DESC_WPTR_WIDTH 12 | ||
1222 | #define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 | ||
1223 | #define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 | ||
1224 | #define FRF_AZ_TX_DESC_LBN 0 | ||
1225 | #define FRF_AZ_TX_DESC_WIDTH 95 | ||
1226 | |||
1227 | /* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ | ||
1228 | #define FR_AZ_TX_DC_CFG 0x00000a20 | ||
1229 | #define FRF_AZ_TX_DC_SIZE_LBN 0 | ||
1230 | #define FRF_AZ_TX_DC_SIZE_WIDTH 2 | ||
1231 | #define FFE_AZ_TX_DC_SIZE_32 2 | ||
1232 | #define FFE_AZ_TX_DC_SIZE_16 1 | ||
1233 | #define FFE_AZ_TX_DC_SIZE_8 0 | ||
1234 | |||
1235 | /* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ | ||
1236 | #define FR_AA_TX_CHKSM_CFG 0x00000a30 | ||
1237 | #define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 | ||
1238 | #define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 | ||
1239 | #define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 | ||
1240 | #define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 | ||
1241 | #define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 | ||
1242 | #define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 | ||
1243 | #define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 | ||
1244 | #define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 | ||
1245 | |||
1246 | /* TX_CFG_REG: Transmit configuration register */ | ||
1247 | #define FR_AZ_TX_CFG 0x00000a50 | ||
1248 | #define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 | ||
1249 | #define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 | ||
1250 | #define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 | ||
1251 | #define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 | ||
1252 | #define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 | ||
1253 | #define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 | ||
1254 | #define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 | ||
1255 | #define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 | ||
1256 | #define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 | ||
1257 | #define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 | ||
1258 | #define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 | ||
1259 | #define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 | ||
1260 | #define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 | ||
1261 | #define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 | ||
1262 | #define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 | ||
1263 | #define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 | ||
1264 | #define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 | ||
1265 | #define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 | ||
1266 | #define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 | ||
1267 | #define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 | ||
1268 | #define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 | ||
1269 | #define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 | ||
1270 | #define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 | ||
1271 | #define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 | ||
1272 | #define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 | ||
1273 | #define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 | ||
1274 | #define FRF_AZ_TX_P1_PRI_EN_LBN 4 | ||
1275 | #define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 | ||
1276 | #define FRF_AZ_TX_OWNERR_CTL_LBN 2 | ||
1277 | #define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 | ||
1278 | #define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 | ||
1279 | #define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 | ||
1280 | #define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 | ||
1281 | #define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 | ||
1282 | |||
1283 | /* TX_PUSH_DROP_REG: Transmit push dropped register */ | ||
1284 | #define FR_AZ_TX_PUSH_DROP 0x00000a60 | ||
1285 | #define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 | ||
1286 | #define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 | ||
1287 | |||
1288 | /* TX_RESERVED_REG: Transmit configuration register */ | ||
1289 | #define FR_AZ_TX_RESERVED 0x00000a80 | ||
1290 | #define FRF_AZ_TX_EVT_CNT_LBN 121 | ||
1291 | #define FRF_AZ_TX_EVT_CNT_WIDTH 7 | ||
1292 | #define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 | ||
1293 | #define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 | ||
1294 | #define FRF_AZ_TX_RD_COMP_TMR_LBN 96 | ||
1295 | #define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 | ||
1296 | #define FRF_AZ_TX_PUSH_EN_LBN 89 | ||
1297 | #define FRF_AZ_TX_PUSH_EN_WIDTH 1 | ||
1298 | #define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 | ||
1299 | #define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 | ||
1300 | #define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 | ||
1301 | #define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 | ||
1302 | #define FRF_AZ_TX_DMAR_ST_P0_LBN 81 | ||
1303 | #define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 | ||
1304 | #define FRF_AZ_TX_DMAQ_ST_LBN 78 | ||
1305 | #define FRF_AZ_TX_DMAQ_ST_WIDTH 1 | ||
1306 | #define FRF_AZ_TX_RX_SPACER_LBN 64 | ||
1307 | #define FRF_AZ_TX_RX_SPACER_WIDTH 8 | ||
1308 | #define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 | ||
1309 | #define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 | ||
1310 | #define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 | ||
1311 | #define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 | ||
1312 | #define FRF_AZ_TX_PS_EVT_DIS_LBN 58 | ||
1313 | #define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 | ||
1314 | #define FRF_AZ_TX_RX_SPACER_EN_LBN 57 | ||
1315 | #define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 | ||
1316 | #define FRF_AZ_TX_XP_TIMER_LBN 52 | ||
1317 | #define FRF_AZ_TX_XP_TIMER_WIDTH 5 | ||
1318 | #define FRF_AZ_TX_PREF_SPACER_LBN 44 | ||
1319 | #define FRF_AZ_TX_PREF_SPACER_WIDTH 8 | ||
1320 | #define FRF_AZ_TX_PREF_WD_TMR_LBN 22 | ||
1321 | #define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 | ||
1322 | #define FRF_AZ_TX_ONLY1TAG_LBN 21 | ||
1323 | #define FRF_AZ_TX_ONLY1TAG_WIDTH 1 | ||
1324 | #define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 | ||
1325 | #define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 | ||
1326 | #define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 | ||
1327 | #define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 | ||
1328 | #define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 | ||
1329 | #define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 | ||
1330 | #define FRF_AA_TX_DMA_FF_THR_LBN 16 | ||
1331 | #define FRF_AA_TX_DMA_FF_THR_WIDTH 1 | ||
1332 | #define FRF_AZ_TX_DMA_SPACER_LBN 8 | ||
1333 | #define FRF_AZ_TX_DMA_SPACER_WIDTH 8 | ||
1334 | #define FRF_AA_TX_TCP_DIS_LBN 7 | ||
1335 | #define FRF_AA_TX_TCP_DIS_WIDTH 1 | ||
1336 | #define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 | ||
1337 | #define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 | ||
1338 | #define FRF_AA_TX_IP_DIS_LBN 6 | ||
1339 | #define FRF_AA_TX_IP_DIS_WIDTH 1 | ||
1340 | #define FRF_AZ_TX_MAX_CPL_LBN 2 | ||
1341 | #define FRF_AZ_TX_MAX_CPL_WIDTH 2 | ||
1342 | #define FFE_AZ_TX_MAX_CPL_16 3 | ||
1343 | #define FFE_AZ_TX_MAX_CPL_8 2 | ||
1344 | #define FFE_AZ_TX_MAX_CPL_4 1 | ||
1345 | #define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 | ||
1346 | #define FRF_AZ_TX_MAX_PREF_LBN 0 | ||
1347 | #define FRF_AZ_TX_MAX_PREF_WIDTH 2 | ||
1348 | #define FFE_AZ_TX_MAX_PREF_32 3 | ||
1349 | #define FFE_AZ_TX_MAX_PREF_16 2 | ||
1350 | #define FFE_AZ_TX_MAX_PREF_8 1 | ||
1351 | #define FFE_AZ_TX_MAX_PREF_OFF 0 | ||
1352 | |||
1353 | /* TX_PACE_REG: Transmit pace control register */ | ||
1354 | #define FR_BZ_TX_PACE 0x00000a90 | ||
1355 | #define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 | ||
1356 | #define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 | ||
1357 | #define FRF_BZ_TX_PACE_SB_AF_LBN 9 | ||
1358 | #define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 | ||
1359 | #define FRF_BZ_TX_PACE_FB_BASE_LBN 5 | ||
1360 | #define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 | ||
1361 | #define FRF_BZ_TX_PACE_BIN_TH_LBN 0 | ||
1362 | #define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 | ||
1363 | |||
1364 | /* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ | ||
1365 | #define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 | ||
1366 | #define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 | ||
1367 | #define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 | ||
1368 | |||
1369 | /* TX_VLAN_REG: Transmit VLAN tag register */ | ||
1370 | #define FR_BB_TX_VLAN 0x00000ae0 | ||
1371 | #define FRF_BB_TX_VLAN_EN_LBN 127 | ||
1372 | #define FRF_BB_TX_VLAN_EN_WIDTH 1 | ||
1373 | #define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 | ||
1374 | #define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 | ||
1375 | #define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 | ||
1376 | #define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 | ||
1377 | #define FRF_BB_TX_VLAN7_LBN 112 | ||
1378 | #define FRF_BB_TX_VLAN7_WIDTH 12 | ||
1379 | #define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 | ||
1380 | #define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 | ||
1381 | #define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 | ||
1382 | #define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 | ||
1383 | #define FRF_BB_TX_VLAN6_LBN 96 | ||
1384 | #define FRF_BB_TX_VLAN6_WIDTH 12 | ||
1385 | #define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 | ||
1386 | #define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 | ||
1387 | #define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 | ||
1388 | #define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 | ||
1389 | #define FRF_BB_TX_VLAN5_LBN 80 | ||
1390 | #define FRF_BB_TX_VLAN5_WIDTH 12 | ||
1391 | #define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 | ||
1392 | #define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 | ||
1393 | #define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 | ||
1394 | #define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 | ||
1395 | #define FRF_BB_TX_VLAN4_LBN 64 | ||
1396 | #define FRF_BB_TX_VLAN4_WIDTH 12 | ||
1397 | #define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61 | ||
1398 | #define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1 | ||
1399 | #define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60 | ||
1400 | #define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1 | ||
1401 | #define FRF_BB_TX_VLAN3_LBN 48 | ||
1402 | #define FRF_BB_TX_VLAN3_WIDTH 12 | ||
1403 | #define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45 | ||
1404 | #define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1 | ||
1405 | #define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44 | ||
1406 | #define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1 | ||
1407 | #define FRF_BB_TX_VLAN2_LBN 32 | ||
1408 | #define FRF_BB_TX_VLAN2_WIDTH 12 | ||
1409 | #define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29 | ||
1410 | #define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1 | ||
1411 | #define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28 | ||
1412 | #define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1 | ||
1413 | #define FRF_BB_TX_VLAN1_LBN 16 | ||
1414 | #define FRF_BB_TX_VLAN1_WIDTH 12 | ||
1415 | #define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13 | ||
1416 | #define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1 | ||
1417 | #define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12 | ||
1418 | #define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1 | ||
1419 | #define FRF_BB_TX_VLAN0_LBN 0 | ||
1420 | #define FRF_BB_TX_VLAN0_WIDTH 12 | ||
1421 | |||
1422 | /* TX_IPFIL_PORTEN_REG: Transmit filter control register */ | ||
1423 | #define FR_BZ_TX_IPFIL_PORTEN 0x00000af0 | ||
1424 | #define FRF_BZ_TX_MADR0_FIL_EN_LBN 64 | ||
1425 | #define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1 | ||
1426 | #define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62 | ||
1427 | #define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1 | ||
1428 | #define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60 | ||
1429 | #define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1 | ||
1430 | #define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58 | ||
1431 | #define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1 | ||
1432 | #define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56 | ||
1433 | #define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1 | ||
1434 | #define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54 | ||
1435 | #define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1 | ||
1436 | #define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52 | ||
1437 | #define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1 | ||
1438 | #define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50 | ||
1439 | #define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1 | ||
1440 | #define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48 | ||
1441 | #define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1 | ||
1442 | #define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46 | ||
1443 | #define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1 | ||
1444 | #define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44 | ||
1445 | #define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1 | ||
1446 | #define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42 | ||
1447 | #define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1 | ||
1448 | #define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40 | ||
1449 | #define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1 | ||
1450 | #define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38 | ||
1451 | #define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1 | ||
1452 | #define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36 | ||
1453 | #define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1 | ||
1454 | #define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34 | ||
1455 | #define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1 | ||
1456 | #define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32 | ||
1457 | #define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1 | ||
1458 | #define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30 | ||
1459 | #define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1 | ||
1460 | #define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28 | ||
1461 | #define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1 | ||
1462 | #define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26 | ||
1463 | #define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1 | ||
1464 | #define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24 | ||
1465 | #define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1 | ||
1466 | #define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22 | ||
1467 | #define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1 | ||
1468 | #define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20 | ||
1469 | #define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1 | ||
1470 | #define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18 | ||
1471 | #define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1 | ||
1472 | #define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16 | ||
1473 | #define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1 | ||
1474 | #define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14 | ||
1475 | #define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1 | ||
1476 | #define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12 | ||
1477 | #define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1 | ||
1478 | #define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10 | ||
1479 | #define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1 | ||
1480 | #define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8 | ||
1481 | #define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1 | ||
1482 | #define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6 | ||
1483 | #define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1 | ||
1484 | #define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 | ||
1485 | #define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 | ||
1486 | #define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 | ||
1487 | #define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 | ||
1488 | #define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 | ||
1489 | #define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 | ||
1490 | |||
1491 | /* TX_IPFIL_TBL: Transmit IP source address filter table */ | ||
1492 | #define FR_BB_TX_IPFIL_TBL 0x00000b00 | ||
1493 | #define FR_BB_TX_IPFIL_TBL_STEP 16 | ||
1494 | #define FR_BB_TX_IPFIL_TBL_ROWS 16 | ||
1495 | #define FRF_BB_TX_IPFIL_MASK_1_LBN 96 | ||
1496 | #define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 | ||
1497 | #define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 | ||
1498 | #define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 | ||
1499 | #define FRF_BB_TX_IPFIL_MASK_0_LBN 32 | ||
1500 | #define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 | ||
1501 | #define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 | ||
1502 | #define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 | ||
1503 | |||
1504 | /* MD_TXD_REG: PHY management transmit data register */ | ||
1505 | #define FR_AB_MD_TXD 0x00000c00 | ||
1506 | #define FRF_AB_MD_TXD_LBN 0 | ||
1507 | #define FRF_AB_MD_TXD_WIDTH 16 | ||
1508 | |||
1509 | /* MD_RXD_REG: PHY management receive data register */ | ||
1510 | #define FR_AB_MD_RXD 0x00000c10 | ||
1511 | #define FRF_AB_MD_RXD_LBN 0 | ||
1512 | #define FRF_AB_MD_RXD_WIDTH 16 | ||
1513 | |||
1514 | /* MD_CS_REG: PHY management configuration & status register */ | ||
1515 | #define FR_AB_MD_CS 0x00000c20 | ||
1516 | #define FRF_AB_MD_RD_EN_CMD_LBN 15 | ||
1517 | #define FRF_AB_MD_RD_EN_CMD_WIDTH 1 | ||
1518 | #define FRF_AB_MD_WR_EN_CMD_LBN 14 | ||
1519 | #define FRF_AB_MD_WR_EN_CMD_WIDTH 1 | ||
1520 | #define FRF_AB_MD_ADDR_CMD_LBN 13 | ||
1521 | #define FRF_AB_MD_ADDR_CMD_WIDTH 1 | ||
1522 | #define FRF_AB_MD_PT_LBN 7 | ||
1523 | #define FRF_AB_MD_PT_WIDTH 3 | ||
1524 | #define FRF_AB_MD_PL_LBN 6 | ||
1525 | #define FRF_AB_MD_PL_WIDTH 1 | ||
1526 | #define FRF_AB_MD_INT_CLR_LBN 5 | ||
1527 | #define FRF_AB_MD_INT_CLR_WIDTH 1 | ||
1528 | #define FRF_AB_MD_GC_LBN 4 | ||
1529 | #define FRF_AB_MD_GC_WIDTH 1 | ||
1530 | #define FRF_AB_MD_PRSP_LBN 3 | ||
1531 | #define FRF_AB_MD_PRSP_WIDTH 1 | ||
1532 | #define FRF_AB_MD_RIC_LBN 2 | ||
1533 | #define FRF_AB_MD_RIC_WIDTH 1 | ||
1534 | #define FRF_AB_MD_RDC_LBN 1 | ||
1535 | #define FRF_AB_MD_RDC_WIDTH 1 | ||
1536 | #define FRF_AB_MD_WRC_LBN 0 | ||
1537 | #define FRF_AB_MD_WRC_WIDTH 1 | ||
1538 | |||
1539 | /* MD_PHY_ADR_REG: PHY management PHY address register */ | ||
1540 | #define FR_AB_MD_PHY_ADR 0x00000c30 | ||
1541 | #define FRF_AB_MD_PHY_ADR_LBN 0 | ||
1542 | #define FRF_AB_MD_PHY_ADR_WIDTH 16 | ||
1543 | |||
1544 | /* MD_ID_REG: PHY management ID register */ | ||
1545 | #define FR_AB_MD_ID 0x00000c40 | ||
1546 | #define FRF_AB_MD_PRT_ADR_LBN 11 | ||
1547 | #define FRF_AB_MD_PRT_ADR_WIDTH 5 | ||
1548 | #define FRF_AB_MD_DEV_ADR_LBN 6 | ||
1549 | #define FRF_AB_MD_DEV_ADR_WIDTH 5 | ||
1550 | |||
1551 | /* MD_STAT_REG: PHY management status & mask register */ | ||
1552 | #define FR_AB_MD_STAT 0x00000c50 | ||
1553 | #define FRF_AB_MD_PINT_LBN 4 | ||
1554 | #define FRF_AB_MD_PINT_WIDTH 1 | ||
1555 | #define FRF_AB_MD_DONE_LBN 3 | ||
1556 | #define FRF_AB_MD_DONE_WIDTH 1 | ||
1557 | #define FRF_AB_MD_BSERR_LBN 2 | ||
1558 | #define FRF_AB_MD_BSERR_WIDTH 1 | ||
1559 | #define FRF_AB_MD_LNFL_LBN 1 | ||
1560 | #define FRF_AB_MD_LNFL_WIDTH 1 | ||
1561 | #define FRF_AB_MD_BSY_LBN 0 | ||
1562 | #define FRF_AB_MD_BSY_WIDTH 1 | ||
1563 | |||
1564 | /* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ | ||
1565 | #define FR_AB_MAC_STAT_DMA 0x00000c60 | ||
1566 | #define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 | ||
1567 | #define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 | ||
1568 | #define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 | ||
1569 | #define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 | ||
1570 | |||
1571 | /* MAC_CTRL_REG: Port MAC control register */ | ||
1572 | #define FR_AB_MAC_CTRL 0x00000c80 | ||
1573 | #define FRF_AB_MAC_XOFF_VAL_LBN 16 | ||
1574 | #define FRF_AB_MAC_XOFF_VAL_WIDTH 16 | ||
1575 | #define FRF_BB_TXFIFO_DRAIN_EN_LBN 7 | ||
1576 | #define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 | ||
1577 | #define FRF_AB_MAC_XG_DISTXCRC_LBN 5 | ||
1578 | #define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 | ||
1579 | #define FRF_AB_MAC_BCAD_ACPT_LBN 4 | ||
1580 | #define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 | ||
1581 | #define FRF_AB_MAC_UC_PROM_LBN 3 | ||
1582 | #define FRF_AB_MAC_UC_PROM_WIDTH 1 | ||
1583 | #define FRF_AB_MAC_LINK_STATUS_LBN 2 | ||
1584 | #define FRF_AB_MAC_LINK_STATUS_WIDTH 1 | ||
1585 | #define FRF_AB_MAC_SPEED_LBN 0 | ||
1586 | #define FRF_AB_MAC_SPEED_WIDTH 2 | ||
1587 | #define FFE_AB_MAC_SPEED_10G 3 | ||
1588 | #define FFE_AB_MAC_SPEED_1G 2 | ||
1589 | #define FFE_AB_MAC_SPEED_100M 1 | ||
1590 | #define FFE_AB_MAC_SPEED_10M 0 | ||
1591 | |||
1592 | /* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ | ||
1593 | #define FR_BB_GEN_MODE 0x00000c90 | ||
1594 | #define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 | ||
1595 | #define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 | ||
1596 | #define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 | ||
1597 | #define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 | ||
1598 | #define FRF_BB_XFP_PHY_INT_MASK_LBN 1 | ||
1599 | #define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 | ||
1600 | #define FRF_BB_XG_PHY_INT_MASK_LBN 0 | ||
1601 | #define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 | ||
1602 | |||
1603 | /* MAC_MC_HASH_REG0: Multicast address hash table */ | ||
1604 | #define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 | ||
1605 | #define FRF_AB_MAC_MCAST_HASH0_LBN 0 | ||
1606 | #define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 | ||
1607 | |||
1608 | /* MAC_MC_HASH_REG1: Multicast address hash table */ | ||
1609 | #define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 | ||
1610 | #define FRF_AB_MAC_MCAST_HASH1_LBN 0 | ||
1611 | #define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 | ||
1612 | |||
1613 | /* GM_CFG1_REG: GMAC configuration register 1 */ | ||
1614 | #define FR_AB_GM_CFG1 0x00000e00 | ||
1615 | #define FRF_AB_GM_SW_RST_LBN 31 | ||
1616 | #define FRF_AB_GM_SW_RST_WIDTH 1 | ||
1617 | #define FRF_AB_GM_SIM_RST_LBN 30 | ||
1618 | #define FRF_AB_GM_SIM_RST_WIDTH 1 | ||
1619 | #define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 | ||
1620 | #define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 | ||
1621 | #define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 | ||
1622 | #define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 | ||
1623 | #define FRF_AB_GM_RST_RX_FUNC_LBN 17 | ||
1624 | #define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 | ||
1625 | #define FRF_AB_GM_RST_TX_FUNC_LBN 16 | ||
1626 | #define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 | ||
1627 | #define FRF_AB_GM_LOOP_LBN 8 | ||
1628 | #define FRF_AB_GM_LOOP_WIDTH 1 | ||
1629 | #define FRF_AB_GM_RX_FC_EN_LBN 5 | ||
1630 | #define FRF_AB_GM_RX_FC_EN_WIDTH 1 | ||
1631 | #define FRF_AB_GM_TX_FC_EN_LBN 4 | ||
1632 | #define FRF_AB_GM_TX_FC_EN_WIDTH 1 | ||
1633 | #define FRF_AB_GM_SYNC_RXEN_LBN 3 | ||
1634 | #define FRF_AB_GM_SYNC_RXEN_WIDTH 1 | ||
1635 | #define FRF_AB_GM_RX_EN_LBN 2 | ||
1636 | #define FRF_AB_GM_RX_EN_WIDTH 1 | ||
1637 | #define FRF_AB_GM_SYNC_TXEN_LBN 1 | ||
1638 | #define FRF_AB_GM_SYNC_TXEN_WIDTH 1 | ||
1639 | #define FRF_AB_GM_TX_EN_LBN 0 | ||
1640 | #define FRF_AB_GM_TX_EN_WIDTH 1 | ||
1641 | |||
1642 | /* GM_CFG2_REG: GMAC configuration register 2 */ | ||
1643 | #define FR_AB_GM_CFG2 0x00000e10 | ||
1644 | #define FRF_AB_GM_PAMBL_LEN_LBN 12 | ||
1645 | #define FRF_AB_GM_PAMBL_LEN_WIDTH 4 | ||
1646 | #define FRF_AB_GM_IF_MODE_LBN 8 | ||
1647 | #define FRF_AB_GM_IF_MODE_WIDTH 2 | ||
1648 | #define FFE_AB_IF_MODE_BYTE_MODE 2 | ||
1649 | #define FFE_AB_IF_MODE_NIBBLE_MODE 1 | ||
1650 | #define FRF_AB_GM_HUGE_FRM_EN_LBN 5 | ||
1651 | #define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 | ||
1652 | #define FRF_AB_GM_LEN_CHK_LBN 4 | ||
1653 | #define FRF_AB_GM_LEN_CHK_WIDTH 1 | ||
1654 | #define FRF_AB_GM_PAD_CRC_EN_LBN 2 | ||
1655 | #define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 | ||
1656 | #define FRF_AB_GM_CRC_EN_LBN 1 | ||
1657 | #define FRF_AB_GM_CRC_EN_WIDTH 1 | ||
1658 | #define FRF_AB_GM_FD_LBN 0 | ||
1659 | #define FRF_AB_GM_FD_WIDTH 1 | ||
1660 | |||
1661 | /* GM_IPG_REG: GMAC IPG register */ | ||
1662 | #define FR_AB_GM_IPG 0x00000e20 | ||
1663 | #define FRF_AB_GM_NONB2B_IPG1_LBN 24 | ||
1664 | #define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 | ||
1665 | #define FRF_AB_GM_NONB2B_IPG2_LBN 16 | ||
1666 | #define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 | ||
1667 | #define FRF_AB_GM_MIN_IPG_ENF_LBN 8 | ||
1668 | #define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 | ||
1669 | #define FRF_AB_GM_B2B_IPG_LBN 0 | ||
1670 | #define FRF_AB_GM_B2B_IPG_WIDTH 7 | ||
1671 | |||
1672 | /* GM_HD_REG: GMAC half duplex register */ | ||
1673 | #define FR_AB_GM_HD 0x00000e30 | ||
1674 | #define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 | ||
1675 | #define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 | ||
1676 | #define FRF_AB_GM_ALT_BOFF_EN_LBN 19 | ||
1677 | #define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 | ||
1678 | #define FRF_AB_GM_BP_NO_BOFF_LBN 18 | ||
1679 | #define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 | ||
1680 | #define FRF_AB_GM_DIS_BOFF_LBN 17 | ||
1681 | #define FRF_AB_GM_DIS_BOFF_WIDTH 1 | ||
1682 | #define FRF_AB_GM_EXDEF_TX_EN_LBN 16 | ||
1683 | #define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 | ||
1684 | #define FRF_AB_GM_RTRY_LIMIT_LBN 12 | ||
1685 | #define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 | ||
1686 | #define FRF_AB_GM_COL_WIN_LBN 0 | ||
1687 | #define FRF_AB_GM_COL_WIN_WIDTH 10 | ||
1688 | |||
1689 | /* GM_MAX_FLEN_REG: GMAC maximum frame length register */ | ||
1690 | #define FR_AB_GM_MAX_FLEN 0x00000e40 | ||
1691 | #define FRF_AB_GM_MAX_FLEN_LBN 0 | ||
1692 | #define FRF_AB_GM_MAX_FLEN_WIDTH 16 | ||
1693 | |||
1694 | /* GM_TEST_REG: GMAC test register */ | ||
1695 | #define FR_AB_GM_TEST 0x00000e70 | ||
1696 | #define FRF_AB_GM_MAX_BOFF_LBN 3 | ||
1697 | #define FRF_AB_GM_MAX_BOFF_WIDTH 1 | ||
1698 | #define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 | ||
1699 | #define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 | ||
1700 | #define FRF_AB_GM_TEST_PAUSE_LBN 1 | ||
1701 | #define FRF_AB_GM_TEST_PAUSE_WIDTH 1 | ||
1702 | #define FRF_AB_GM_SHORT_SLOT_LBN 0 | ||
1703 | #define FRF_AB_GM_SHORT_SLOT_WIDTH 1 | ||
1704 | |||
1705 | /* GM_ADR1_REG: GMAC station address register 1 */ | ||
1706 | #define FR_AB_GM_ADR1 0x00000f00 | ||
1707 | #define FRF_AB_GM_ADR_B0_LBN 24 | ||
1708 | #define FRF_AB_GM_ADR_B0_WIDTH 8 | ||
1709 | #define FRF_AB_GM_ADR_B1_LBN 16 | ||
1710 | #define FRF_AB_GM_ADR_B1_WIDTH 8 | ||
1711 | #define FRF_AB_GM_ADR_B2_LBN 8 | ||
1712 | #define FRF_AB_GM_ADR_B2_WIDTH 8 | ||
1713 | #define FRF_AB_GM_ADR_B3_LBN 0 | ||
1714 | #define FRF_AB_GM_ADR_B3_WIDTH 8 | ||
1715 | |||
1716 | /* GM_ADR2_REG: GMAC station address register 2 */ | ||
1717 | #define FR_AB_GM_ADR2 0x00000f10 | ||
1718 | #define FRF_AB_GM_ADR_B4_LBN 24 | ||
1719 | #define FRF_AB_GM_ADR_B4_WIDTH 8 | ||
1720 | #define FRF_AB_GM_ADR_B5_LBN 16 | ||
1721 | #define FRF_AB_GM_ADR_B5_WIDTH 8 | ||
1722 | |||
1723 | /* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ | ||
1724 | #define FR_AB_GMF_CFG0 0x00000f20 | ||
1725 | #define FRF_AB_GMF_FTFENRPLY_LBN 20 | ||
1726 | #define FRF_AB_GMF_FTFENRPLY_WIDTH 1 | ||
1727 | #define FRF_AB_GMF_STFENRPLY_LBN 19 | ||
1728 | #define FRF_AB_GMF_STFENRPLY_WIDTH 1 | ||
1729 | #define FRF_AB_GMF_FRFENRPLY_LBN 18 | ||
1730 | #define FRF_AB_GMF_FRFENRPLY_WIDTH 1 | ||
1731 | #define FRF_AB_GMF_SRFENRPLY_LBN 17 | ||
1732 | #define FRF_AB_GMF_SRFENRPLY_WIDTH 1 | ||
1733 | #define FRF_AB_GMF_WTMENRPLY_LBN 16 | ||
1734 | #define FRF_AB_GMF_WTMENRPLY_WIDTH 1 | ||
1735 | #define FRF_AB_GMF_FTFENREQ_LBN 12 | ||
1736 | #define FRF_AB_GMF_FTFENREQ_WIDTH 1 | ||
1737 | #define FRF_AB_GMF_STFENREQ_LBN 11 | ||
1738 | #define FRF_AB_GMF_STFENREQ_WIDTH 1 | ||
1739 | #define FRF_AB_GMF_FRFENREQ_LBN 10 | ||
1740 | #define FRF_AB_GMF_FRFENREQ_WIDTH 1 | ||
1741 | #define FRF_AB_GMF_SRFENREQ_LBN 9 | ||
1742 | #define FRF_AB_GMF_SRFENREQ_WIDTH 1 | ||
1743 | #define FRF_AB_GMF_WTMENREQ_LBN 8 | ||
1744 | #define FRF_AB_GMF_WTMENREQ_WIDTH 1 | ||
1745 | #define FRF_AB_GMF_HSTRSTFT_LBN 4 | ||
1746 | #define FRF_AB_GMF_HSTRSTFT_WIDTH 1 | ||
1747 | #define FRF_AB_GMF_HSTRSTST_LBN 3 | ||
1748 | #define FRF_AB_GMF_HSTRSTST_WIDTH 1 | ||
1749 | #define FRF_AB_GMF_HSTRSTFR_LBN 2 | ||
1750 | #define FRF_AB_GMF_HSTRSTFR_WIDTH 1 | ||
1751 | #define FRF_AB_GMF_HSTRSTSR_LBN 1 | ||
1752 | #define FRF_AB_GMF_HSTRSTSR_WIDTH 1 | ||
1753 | #define FRF_AB_GMF_HSTRSTWT_LBN 0 | ||
1754 | #define FRF_AB_GMF_HSTRSTWT_WIDTH 1 | ||
1755 | |||
1756 | /* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ | ||
1757 | #define FR_AB_GMF_CFG1 0x00000f30 | ||
1758 | #define FRF_AB_GMF_CFGFRTH_LBN 16 | ||
1759 | #define FRF_AB_GMF_CFGFRTH_WIDTH 5 | ||
1760 | #define FRF_AB_GMF_CFGXOFFRTX_LBN 0 | ||
1761 | #define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 | ||
1762 | |||
1763 | /* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ | ||
1764 | #define FR_AB_GMF_CFG2 0x00000f40 | ||
1765 | #define FRF_AB_GMF_CFGHWM_LBN 16 | ||
1766 | #define FRF_AB_GMF_CFGHWM_WIDTH 6 | ||
1767 | #define FRF_AB_GMF_CFGLWM_LBN 0 | ||
1768 | #define FRF_AB_GMF_CFGLWM_WIDTH 6 | ||
1769 | |||
1770 | /* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ | ||
1771 | #define FR_AB_GMF_CFG3 0x00000f50 | ||
1772 | #define FRF_AB_GMF_CFGHWMFT_LBN 16 | ||
1773 | #define FRF_AB_GMF_CFGHWMFT_WIDTH 6 | ||
1774 | #define FRF_AB_GMF_CFGFTTH_LBN 0 | ||
1775 | #define FRF_AB_GMF_CFGFTTH_WIDTH 6 | ||
1776 | |||
1777 | /* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ | ||
1778 | #define FR_AB_GMF_CFG4 0x00000f60 | ||
1779 | #define FRF_AB_GMF_HSTFLTRFRM_LBN 0 | ||
1780 | #define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 | ||
1781 | |||
1782 | /* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ | ||
1783 | #define FR_AB_GMF_CFG5 0x00000f70 | ||
1784 | #define FRF_AB_GMF_CFGHDPLX_LBN 22 | ||
1785 | #define FRF_AB_GMF_CFGHDPLX_WIDTH 1 | ||
1786 | #define FRF_AB_GMF_SRFULL_LBN 21 | ||
1787 | #define FRF_AB_GMF_SRFULL_WIDTH 1 | ||
1788 | #define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 | ||
1789 | #define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 | ||
1790 | #define FRF_AB_GMF_CFGBYTMODE_LBN 19 | ||
1791 | #define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 | ||
1792 | #define FRF_AB_GMF_HSTDRPLT64_LBN 18 | ||
1793 | #define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 | ||
1794 | #define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 | ||
1795 | #define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 | ||
1796 | |||
1797 | /* TX_SRC_MAC_TBL: Transmit MAC source address filter table */ | ||
1798 | #define FR_BB_TX_SRC_MAC_TBL 0x00001000 | ||
1799 | #define FR_BB_TX_SRC_MAC_TBL_STEP 16 | ||
1800 | #define FR_BB_TX_SRC_MAC_TBL_ROWS 16 | ||
1801 | #define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 | ||
1802 | #define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 | ||
1803 | #define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 | ||
1804 | #define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 | ||
1805 | |||
1806 | /* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ | ||
1807 | #define FR_BB_TX_SRC_MAC_CTL 0x00001100 | ||
1808 | #define FRF_BB_TX_SRC_DROP_CTR_LBN 16 | ||
1809 | #define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 | ||
1810 | #define FRF_BB_TX_SRC_FLTR_EN_LBN 15 | ||
1811 | #define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 | ||
1812 | #define FRF_BB_TX_DROP_CTR_CLR_LBN 12 | ||
1813 | #define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 | ||
1814 | #define FRF_BB_TX_MAC_QID_SEL_LBN 0 | ||
1815 | #define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 | ||
1816 | |||
1817 | /* XM_ADR_LO_REG: XGMAC address register low */ | ||
1818 | #define FR_AB_XM_ADR_LO 0x00001200 | ||
1819 | #define FRF_AB_XM_ADR_LO_LBN 0 | ||
1820 | #define FRF_AB_XM_ADR_LO_WIDTH 32 | ||
1821 | |||
1822 | /* XM_ADR_HI_REG: XGMAC address register high */ | ||
1823 | #define FR_AB_XM_ADR_HI 0x00001210 | ||
1824 | #define FRF_AB_XM_ADR_HI_LBN 0 | ||
1825 | #define FRF_AB_XM_ADR_HI_WIDTH 16 | ||
1826 | |||
1827 | /* XM_GLB_CFG_REG: XGMAC global configuration */ | ||
1828 | #define FR_AB_XM_GLB_CFG 0x00001220 | ||
1829 | #define FRF_AB_XM_RMTFLT_GEN_LBN 17 | ||
1830 | #define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 | ||
1831 | #define FRF_AB_XM_DEBUG_MODE_LBN 16 | ||
1832 | #define FRF_AB_XM_DEBUG_MODE_WIDTH 1 | ||
1833 | #define FRF_AB_XM_RX_STAT_EN_LBN 11 | ||
1834 | #define FRF_AB_XM_RX_STAT_EN_WIDTH 1 | ||
1835 | #define FRF_AB_XM_TX_STAT_EN_LBN 10 | ||
1836 | #define FRF_AB_XM_TX_STAT_EN_WIDTH 1 | ||
1837 | #define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 | ||
1838 | #define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 | ||
1839 | #define FRF_AB_XM_WAN_MODE_LBN 5 | ||
1840 | #define FRF_AB_XM_WAN_MODE_WIDTH 1 | ||
1841 | #define FRF_AB_XM_INTCLR_MODE_LBN 3 | ||
1842 | #define FRF_AB_XM_INTCLR_MODE_WIDTH 1 | ||
1843 | #define FRF_AB_XM_CORE_RST_LBN 0 | ||
1844 | #define FRF_AB_XM_CORE_RST_WIDTH 1 | ||
1845 | |||
1846 | /* XM_TX_CFG_REG: XGMAC transmit configuration */ | ||
1847 | #define FR_AB_XM_TX_CFG 0x00001230 | ||
1848 | #define FRF_AB_XM_TX_PROG_LBN 24 | ||
1849 | #define FRF_AB_XM_TX_PROG_WIDTH 1 | ||
1850 | #define FRF_AB_XM_IPG_LBN 16 | ||
1851 | #define FRF_AB_XM_IPG_WIDTH 4 | ||
1852 | #define FRF_AB_XM_FCNTL_LBN 10 | ||
1853 | #define FRF_AB_XM_FCNTL_WIDTH 1 | ||
1854 | #define FRF_AB_XM_TXCRC_LBN 8 | ||
1855 | #define FRF_AB_XM_TXCRC_WIDTH 1 | ||
1856 | #define FRF_AB_XM_EDRC_LBN 6 | ||
1857 | #define FRF_AB_XM_EDRC_WIDTH 1 | ||
1858 | #define FRF_AB_XM_AUTO_PAD_LBN 5 | ||
1859 | #define FRF_AB_XM_AUTO_PAD_WIDTH 1 | ||
1860 | #define FRF_AB_XM_TX_PRMBL_LBN 2 | ||
1861 | #define FRF_AB_XM_TX_PRMBL_WIDTH 1 | ||
1862 | #define FRF_AB_XM_TXEN_LBN 1 | ||
1863 | #define FRF_AB_XM_TXEN_WIDTH 1 | ||
1864 | #define FRF_AB_XM_TX_RST_LBN 0 | ||
1865 | #define FRF_AB_XM_TX_RST_WIDTH 1 | ||
1866 | |||
1867 | /* XM_RX_CFG_REG: XGMAC receive configuration */ | ||
1868 | #define FR_AB_XM_RX_CFG 0x00001240 | ||
1869 | #define FRF_AB_XM_PASS_LENERR_LBN 26 | ||
1870 | #define FRF_AB_XM_PASS_LENERR_WIDTH 1 | ||
1871 | #define FRF_AB_XM_PASS_CRC_ERR_LBN 25 | ||
1872 | #define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 | ||
1873 | #define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 | ||
1874 | #define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 | ||
1875 | #define FRF_AB_XM_REJ_BCAST_LBN 20 | ||
1876 | #define FRF_AB_XM_REJ_BCAST_WIDTH 1 | ||
1877 | #define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 | ||
1878 | #define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 | ||
1879 | #define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 | ||
1880 | #define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 | ||
1881 | #define FRF_AB_XM_AUTO_DEPAD_LBN 8 | ||
1882 | #define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 | ||
1883 | #define FRF_AB_XM_RXCRC_LBN 3 | ||
1884 | #define FRF_AB_XM_RXCRC_WIDTH 1 | ||
1885 | #define FRF_AB_XM_RX_PRMBL_LBN 2 | ||
1886 | #define FRF_AB_XM_RX_PRMBL_WIDTH 1 | ||
1887 | #define FRF_AB_XM_RXEN_LBN 1 | ||
1888 | #define FRF_AB_XM_RXEN_WIDTH 1 | ||
1889 | #define FRF_AB_XM_RX_RST_LBN 0 | ||
1890 | #define FRF_AB_XM_RX_RST_WIDTH 1 | ||
1891 | |||
1892 | /* XM_MGT_INT_MASK: XGMAC management interrupt mask register */ | ||
1893 | #define FR_AB_XM_MGT_INT_MASK 0x00001250 | ||
1894 | #define FRF_AB_XM_MSK_STA_INTR_LBN 16 | ||
1895 | #define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 | ||
1896 | #define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 | ||
1897 | #define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 | ||
1898 | #define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 | ||
1899 | #define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 | ||
1900 | #define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 | ||
1901 | #define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 | ||
1902 | #define FRF_AB_XM_MSK_RMTFLT_LBN 1 | ||
1903 | #define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 | ||
1904 | #define FRF_AB_XM_MSK_LCLFLT_LBN 0 | ||
1905 | #define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 | ||
1906 | |||
1907 | /* XM_FC_REG: XGMAC flow control register */ | ||
1908 | #define FR_AB_XM_FC 0x00001270 | ||
1909 | #define FRF_AB_XM_PAUSE_TIME_LBN 16 | ||
1910 | #define FRF_AB_XM_PAUSE_TIME_WIDTH 16 | ||
1911 | #define FRF_AB_XM_RX_MAC_STAT_LBN 11 | ||
1912 | #define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 | ||
1913 | #define FRF_AB_XM_TX_MAC_STAT_LBN 10 | ||
1914 | #define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 | ||
1915 | #define FRF_AB_XM_MCNTL_PASS_LBN 8 | ||
1916 | #define FRF_AB_XM_MCNTL_PASS_WIDTH 2 | ||
1917 | #define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 | ||
1918 | #define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 | ||
1919 | #define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 | ||
1920 | #define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 | ||
1921 | #define FRF_AB_XM_ZPAUSE_LBN 2 | ||
1922 | #define FRF_AB_XM_ZPAUSE_WIDTH 1 | ||
1923 | #define FRF_AB_XM_XMIT_PAUSE_LBN 1 | ||
1924 | #define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 | ||
1925 | #define FRF_AB_XM_DIS_FCNTL_LBN 0 | ||
1926 | #define FRF_AB_XM_DIS_FCNTL_WIDTH 1 | ||
1927 | |||
1928 | /* XM_PAUSE_TIME_REG: XGMAC pause time register */ | ||
1929 | #define FR_AB_XM_PAUSE_TIME 0x00001290 | ||
1930 | #define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 | ||
1931 | #define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 | ||
1932 | #define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 | ||
1933 | #define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 | ||
1934 | |||
1935 | /* XM_TX_PARAM_REG: XGMAC transmit parameter register */ | ||
1936 | #define FR_AB_XM_TX_PARAM 0x000012d0 | ||
1937 | #define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 | ||
1938 | #define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 | ||
1939 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 | ||
1940 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 | ||
1941 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 | ||
1942 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 | ||
1943 | #define FRF_AB_XM_PAD_CHAR_LBN 0 | ||
1944 | #define FRF_AB_XM_PAD_CHAR_WIDTH 8 | ||
1945 | |||
1946 | /* XM_RX_PARAM_REG: XGMAC receive parameter register */ | ||
1947 | #define FR_AB_XM_RX_PARAM 0x000012e0 | ||
1948 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 | ||
1949 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 | ||
1950 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 | ||
1951 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 | ||
1952 | |||
1953 | /* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ | ||
1954 | #define FR_AB_XM_MGT_INT_MSK 0x000012f0 | ||
1955 | #define FRF_AB_XM_STAT_CNTR_OF_LBN 9 | ||
1956 | #define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 | ||
1957 | #define FRF_AB_XM_STAT_CNTR_HF_LBN 8 | ||
1958 | #define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 | ||
1959 | #define FRF_AB_XM_PRMBLE_ERR_LBN 2 | ||
1960 | #define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 | ||
1961 | #define FRF_AB_XM_RMTFLT_LBN 1 | ||
1962 | #define FRF_AB_XM_RMTFLT_WIDTH 1 | ||
1963 | #define FRF_AB_XM_LCLFLT_LBN 0 | ||
1964 | #define FRF_AB_XM_LCLFLT_WIDTH 1 | ||
1965 | |||
1966 | /* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ | ||
1967 | #define FR_AB_XX_PWR_RST 0x00001300 | ||
1968 | #define FRF_AB_XX_PWRDND_SIG_LBN 31 | ||
1969 | #define FRF_AB_XX_PWRDND_SIG_WIDTH 1 | ||
1970 | #define FRF_AB_XX_PWRDNC_SIG_LBN 30 | ||
1971 | #define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 | ||
1972 | #define FRF_AB_XX_PWRDNB_SIG_LBN 29 | ||
1973 | #define FRF_AB_XX_PWRDNB_SIG_WIDTH 1 | ||
1974 | #define FRF_AB_XX_PWRDNA_SIG_LBN 28 | ||
1975 | #define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 | ||
1976 | #define FRF_AB_XX_SIM_MODE_LBN 27 | ||
1977 | #define FRF_AB_XX_SIM_MODE_WIDTH 1 | ||
1978 | #define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 | ||
1979 | #define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 | ||
1980 | #define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 | ||
1981 | #define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 | ||
1982 | #define FRF_AB_XX_RESETD_SIG_LBN 23 | ||
1983 | #define FRF_AB_XX_RESETD_SIG_WIDTH 1 | ||
1984 | #define FRF_AB_XX_RESETC_SIG_LBN 22 | ||
1985 | #define FRF_AB_XX_RESETC_SIG_WIDTH 1 | ||
1986 | #define FRF_AB_XX_RESETB_SIG_LBN 21 | ||
1987 | #define FRF_AB_XX_RESETB_SIG_WIDTH 1 | ||
1988 | #define FRF_AB_XX_RESETA_SIG_LBN 20 | ||
1989 | #define FRF_AB_XX_RESETA_SIG_WIDTH 1 | ||
1990 | #define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 | ||
1991 | #define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 | ||
1992 | #define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 | ||
1993 | #define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 | ||
1994 | #define FRF_AB_XX_SD_RST_ACT_LBN 16 | ||
1995 | #define FRF_AB_XX_SD_RST_ACT_WIDTH 1 | ||
1996 | #define FRF_AB_XX_PWRDND_EN_LBN 15 | ||
1997 | #define FRF_AB_XX_PWRDND_EN_WIDTH 1 | ||
1998 | #define FRF_AB_XX_PWRDNC_EN_LBN 14 | ||
1999 | #define FRF_AB_XX_PWRDNC_EN_WIDTH 1 | ||
2000 | #define FRF_AB_XX_PWRDNB_EN_LBN 13 | ||
2001 | #define FRF_AB_XX_PWRDNB_EN_WIDTH 1 | ||
2002 | #define FRF_AB_XX_PWRDNA_EN_LBN 12 | ||
2003 | #define FRF_AB_XX_PWRDNA_EN_WIDTH 1 | ||
2004 | #define FRF_AB_XX_RSTPLLCD_EN_LBN 9 | ||
2005 | #define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 | ||
2006 | #define FRF_AB_XX_RSTPLLAB_EN_LBN 8 | ||
2007 | #define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 | ||
2008 | #define FRF_AB_XX_RESETD_EN_LBN 7 | ||
2009 | #define FRF_AB_XX_RESETD_EN_WIDTH 1 | ||
2010 | #define FRF_AB_XX_RESETC_EN_LBN 6 | ||
2011 | #define FRF_AB_XX_RESETC_EN_WIDTH 1 | ||
2012 | #define FRF_AB_XX_RESETB_EN_LBN 5 | ||
2013 | #define FRF_AB_XX_RESETB_EN_WIDTH 1 | ||
2014 | #define FRF_AB_XX_RESETA_EN_LBN 4 | ||
2015 | #define FRF_AB_XX_RESETA_EN_WIDTH 1 | ||
2016 | #define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 | ||
2017 | #define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 | ||
2018 | #define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 | ||
2019 | #define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 | ||
2020 | #define FRF_AB_XX_RST_XX_EN_LBN 0 | ||
2021 | #define FRF_AB_XX_RST_XX_EN_WIDTH 1 | ||
2022 | |||
2023 | /* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ | ||
2024 | #define FR_AB_XX_SD_CTL 0x00001310 | ||
2025 | #define FRF_AB_XX_TERMADJ1_LBN 17 | ||
2026 | #define FRF_AB_XX_TERMADJ1_WIDTH 1 | ||
2027 | #define FRF_AB_XX_TERMADJ0_LBN 16 | ||
2028 | #define FRF_AB_XX_TERMADJ0_WIDTH 1 | ||
2029 | #define FRF_AB_XX_HIDRVD_LBN 15 | ||
2030 | #define FRF_AB_XX_HIDRVD_WIDTH 1 | ||
2031 | #define FRF_AB_XX_LODRVD_LBN 14 | ||
2032 | #define FRF_AB_XX_LODRVD_WIDTH 1 | ||
2033 | #define FRF_AB_XX_HIDRVC_LBN 13 | ||
2034 | #define FRF_AB_XX_HIDRVC_WIDTH 1 | ||
2035 | #define FRF_AB_XX_LODRVC_LBN 12 | ||
2036 | #define FRF_AB_XX_LODRVC_WIDTH 1 | ||
2037 | #define FRF_AB_XX_HIDRVB_LBN 11 | ||
2038 | #define FRF_AB_XX_HIDRVB_WIDTH 1 | ||
2039 | #define FRF_AB_XX_LODRVB_LBN 10 | ||
2040 | #define FRF_AB_XX_LODRVB_WIDTH 1 | ||
2041 | #define FRF_AB_XX_HIDRVA_LBN 9 | ||
2042 | #define FRF_AB_XX_HIDRVA_WIDTH 1 | ||
2043 | #define FRF_AB_XX_LODRVA_LBN 8 | ||
2044 | #define FRF_AB_XX_LODRVA_WIDTH 1 | ||
2045 | #define FRF_AB_XX_LPBKD_LBN 3 | ||
2046 | #define FRF_AB_XX_LPBKD_WIDTH 1 | ||
2047 | #define FRF_AB_XX_LPBKC_LBN 2 | ||
2048 | #define FRF_AB_XX_LPBKC_WIDTH 1 | ||
2049 | #define FRF_AB_XX_LPBKB_LBN 1 | ||
2050 | #define FRF_AB_XX_LPBKB_WIDTH 1 | ||
2051 | #define FRF_AB_XX_LPBKA_LBN 0 | ||
2052 | #define FRF_AB_XX_LPBKA_WIDTH 1 | ||
2053 | |||
2054 | /* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ | ||
2055 | #define FR_AB_XX_TXDRV_CTL 0x00001320 | ||
2056 | #define FRF_AB_XX_DEQD_LBN 28 | ||
2057 | #define FRF_AB_XX_DEQD_WIDTH 4 | ||
2058 | #define FRF_AB_XX_DEQC_LBN 24 | ||
2059 | #define FRF_AB_XX_DEQC_WIDTH 4 | ||
2060 | #define FRF_AB_XX_DEQB_LBN 20 | ||
2061 | #define FRF_AB_XX_DEQB_WIDTH 4 | ||
2062 | #define FRF_AB_XX_DEQA_LBN 16 | ||
2063 | #define FRF_AB_XX_DEQA_WIDTH 4 | ||
2064 | #define FRF_AB_XX_DTXD_LBN 12 | ||
2065 | #define FRF_AB_XX_DTXD_WIDTH 4 | ||
2066 | #define FRF_AB_XX_DTXC_LBN 8 | ||
2067 | #define FRF_AB_XX_DTXC_WIDTH 4 | ||
2068 | #define FRF_AB_XX_DTXB_LBN 4 | ||
2069 | #define FRF_AB_XX_DTXB_WIDTH 4 | ||
2070 | #define FRF_AB_XX_DTXA_LBN 0 | ||
2071 | #define FRF_AB_XX_DTXA_WIDTH 4 | ||
2072 | |||
2073 | /* XX_PRBS_CTL_REG: XAUI PRBS control (per-channel RX/TX pattern select, invert and checker enable) */ | ||
2074 | #define FR_AB_XX_PRBS_CTL 0x00001330 | ||
2075 | #define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 | ||
2076 | #define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 | ||
2077 | #define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 | ||
2078 | #define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 | ||
2079 | #define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 | ||
2080 | #define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 | ||
2081 | #define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 | ||
2082 | #define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 | ||
2083 | #define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 | ||
2084 | #define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 | ||
2085 | #define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 | ||
2086 | #define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 | ||
2087 | #define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 | ||
2088 | #define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 | ||
2089 | #define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 | ||
2090 | #define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 | ||
2091 | #define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 | ||
2092 | #define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 | ||
2093 | #define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 | ||
2094 | #define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 | ||
2095 | #define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 | ||
2096 | #define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 | ||
2097 | #define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 | ||
2098 | #define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 | ||
2099 | #define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 | ||
2100 | #define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 | ||
2101 | #define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 | ||
2102 | #define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 | ||
2103 | #define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 | ||
2104 | #define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 | ||
2105 | #define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 | ||
2106 | #define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 | ||
2107 | #define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 | ||
2108 | #define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 | ||
2109 | #define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 | ||
2110 | #define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 | ||
2111 | #define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 | ||
2112 | #define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 | ||
2113 | #define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 | ||
2114 | #define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 | ||
2115 | #define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 | ||
2116 | #define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 | ||
2117 | #define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 | ||
2118 | #define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 | ||
2119 | #define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 | ||
2120 | #define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 | ||
2121 | #define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 | ||
2122 | #define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 | ||
2123 | |||
2124 | /* XX_PRBS_CHK_REG: XAUI PRBS checker status (per-channel degrade detect, LFSR lock, free-run and error-check flags) */ | ||
2125 | #define FR_AB_XX_PRBS_CHK 0x00001340 | ||
2126 | #define FRF_AB_XX_REV_LB_EN_LBN 16 | ||
2127 | #define FRF_AB_XX_REV_LB_EN_WIDTH 1 | ||
2128 | #define FRF_AB_XX_CH3_DEG_DET_LBN 15 | ||
2129 | #define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 | ||
2130 | #define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 | ||
2131 | #define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 | ||
2132 | #define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 | ||
2133 | #define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 | ||
2134 | #define FRF_AB_XX_CH3_ERR_CHK_LBN 12 | ||
2135 | #define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 | ||
2136 | #define FRF_AB_XX_CH2_DEG_DET_LBN 11 | ||
2137 | #define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 | ||
2138 | #define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 | ||
2139 | #define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 | ||
2140 | #define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 | ||
2141 | #define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 | ||
2142 | #define FRF_AB_XX_CH2_ERR_CHK_LBN 8 | ||
2143 | #define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 | ||
2144 | #define FRF_AB_XX_CH1_DEG_DET_LBN 7 | ||
2145 | #define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 | ||
2146 | #define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 | ||
2147 | #define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 | ||
2148 | #define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 | ||
2149 | #define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 | ||
2150 | #define FRF_AB_XX_CH1_ERR_CHK_LBN 4 | ||
2151 | #define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 | ||
2152 | #define FRF_AB_XX_CH0_DEG_DET_LBN 3 | ||
2153 | #define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 | ||
2154 | #define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 | ||
2155 | #define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 | ||
2156 | #define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 | ||
2157 | #define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 | ||
2158 | #define FRF_AB_XX_CH0_ERR_CHK_LBN 0 | ||
2159 | #define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 | ||
2160 | |||
2161 | /* XX_PRBS_ERR_REG: XAUI PRBS per-channel 8-bit error counters */ | ||
2162 | #define FR_AB_XX_PRBS_ERR 0x00001350 | ||
2163 | #define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 | ||
2164 | #define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 | ||
2165 | #define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 | ||
2166 | #define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 | ||
2167 | #define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 | ||
2168 | #define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 | ||
2169 | #define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 | ||
2170 | #define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 | ||
2171 | |||
2172 | /* XX_CORE_STAT_REG: XAUI XGXS core status register */ | ||
2173 | #define FR_AB_XX_CORE_STAT 0x00001360 | ||
2174 | #define FRF_AB_XX_FORCE_SIG3_LBN 31 | ||
2175 | #define FRF_AB_XX_FORCE_SIG3_WIDTH 1 | ||
2176 | #define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 | ||
2177 | #define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 | ||
2178 | #define FRF_AB_XX_FORCE_SIG2_LBN 29 | ||
2179 | #define FRF_AB_XX_FORCE_SIG2_WIDTH 1 | ||
2180 | #define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 | ||
2181 | #define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 | ||
2182 | #define FRF_AB_XX_FORCE_SIG1_LBN 27 | ||
2183 | #define FRF_AB_XX_FORCE_SIG1_WIDTH 1 | ||
2184 | #define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 | ||
2185 | #define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 | ||
2186 | #define FRF_AB_XX_FORCE_SIG0_LBN 25 | ||
2187 | #define FRF_AB_XX_FORCE_SIG0_WIDTH 1 | ||
2188 | #define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 | ||
2189 | #define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 | ||
2190 | #define FRF_AB_XX_XGXS_LB_EN_LBN 23 | ||
2191 | #define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 | ||
2192 | #define FRF_AB_XX_XGMII_LB_EN_LBN 22 | ||
2193 | #define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 | ||
2194 | #define FRF_AB_XX_MATCH_FAULT_LBN 21 | ||
2195 | #define FRF_AB_XX_MATCH_FAULT_WIDTH 1 | ||
2196 | #define FRF_AB_XX_ALIGN_DONE_LBN 20 | ||
2197 | #define FRF_AB_XX_ALIGN_DONE_WIDTH 1 | ||
2198 | #define FRF_AB_XX_SYNC_STAT3_LBN 19 | ||
2199 | #define FRF_AB_XX_SYNC_STAT3_WIDTH 1 | ||
2200 | #define FRF_AB_XX_SYNC_STAT2_LBN 18 | ||
2201 | #define FRF_AB_XX_SYNC_STAT2_WIDTH 1 | ||
2202 | #define FRF_AB_XX_SYNC_STAT1_LBN 17 | ||
2203 | #define FRF_AB_XX_SYNC_STAT1_WIDTH 1 | ||
2204 | #define FRF_AB_XX_SYNC_STAT0_LBN 16 | ||
2205 | #define FRF_AB_XX_SYNC_STAT0_WIDTH 1 | ||
2206 | #define FRF_AB_XX_COMMA_DET_CH3_LBN 15 | ||
2207 | #define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 | ||
2208 | #define FRF_AB_XX_COMMA_DET_CH2_LBN 14 | ||
2209 | #define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 | ||
2210 | #define FRF_AB_XX_COMMA_DET_CH1_LBN 13 | ||
2211 | #define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 | ||
2212 | #define FRF_AB_XX_COMMA_DET_CH0_LBN 12 | ||
2213 | #define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 | ||
2214 | #define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 | ||
2215 | #define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 | ||
2216 | #define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 | ||
2217 | #define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 | ||
2218 | #define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 | ||
2219 | #define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 | ||
2220 | #define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 | ||
2221 | #define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 | ||
2222 | #define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 | ||
2223 | #define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 | ||
2224 | #define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 | ||
2225 | #define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 | ||
2226 | #define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 | ||
2227 | #define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 | ||
2228 | #define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 | ||
2229 | #define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 | ||
2230 | #define FRF_AB_XX_DISPERR_CH3_LBN 3 | ||
2231 | #define FRF_AB_XX_DISPERR_CH3_WIDTH 1 | ||
2232 | #define FRF_AB_XX_DISPERR_CH2_LBN 2 | ||
2233 | #define FRF_AB_XX_DISPERR_CH2_WIDTH 1 | ||
2234 | #define FRF_AB_XX_DISPERR_CH1_LBN 1 | ||
2235 | #define FRF_AB_XX_DISPERR_CH1_WIDTH 1 | ||
2236 | #define FRF_AB_XX_DISPERR_CH0_LBN 0 | ||
2237 | #define FRF_AB_XX_DISPERR_CH0_WIDTH 1 | ||
2238 | |||
2239 | /* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ | ||
2240 | #define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 | ||
2241 | #define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 | ||
2242 | #define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 | ||
2243 | /* RX_DESC_PTR_TBL: Receive descriptor pointer table */ | ||
2244 | #define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 | ||
2245 | #define FR_BZ_RX_DESC_PTR_TBL_STEP 16 | ||
2246 | #define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 | ||
2247 | #define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 | ||
2248 | #define FRF_CZ_RX_HDR_SPLIT_LBN 90 | ||
2249 | #define FRF_CZ_RX_HDR_SPLIT_WIDTH 1 | ||
2250 | #define FRF_AA_RX_RESET_LBN 89 | ||
2251 | #define FRF_AA_RX_RESET_WIDTH 1 | ||
2252 | #define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 | ||
2253 | #define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 | ||
2254 | #define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 | ||
2255 | #define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 | ||
2256 | #define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 | ||
2257 | #define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 | ||
2258 | #define FRF_AZ_RX_DC_HW_RPTR_LBN 80 | ||
2259 | #define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 | ||
2260 | #define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 | ||
2261 | #define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 | ||
2262 | #define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 | ||
2263 | #define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 | ||
2264 | #define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 | ||
2265 | #define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
2266 | #define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 | ||
2267 | #define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 | ||
2268 | #define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 | ||
2269 | #define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 | ||
2270 | #define FRF_AZ_RX_DESCQ_LABEL_LBN 5 | ||
2271 | #define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 | ||
2272 | #define FRF_AZ_RX_DESCQ_SIZE_LBN 3 | ||
2273 | #define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 | ||
2274 | #define FFE_AZ_RX_DESCQ_SIZE_4K 3 | ||
2275 | #define FFE_AZ_RX_DESCQ_SIZE_2K 2 | ||
2276 | #define FFE_AZ_RX_DESCQ_SIZE_1K 1 | ||
2277 | #define FFE_AZ_RX_DESCQ_SIZE_512 0 | ||
2278 | #define FRF_AZ_RX_DESCQ_TYPE_LBN 2 | ||
2279 | #define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 | ||
2280 | #define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 | ||
2281 | #define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 | ||
2282 | #define FRF_AZ_RX_DESCQ_EN_LBN 0 | ||
2283 | #define FRF_AZ_RX_DESCQ_EN_WIDTH 1 | ||
2284 | |||
2285 | /* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ | ||
2286 | #define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 | ||
2287 | #define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 | ||
2288 | #define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 | ||
2289 | /* TX_DESC_PTR_TBL: Transmit descriptor pointer */ | ||
2290 | #define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 | ||
2291 | #define FR_BZ_TX_DESC_PTR_TBL_STEP 16 | ||
2292 | #define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 | ||
2293 | #define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 | ||
2294 | #define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 | ||
2295 | #define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 | ||
2296 | #define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 | ||
2297 | #define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 | ||
2298 | #define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 | ||
2299 | #define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 | ||
2300 | #define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 | ||
2301 | #define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 | ||
2302 | #define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 | ||
2303 | #define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 | ||
2304 | #define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 | ||
2305 | #define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 | ||
2306 | #define FRF_AZ_TX_DESCQ_EN_LBN 88 | ||
2307 | #define FRF_AZ_TX_DESCQ_EN_WIDTH 1 | ||
2308 | #define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 | ||
2309 | #define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 | ||
2310 | #define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 | ||
2311 | #define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 | ||
2312 | #define FRF_AZ_TX_DC_HW_RPTR_LBN 80 | ||
2313 | #define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 | ||
2314 | #define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 | ||
2315 | #define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 | ||
2316 | #define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 | ||
2317 | #define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 | ||
2318 | #define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 | ||
2319 | #define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
2320 | #define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 | ||
2321 | #define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 | ||
2322 | #define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 | ||
2323 | #define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 | ||
2324 | #define FRF_AZ_TX_DESCQ_LABEL_LBN 5 | ||
2325 | #define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 | ||
2326 | #define FRF_AZ_TX_DESCQ_SIZE_LBN 3 | ||
2327 | #define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 | ||
2328 | #define FFE_AZ_TX_DESCQ_SIZE_4K 3 | ||
2329 | #define FFE_AZ_TX_DESCQ_SIZE_2K 2 | ||
2330 | #define FFE_AZ_TX_DESCQ_SIZE_1K 1 | ||
2331 | #define FFE_AZ_TX_DESCQ_SIZE_512 0 | ||
2332 | #define FRF_AZ_TX_DESCQ_TYPE_LBN 1 | ||
2333 | #define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 | ||
2334 | #define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 | ||
2335 | #define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 | ||
2336 | |||
2337 | /* EVQ_PTR_TBL_KER: Event queue pointer table */ | ||
2338 | #define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 | ||
2339 | #define FR_AA_EVQ_PTR_TBL_KER_STEP 16 | ||
2340 | #define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 | ||
2341 | /* EVQ_PTR_TBL: Event queue pointer table */ | ||
2342 | #define FR_BZ_EVQ_PTR_TBL 0x00f60000 | ||
2343 | #define FR_BZ_EVQ_PTR_TBL_STEP 16 | ||
2344 | #define FR_CZ_EVQ_PTR_TBL_ROWS 1024 | ||
2345 | #define FR_BB_EVQ_PTR_TBL_ROWS 4096 | ||
2346 | #define FRF_BZ_EVQ_RPTR_IGN_LBN 40 | ||
2347 | #define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 | ||
2348 | #define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 | ||
2349 | #define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 | ||
2350 | #define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 | ||
2351 | #define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 | ||
2352 | #define FRF_AZ_EVQ_NXT_WPTR_LBN 24 | ||
2353 | #define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 | ||
2354 | #define FRF_AZ_EVQ_EN_LBN 23 | ||
2355 | #define FRF_AZ_EVQ_EN_WIDTH 1 | ||
2356 | #define FRF_AZ_EVQ_SIZE_LBN 20 | ||
2357 | #define FRF_AZ_EVQ_SIZE_WIDTH 3 | ||
2358 | #define FFE_AZ_EVQ_SIZE_32K 6 | ||
2359 | #define FFE_AZ_EVQ_SIZE_16K 5 | ||
2360 | #define FFE_AZ_EVQ_SIZE_8K 4 | ||
2361 | #define FFE_AZ_EVQ_SIZE_4K 3 | ||
2362 | #define FFE_AZ_EVQ_SIZE_2K 2 | ||
2363 | #define FFE_AZ_EVQ_SIZE_1K 1 | ||
2364 | #define FFE_AZ_EVQ_SIZE_512 0 | ||
2365 | #define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 | ||
2366 | #define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 | ||
2367 | |||
2368 | /* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ | ||
2369 | #define FR_AA_BUF_HALF_TBL_KER 0x00018000 | ||
2370 | #define FR_AA_BUF_HALF_TBL_KER_STEP 8 | ||
2371 | #define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 | ||
2372 | /* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ | ||
2373 | #define FR_BZ_BUF_HALF_TBL 0x00800000 | ||
2374 | #define FR_BZ_BUF_HALF_TBL_STEP 8 | ||
2375 | #define FR_CZ_BUF_HALF_TBL_ROWS 147456 | ||
2376 | #define FR_BB_BUF_HALF_TBL_ROWS 524288 | ||
2377 | #define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 | ||
2378 | #define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 | ||
2379 | #define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 | ||
2380 | #define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 | ||
2381 | #define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 | ||
2382 | #define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 | ||
2383 | #define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 | ||
2384 | #define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 | ||
2385 | |||
2386 | /* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ | ||
2387 | #define FR_AA_BUF_FULL_TBL_KER 0x00018000 | ||
2388 | #define FR_AA_BUF_FULL_TBL_KER_STEP 8 | ||
2389 | #define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 | ||
2390 | /* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ | ||
2391 | #define FR_BZ_BUF_FULL_TBL 0x00800000 | ||
2392 | #define FR_BZ_BUF_FULL_TBL_STEP 8 | ||
2393 | #define FR_CZ_BUF_FULL_TBL_ROWS 147456 | ||
2394 | #define FR_BB_BUF_FULL_TBL_ROWS 917504 | ||
2395 | #define FRF_AZ_BUF_FULL_UNUSED_LBN 51 | ||
2396 | #define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 | ||
2397 | #define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 | ||
2398 | #define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 | ||
2399 | #define FRF_AZ_BUF_ADR_REGION_LBN 48 | ||
2400 | #define FRF_AZ_BUF_ADR_REGION_WIDTH 2 | ||
2401 | #define FFE_AZ_BUF_ADR_REGN3 3 | ||
2402 | #define FFE_AZ_BUF_ADR_REGN2 2 | ||
2403 | #define FFE_AZ_BUF_ADR_REGN1 1 | ||
2404 | #define FFE_AZ_BUF_ADR_REGN0 0 | ||
2405 | #define FRF_AZ_BUF_ADR_FBUF_LBN 14 | ||
2406 | #define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 | ||
2407 | #define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 | ||
2408 | #define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 | ||
2409 | |||
2410 | /* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ | ||
2411 | #define FR_BZ_RX_FILTER_TBL0 0x00f00000 | ||
2412 | #define FR_BZ_RX_FILTER_TBL0_STEP 32 | ||
2413 | #define FR_BZ_RX_FILTER_TBL0_ROWS 8192 | ||
2414 | /* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ | ||
2415 | #define FR_BB_RX_FILTER_TBL1 0x00f00010 | ||
2416 | #define FR_BB_RX_FILTER_TBL1_STEP 32 | ||
2417 | #define FR_BB_RX_FILTER_TBL1_ROWS 8192 | ||
2418 | #define FRF_BZ_RSS_EN_LBN 110 | ||
2419 | #define FRF_BZ_RSS_EN_WIDTH 1 | ||
2420 | #define FRF_BZ_SCATTER_EN_LBN 109 | ||
2421 | #define FRF_BZ_SCATTER_EN_WIDTH 1 | ||
2422 | #define FRF_BZ_TCP_UDP_LBN 108 | ||
2423 | #define FRF_BZ_TCP_UDP_WIDTH 1 | ||
2424 | #define FRF_BZ_RXQ_ID_LBN 96 | ||
2425 | #define FRF_BZ_RXQ_ID_WIDTH 12 | ||
2426 | #define FRF_BZ_DEST_IP_LBN 64 | ||
2427 | #define FRF_BZ_DEST_IP_WIDTH 32 | ||
2428 | #define FRF_BZ_DEST_PORT_TCP_LBN 48 | ||
2429 | #define FRF_BZ_DEST_PORT_TCP_WIDTH 16 | ||
2430 | #define FRF_BZ_SRC_IP_LBN 16 | ||
2431 | #define FRF_BZ_SRC_IP_WIDTH 32 | ||
2432 | #define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 | ||
2433 | #define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 | ||
2434 | |||
2435 | /* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ | ||
2436 | #define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 | ||
2437 | #define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 | ||
2438 | #define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 | ||
2439 | #define FRF_CZ_RMFT_RSS_EN_LBN 75 | ||
2440 | #define FRF_CZ_RMFT_RSS_EN_WIDTH 1 | ||
2441 | #define FRF_CZ_RMFT_SCATTER_EN_LBN 74 | ||
2442 | #define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 | ||
2443 | #define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 | ||
2444 | #define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 | ||
2445 | #define FRF_CZ_RMFT_RXQ_ID_LBN 61 | ||
2446 | #define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 | ||
2447 | #define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 | ||
2448 | #define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 | ||
2449 | #define FRF_CZ_RMFT_DEST_MAC_LBN 16 | ||
2450 | #define FRF_CZ_RMFT_DEST_MAC_WIDTH 44 | ||
2451 | #define FRF_CZ_RMFT_VLAN_ID_LBN 0 | ||
2452 | #define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 | ||
2453 | |||
2454 | /* TIMER_TBL: Timer table */ | ||
2455 | #define FR_BZ_TIMER_TBL 0x00f70000 | ||
2456 | #define FR_BZ_TIMER_TBL_STEP 16 | ||
2457 | #define FR_CZ_TIMER_TBL_ROWS 1024 | ||
2458 | #define FR_BB_TIMER_TBL_ROWS 4096 | ||
2459 | #define FRF_CZ_TIMER_Q_EN_LBN 33 | ||
2460 | #define FRF_CZ_TIMER_Q_EN_WIDTH 1 | ||
2461 | #define FRF_CZ_INT_ARMD_LBN 32 | ||
2462 | #define FRF_CZ_INT_ARMD_WIDTH 1 | ||
2463 | #define FRF_CZ_INT_PEND_LBN 31 | ||
2464 | #define FRF_CZ_INT_PEND_WIDTH 1 | ||
2465 | #define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 | ||
2466 | #define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 | ||
2467 | #define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 | ||
2468 | #define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 | ||
2469 | #define FRF_CZ_TIMER_MODE_LBN 14 | ||
2470 | #define FRF_CZ_TIMER_MODE_WIDTH 2 | ||
2471 | #define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 | ||
2472 | #define FFE_CZ_TIMER_MODE_TRIG_START 2 | ||
2473 | #define FFE_CZ_TIMER_MODE_IMMED_START 1 | ||
2474 | #define FFE_CZ_TIMER_MODE_DIS 0 | ||
2475 | #define FRF_BB_TIMER_MODE_LBN 12 | ||
2476 | #define FRF_BB_TIMER_MODE_WIDTH 2 | ||
2477 | #define FFE_BB_TIMER_MODE_INT_HLDOFF 2 | ||
2478 | #define FFE_BB_TIMER_MODE_TRIG_START 2 | ||
2479 | #define FFE_BB_TIMER_MODE_IMMED_START 1 | ||
2480 | #define FFE_BB_TIMER_MODE_DIS 0 | ||
2481 | #define FRF_CZ_TIMER_VAL_LBN 0 | ||
2482 | #define FRF_CZ_TIMER_VAL_WIDTH 14 | ||
2483 | #define FRF_BB_TIMER_VAL_LBN 0 | ||
2484 | #define FRF_BB_TIMER_VAL_WIDTH 12 | ||
2485 | |||
2486 | /* TX_PACE_TBL: Transmit pacing table */ | ||
2487 | #define FR_BZ_TX_PACE_TBL 0x00f80000 | ||
2488 | #define FR_BZ_TX_PACE_TBL_STEP 16 | ||
2489 | #define FR_CZ_TX_PACE_TBL_ROWS 1024 | ||
2490 | #define FR_BB_TX_PACE_TBL_ROWS 4096 | ||
2491 | #define FRF_BZ_TX_PACE_LBN 0 | ||
2492 | #define FRF_BZ_TX_PACE_WIDTH 5 | ||
2493 | |||
2494 | /* RX_INDIRECTION_TBL: RX Indirection Table */ | ||
2495 | #define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 | ||
2496 | #define FR_BZ_RX_INDIRECTION_TBL_STEP 16 | ||
2497 | #define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 | ||
2498 | #define FRF_BZ_IT_QUEUE_LBN 0 | ||
2499 | #define FRF_BZ_IT_QUEUE_WIDTH 6 | ||
2500 | |||
2501 | /* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ | ||
2502 | #define FR_CZ_TX_FILTER_TBL0 0x00fc0000 | ||
2503 | #define FR_CZ_TX_FILTER_TBL0_STEP 16 | ||
2504 | #define FR_CZ_TX_FILTER_TBL0_ROWS 8192 | ||
2505 | #define FRF_CZ_TIFT_TCP_UDP_LBN 108 | ||
2506 | #define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 | ||
2507 | #define FRF_CZ_TIFT_TXQ_ID_LBN 96 | ||
2508 | #define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 | ||
2509 | #define FRF_CZ_TIFT_DEST_IP_LBN 64 | ||
2510 | #define FRF_CZ_TIFT_DEST_IP_WIDTH 32 | ||
2511 | #define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 | ||
2512 | #define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 | ||
2513 | #define FRF_CZ_TIFT_SRC_IP_LBN 16 | ||
2514 | #define FRF_CZ_TIFT_SRC_IP_WIDTH 32 | ||
2515 | #define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 | ||
2516 | #define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 | ||
2517 | |||
2518 | /* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ | ||
2519 | #define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 | ||
2520 | #define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 | ||
2521 | #define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 | ||
2522 | #define FRF_CZ_TMFT_TXQ_ID_LBN 61 | ||
2523 | #define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 | ||
2524 | #define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 | ||
2525 | #define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 | ||
2526 | #define FRF_CZ_TMFT_SRC_MAC_LBN 16 | ||
2527 | #define FRF_CZ_TMFT_SRC_MAC_WIDTH 44 | ||
2528 | #define FRF_CZ_TMFT_VLAN_ID_LBN 0 | ||
2529 | #define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 | ||
2530 | |||
2531 | /* MC_TREG_SMEM: MC Shared Memory */ | ||
2532 | #define FR_CZ_MC_TREG_SMEM 0x00ff0000 | ||
2533 | #define FR_CZ_MC_TREG_SMEM_STEP 4 | ||
2534 | #define FR_CZ_MC_TREG_SMEM_ROWS 512 | ||
2535 | #define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 | ||
2536 | #define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 | ||
2537 | |||
2538 | /* MSIX_VECTOR_TABLE: MSIX Vector Table */ | ||
2539 | #define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 | ||
2540 | #define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 | ||
2541 | #define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 | ||
2542 | /* MSIX_VECTOR_TABLE: MSIX Vector Table */ | ||
2543 | #define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 | ||
2544 | /* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ | ||
2545 | #define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 | ||
2546 | #define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 | ||
2547 | #define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 | ||
2548 | #define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 | ||
2549 | #define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 | ||
2550 | #define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 | ||
2551 | #define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 | ||
2552 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 | ||
2553 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 | ||
2554 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 | ||
2555 | #define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 | ||
2556 | |||
2557 | /* MSIX_PBA_TABLE: MSIX Pending Bit Array */ | ||
2558 | #define FR_BB_MSIX_PBA_TABLE 0x00ff2000 | ||
2559 | #define FR_BZ_MSIX_PBA_TABLE_STEP 4 | ||
2560 | #define FR_BB_MSIX_PBA_TABLE_ROWS 2 | ||
2561 | /* MSIX_PBA_TABLE: MSIX Pending Bit Array */ | ||
2562 | #define FR_CZ_MSIX_PBA_TABLE 0x00008000 | ||
2563 | /* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ | ||
2564 | #define FR_CZ_MSIX_PBA_TABLE_ROWS 32 | ||
2565 | #define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 | ||
2566 | #define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 | ||
2567 | |||
2568 | /* SRM_DBG_REG: SRAM debug access */ | ||
2569 | #define FR_BZ_SRM_DBG 0x03000000 | ||
2570 | #define FR_BZ_SRM_DBG_STEP 8 | ||
2571 | #define FR_CZ_SRM_DBG_ROWS 262144 | ||
2572 | #define FR_BB_SRM_DBG_ROWS 2097152 | ||
2573 | #define FRF_BZ_SRM_DBG_LBN 0 | ||
2574 | #define FRF_BZ_SRM_DBG_WIDTH 64 | ||
2575 | |||
2576 | /* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ | ||
2577 | #define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 | ||
2578 | #define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 | ||
2579 | #define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 | ||
2580 | #define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 | ||
2581 | #define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 | ||
2582 | |||
2583 | /* DRIVER_EV */ | ||
2584 | #define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 | ||
2585 | #define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 | ||
2586 | #define FSE_BZ_TX_DSC_ERROR_EV 15 | ||
2587 | #define FSE_BZ_RX_DSC_ERROR_EV 14 | ||
2588 | #define FSE_AA_RX_RECOVER_EV 11 | ||
2589 | #define FSE_AZ_TIMER_EV 10 | ||
2590 | #define FSE_AZ_TX_PKT_NON_TCP_UDP 9 | ||
2591 | #define FSE_AZ_WAKE_UP_EV 6 | ||
2592 | #define FSE_AZ_SRM_UPD_DONE_EV 5 | ||
2593 | #define FSE_AB_EVQ_NOT_EN_EV 3 | ||
2594 | #define FSE_AZ_EVQ_INIT_DONE_EV 2 | ||
2595 | #define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 | ||
2596 | #define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 | ||
2597 | #define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 | ||
2598 | #define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 | ||
2599 | |||
2600 | /* EVENT_ENTRY */ | ||
2601 | #define FSF_AZ_EV_CODE_LBN 60 | ||
2602 | #define FSF_AZ_EV_CODE_WIDTH 4 | ||
2603 | #define FSE_CZ_EV_CODE_MCDI_EV 12 | ||
2604 | #define FSE_CZ_EV_CODE_USER_EV 8 | ||
2605 | #define FSE_AZ_EV_CODE_DRV_GEN_EV 7 | ||
2606 | #define FSE_AZ_EV_CODE_GLOBAL_EV 6 | ||
2607 | #define FSE_AZ_EV_CODE_DRIVER_EV 5 | ||
2608 | #define FSE_AZ_EV_CODE_TX_EV 2 | ||
2609 | #define FSE_AZ_EV_CODE_RX_EV 0 | ||
2610 | #define FSF_AZ_EV_DATA_LBN 0 | ||
2611 | #define FSF_AZ_EV_DATA_WIDTH 60 | ||
2612 | |||
2613 | /* GLOBAL_EV */ | ||
2614 | #define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 | ||
2615 | #define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 | ||
2616 | #define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 | ||
2617 | #define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 | ||
2618 | #define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 | ||
2619 | #define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 | ||
2620 | #define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 | ||
2621 | #define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 | ||
2622 | #define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 | ||
2623 | #define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 | ||
2624 | #define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 | ||
2625 | #define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 | ||
2626 | |||
2627 | /* LEGACY_INT_VEC */ | ||
2628 | #define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 | ||
2629 | #define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 | ||
2630 | #define FSF_AZ_NET_IVEC_INT_Q_LBN 40 | ||
2631 | #define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 | ||
2632 | #define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 | ||
2633 | #define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 | ||
2634 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 | ||
2635 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 | ||
2636 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 | ||
2637 | #define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 | ||
2638 | |||
2639 | /* MC_XGMAC_FLTR_RULE_DEF */ | ||
2640 | #define FSF_CZ_MC_XFRC_MODE_LBN 416 | ||
2641 | #define FSF_CZ_MC_XFRC_MODE_WIDTH 1 | ||
2642 | #define FSE_CZ_MC_XFRC_MODE_LAYERED 1 | ||
2643 | #define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 | ||
2644 | #define FSF_CZ_MC_XFRC_HASH_LBN 384 | ||
2645 | #define FSF_CZ_MC_XFRC_HASH_WIDTH 32 | ||
2646 | #define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 | ||
2647 | #define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 | ||
2648 | #define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 | ||
2649 | #define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 | ||
2650 | #define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 | ||
2651 | #define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 | ||
2652 | |||
2653 | /* RX_EV */ | ||
2654 | #define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 | ||
2655 | #define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 | ||
2656 | #define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 | ||
2657 | #define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 | ||
2658 | #define FSF_AZ_RX_EV_PKT_OK_LBN 56 | ||
2659 | #define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 | ||
2660 | #define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 | ||
2661 | #define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 | ||
2662 | #define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 | ||
2663 | #define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 | ||
2664 | #define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 | ||
2665 | #define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 | ||
2666 | #define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 | ||
2667 | #define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 | ||
2668 | #define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 | ||
2669 | #define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 | ||
2670 | #define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 | ||
2671 | #define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 | ||
2672 | #define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 | ||
2673 | #define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 | ||
2674 | #define FSF_AA_RX_EV_DRIB_NIB_LBN 49 | ||
2675 | #define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 | ||
2676 | #define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 | ||
2677 | #define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 | ||
2678 | #define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 | ||
2679 | #define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 | ||
2680 | #define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 | ||
2681 | #define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 | ||
2682 | #define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 | ||
2683 | #define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 | ||
2684 | #define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 | ||
2685 | #define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 | ||
2686 | #define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 | ||
2687 | #define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 | ||
2688 | #define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 | ||
2689 | #define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 | ||
2690 | #define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 | ||
2691 | #define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 | ||
2692 | #define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 | ||
2693 | #define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 | ||
2694 | #define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 | ||
2695 | #define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 | ||
2696 | #define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 | ||
2697 | #define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 | ||
2698 | #define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 | ||
2699 | #define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 | ||
2700 | #define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 | ||
2701 | #define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 | ||
2702 | #define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 | ||
2703 | #define FSF_AZ_RX_EV_Q_LABEL_LBN 32 | ||
2704 | #define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 | ||
2705 | #define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 | ||
2706 | #define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 | ||
2707 | #define FSF_AZ_RX_EV_PORT_LBN 30 | ||
2708 | #define FSF_AZ_RX_EV_PORT_WIDTH 1 | ||
2709 | #define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 | ||
2710 | #define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 | ||
2711 | #define FSF_AZ_RX_EV_SOP_LBN 15 | ||
2712 | #define FSF_AZ_RX_EV_SOP_WIDTH 1 | ||
2713 | #define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 | ||
2714 | #define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 | ||
2715 | #define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 | ||
2716 | #define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 | ||
2717 | #define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 | ||
2718 | #define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 | ||
2719 | #define FSF_AZ_RX_EV_DESC_PTR_LBN 0 | ||
2720 | #define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 | ||
2721 | |||
2722 | /* RX_KER_DESC */ | ||
2723 | #define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 | ||
2724 | #define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 | ||
2725 | #define FSF_AZ_RX_KER_BUF_REGION_LBN 46 | ||
2726 | #define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 | ||
2727 | #define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 | ||
2728 | #define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 | ||
2729 | |||
2730 | /* RX_USER_DESC */ | ||
2731 | #define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 | ||
2732 | #define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 | ||
2733 | #define FSF_AZ_RX_USER_BUF_ID_LBN 0 | ||
2734 | #define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 | ||
2735 | |||
2736 | /* TX_EV */ | ||
2737 | #define FSF_AZ_TX_EV_PKT_ERR_LBN 38 | ||
2738 | #define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 | ||
2739 | #define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 | ||
2740 | #define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 | ||
2741 | #define FSF_AZ_TX_EV_Q_LABEL_LBN 32 | ||
2742 | #define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 | ||
2743 | #define FSF_AZ_TX_EV_PORT_LBN 16 | ||
2744 | #define FSF_AZ_TX_EV_PORT_WIDTH 1 | ||
2745 | #define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 | ||
2746 | #define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 | ||
2747 | #define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 | ||
2748 | #define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 | ||
2749 | #define FSF_AZ_TX_EV_COMP_LBN 12 | ||
2750 | #define FSF_AZ_TX_EV_COMP_WIDTH 1 | ||
2751 | #define FSF_AZ_TX_EV_DESC_PTR_LBN 0 | ||
2752 | #define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 | ||
2753 | |||
2754 | /* TX_KER_DESC */ | ||
2755 | #define FSF_AZ_TX_KER_CONT_LBN 62 | ||
2756 | #define FSF_AZ_TX_KER_CONT_WIDTH 1 | ||
2757 | #define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 | ||
2758 | #define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 | ||
2759 | #define FSF_AZ_TX_KER_BUF_REGION_LBN 46 | ||
2760 | #define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 | ||
2761 | #define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 | ||
2762 | #define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 | ||
2763 | |||
2764 | /* TX_USER_DESC */ | ||
2765 | #define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 | ||
2766 | #define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 | ||
2767 | #define FSF_AZ_TX_USER_CONT_LBN 46 | ||
2768 | #define FSF_AZ_TX_USER_CONT_WIDTH 1 | ||
2769 | #define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 | ||
2770 | #define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 | ||
2771 | #define FSF_AZ_TX_USER_BUF_ID_LBN 13 | ||
2772 | #define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 | ||
2773 | #define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 | ||
2774 | #define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 | ||
2775 | |||
2776 | /* USER_EV */ | ||
2777 | #define FSF_CZ_USER_QID_LBN 32 | ||
2778 | #define FSF_CZ_USER_QID_WIDTH 10 | ||
2779 | #define FSF_CZ_USER_EV_REG_VALUE_LBN 0 | ||
2780 | #define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 | ||
2781 | |||
2782 | /************************************************************************** | ||
2783 | * | ||
2784 | * Falcon B0 PCIe core indirect registers | ||
2785 | * | ||
2786 | ************************************************************************** | ||
2787 | */ | ||
2788 | |||
2789 | #define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 | ||
2790 | |||
2791 | #define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 | ||
2792 | |||
2793 | #define FPCR_BB_ACK_RPL_TIMER 0x700 | ||
2794 | #define FPCRF_BB_ACK_TL_LBN 0 | ||
2795 | #define FPCRF_BB_ACK_TL_WIDTH 16 | ||
2796 | #define FPCRF_BB_RPL_TL_LBN 16 | ||
2797 | #define FPCRF_BB_RPL_TL_WIDTH 16 | ||
2798 | |||
2799 | #define FPCR_BB_ACK_FREQ 0x70C | ||
2800 | #define FPCRF_BB_ACK_FREQ_LBN 0 | ||
2801 | #define FPCRF_BB_ACK_FREQ_WIDTH 7 | ||
2802 | |||
2803 | /************************************************************************** | ||
2804 | * | ||
2805 | * Pseudo-registers and fields | ||
2806 | * | ||
2807 | ************************************************************************** | ||
2808 | */ | ||
2809 | |||
2810 | /* Interrupt acknowledge work-around register (A0/A1 only) */ | ||
2811 | #define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 | ||
2812 | |||
2813 | /* EE_SPI_HCMD_REG: SPI host command register */ | ||
2814 | /* Values for the EE_SPI_HCMD_SF_SEL register field */ | ||
2815 | #define FFE_AB_SPI_DEVICE_EEPROM 0 | ||
2816 | #define FFE_AB_SPI_DEVICE_FLASH 1 | ||
2817 | |||
2818 | /* NIC_STAT_REG: NIC status register */ | ||
2819 | #define FRF_AB_STRAP_10G_LBN 2 | ||
2820 | #define FRF_AB_STRAP_10G_WIDTH 1 | ||
2821 | #define FRF_AA_STRAP_PCIE_LBN 0 | ||
2822 | #define FRF_AA_STRAP_PCIE_WIDTH 1 | ||
2823 | |||
2824 | /* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ | ||
2825 | #define FRF_AZ_FATAL_INTR_LBN 0 | ||
2826 | #define FRF_AZ_FATAL_INTR_WIDTH 12 | ||
2827 | |||
2828 | /* SRM_CFG_REG: SRAM configuration register */ | ||
2829 | /* We treat the number of SRAM banks and bank size as a single field */ | ||
2830 | #define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN | ||
2831 | #define FRF_AZ_SRM_NB_SZ_WIDTH \ | ||
2832 | (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) | ||
2833 | #define FFE_AB_SRM_NB1_SZ2M 0 | ||
2834 | #define FFE_AB_SRM_NB1_SZ4M 1 | ||
2835 | #define FFE_AB_SRM_NB1_SZ8M 2 | ||
2836 | #define FFE_AB_SRM_NB_SZ_DEF 3 | ||
2837 | #define FFE_AB_SRM_NB2_SZ4M 4 | ||
2838 | #define FFE_AB_SRM_NB2_SZ8M 5 | ||
2839 | #define FFE_AB_SRM_NB2_SZ16M 6 | ||
2840 | #define FFE_AB_SRM_NB_SZ_RES 7 | ||
2841 | |||
2842 | /* RX_DESC_UPD_REGP0: Receive descriptor update register. */ | ||
2843 | /* We write just the last dword of these registers */ | ||
2844 | #define FR_AZ_RX_DESC_UPD_DWORD_P0 \ | ||
2845 | (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ | ||
2846 | FR_BZ_RX_DESC_UPD_P0 + 3 * 4) | ||
2847 | #define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) | ||
2848 | #define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH | ||
2849 | |||
2850 | /* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ | ||
2851 | #define FR_AZ_TX_DESC_UPD_DWORD_P0 \ | ||
2852 | (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ | ||
2853 | FR_BZ_TX_DESC_UPD_P0 + 3 * 4) | ||
2854 | #define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) | ||
2855 | #define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH | ||
2856 | |||
2857 | /* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ | ||
2858 | #define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 | ||
2859 | #define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 | ||
2860 | |||
2861 | /* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ | ||
2862 | #define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 | ||
2863 | #define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 | ||
2864 | |||
2865 | /* XM_TX_PARAM_REG: XGMAC transmit parameter register */ | ||
2866 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN | ||
2867 | #define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ | ||
2868 | FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) | ||
2869 | |||
2870 | /* XM_RX_PARAM_REG: XGMAC receive parameter register */ | ||
2871 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN | ||
2872 | #define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ | ||
2873 | FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) | ||
2874 | |||
2875 | /* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ | ||
2876 | /* Default values */ | ||
2877 | #define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ | ||
2878 | #define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ | ||
2879 | #define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ | ||
2880 | |||
2881 | /* XX_CORE_STAT_REG: XAUI XGXS core status register */ | ||
2882 | /* XGXS all-lanes status fields */ | ||
2883 | #define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN | ||
2884 | #define FRF_AB_XX_SYNC_STAT_WIDTH 4 | ||
2885 | #define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN | ||
2886 | #define FRF_AB_XX_COMMA_DET_WIDTH 4 | ||
2887 | #define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN | ||
2888 | #define FRF_AB_XX_CHAR_ERR_WIDTH 4 | ||
2889 | #define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN | ||
2890 | #define FRF_AB_XX_DISPERR_WIDTH 4 | ||
2891 | #define FFE_AB_XX_STAT_ALL_LANES 0xf | ||
2892 | #define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN | ||
2893 | #define FRF_AB_XX_FORCE_SIG_WIDTH 8 | ||
2894 | #define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff | ||
2895 | |||
2896 | /* RX_MAC_FILTER_TBL0 */ | ||
2897 | /* RMFT_DEST_MAC is wider than 32 bits */ | ||
2898 | #define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12 | ||
2899 | #define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 | ||
2900 | #define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44 | ||
2901 | #define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16 | ||
2902 | |||
2903 | /* TX_MAC_FILTER_TBL0 */ | ||
2904 | /* TMFT_SRC_MAC is wider than 32 bits */ | ||
2905 | #define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12 | ||
2906 | #define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 | ||
2907 | #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 | ||
2908 | #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 | ||
2909 | |||
2910 | /* TX_PACE_TBL */ | ||
2911 | /* Values >20 are documented as reserved, but will result in a queue going | ||
2912 | * into the fast bin with a pace value of zero. */ | ||
2913 | #define FFE_BZ_TX_PACE_OFF 0 | ||
2914 | #define FFE_BZ_TX_PACE_RESERVED 21 | ||
2915 | |||
2916 | /* DRIVER_EV */ | ||
2917 | /* Sub-fields of an RX flush completion event */ | ||
2918 | #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 | ||
2919 | #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 | ||
2920 | #define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 | ||
2921 | #define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 | ||
2922 | |||
2923 | /* EVENT_ENTRY */ | ||
2924 | /* Magic number field for event test */ | ||
2925 | #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 | ||
2926 | #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 | ||
2927 | |||
2928 | /************************************************************************** | ||
2929 | * | ||
2930 | * Falcon MAC stats | ||
2931 | * | ||
2932 | ************************************************************************** | ||
2933 | * | ||
2934 | */ | ||
2935 | |||
2936 | #define GRxGoodOct_offset 0x0 | ||
2937 | #define GRxGoodOct_WIDTH 48 | ||
2938 | #define GRxBadOct_offset 0x8 | ||
2939 | #define GRxBadOct_WIDTH 48 | ||
2940 | #define GRxMissPkt_offset 0x10 | ||
2941 | #define GRxMissPkt_WIDTH 32 | ||
2942 | #define GRxFalseCRS_offset 0x14 | ||
2943 | #define GRxFalseCRS_WIDTH 32 | ||
2944 | #define GRxPausePkt_offset 0x18 | ||
2945 | #define GRxPausePkt_WIDTH 32 | ||
2946 | #define GRxBadPkt_offset 0x1C | ||
2947 | #define GRxBadPkt_WIDTH 32 | ||
2948 | #define GRxUcastPkt_offset 0x20 | ||
2949 | #define GRxUcastPkt_WIDTH 32 | ||
2950 | #define GRxMcastPkt_offset 0x24 | ||
2951 | #define GRxMcastPkt_WIDTH 32 | ||
2952 | #define GRxBcastPkt_offset 0x28 | ||
2953 | #define GRxBcastPkt_WIDTH 32 | ||
2954 | #define GRxGoodLt64Pkt_offset 0x2C | ||
2955 | #define GRxGoodLt64Pkt_WIDTH 32 | ||
2956 | #define GRxBadLt64Pkt_offset 0x30 | ||
2957 | #define GRxBadLt64Pkt_WIDTH 32 | ||
2958 | #define GRx64Pkt_offset 0x34 | ||
2959 | #define GRx64Pkt_WIDTH 32 | ||
2960 | #define GRx65to127Pkt_offset 0x38 | ||
2961 | #define GRx65to127Pkt_WIDTH 32 | ||
2962 | #define GRx128to255Pkt_offset 0x3C | ||
2963 | #define GRx128to255Pkt_WIDTH 32 | ||
2964 | #define GRx256to511Pkt_offset 0x40 | ||
2965 | #define GRx256to511Pkt_WIDTH 32 | ||
2966 | #define GRx512to1023Pkt_offset 0x44 | ||
2967 | #define GRx512to1023Pkt_WIDTH 32 | ||
2968 | #define GRx1024to15xxPkt_offset 0x48 | ||
2969 | #define GRx1024to15xxPkt_WIDTH 32 | ||
2970 | #define GRx15xxtoJumboPkt_offset 0x4C | ||
2971 | #define GRx15xxtoJumboPkt_WIDTH 32 | ||
2972 | #define GRxGtJumboPkt_offset 0x50 | ||
2973 | #define GRxGtJumboPkt_WIDTH 32 | ||
2974 | #define GRxFcsErr64to15xxPkt_offset 0x54 | ||
2975 | #define GRxFcsErr64to15xxPkt_WIDTH 32 | ||
2976 | #define GRxFcsErr15xxtoJumboPkt_offset 0x58 | ||
2977 | #define GRxFcsErr15xxtoJumboPkt_WIDTH 32 | ||
2978 | #define GRxFcsErrGtJumboPkt_offset 0x5C | ||
2979 | #define GRxFcsErrGtJumboPkt_WIDTH 32 | ||
2980 | #define GTxGoodBadOct_offset 0x80 | ||
2981 | #define GTxGoodBadOct_WIDTH 48 | ||
2982 | #define GTxGoodOct_offset 0x88 | ||
2983 | #define GTxGoodOct_WIDTH 48 | ||
2984 | #define GTxSglColPkt_offset 0x90 | ||
2985 | #define GTxSglColPkt_WIDTH 32 | ||
2986 | #define GTxMultColPkt_offset 0x94 | ||
2987 | #define GTxMultColPkt_WIDTH 32 | ||
2988 | #define GTxExColPkt_offset 0x98 | ||
2989 | #define GTxExColPkt_WIDTH 32 | ||
2990 | #define GTxDefPkt_offset 0x9C | ||
2991 | #define GTxDefPkt_WIDTH 32 | ||
2992 | #define GTxLateCol_offset 0xA0 | ||
2993 | #define GTxLateCol_WIDTH 32 | ||
2994 | #define GTxExDefPkt_offset 0xA4 | ||
2995 | #define GTxExDefPkt_WIDTH 32 | ||
2996 | #define GTxPausePkt_offset 0xA8 | ||
2997 | #define GTxPausePkt_WIDTH 32 | ||
2998 | #define GTxBadPkt_offset 0xAC | ||
2999 | #define GTxBadPkt_WIDTH 32 | ||
3000 | #define GTxUcastPkt_offset 0xB0 | ||
3001 | #define GTxUcastPkt_WIDTH 32 | ||
3002 | #define GTxMcastPkt_offset 0xB4 | ||
3003 | #define GTxMcastPkt_WIDTH 32 | ||
3004 | #define GTxBcastPkt_offset 0xB8 | ||
3005 | #define GTxBcastPkt_WIDTH 32 | ||
3006 | #define GTxLt64Pkt_offset 0xBC | ||
3007 | #define GTxLt64Pkt_WIDTH 32 | ||
3008 | #define GTx64Pkt_offset 0xC0 | ||
3009 | #define GTx64Pkt_WIDTH 32 | ||
3010 | #define GTx65to127Pkt_offset 0xC4 | ||
3011 | #define GTx65to127Pkt_WIDTH 32 | ||
3012 | #define GTx128to255Pkt_offset 0xC8 | ||
3013 | #define GTx128to255Pkt_WIDTH 32 | ||
3014 | #define GTx256to511Pkt_offset 0xCC | ||
3015 | #define GTx256to511Pkt_WIDTH 32 | ||
3016 | #define GTx512to1023Pkt_offset 0xD0 | ||
3017 | #define GTx512to1023Pkt_WIDTH 32 | ||
3018 | #define GTx1024to15xxPkt_offset 0xD4 | ||
3019 | #define GTx1024to15xxPkt_WIDTH 32 | ||
3020 | #define GTx15xxtoJumboPkt_offset 0xD8 | ||
3021 | #define GTx15xxtoJumboPkt_WIDTH 32 | ||
3022 | #define GTxGtJumboPkt_offset 0xDC | ||
3023 | #define GTxGtJumboPkt_WIDTH 32 | ||
3024 | #define GTxNonTcpUdpPkt_offset 0xE0 | ||
3025 | #define GTxNonTcpUdpPkt_WIDTH 16 | ||
3026 | #define GTxMacSrcErrPkt_offset 0xE4 | ||
3027 | #define GTxMacSrcErrPkt_WIDTH 16 | ||
3028 | #define GTxIpSrcErrPkt_offset 0xE8 | ||
3029 | #define GTxIpSrcErrPkt_WIDTH 16 | ||
3030 | #define GDmaDone_offset 0xEC | ||
3031 | #define GDmaDone_WIDTH 32 | ||
3032 | |||
3033 | #define XgRxOctets_offset 0x0 | ||
3034 | #define XgRxOctets_WIDTH 48 | ||
3035 | #define XgRxOctetsOK_offset 0x8 | ||
3036 | #define XgRxOctetsOK_WIDTH 48 | ||
3037 | #define XgRxPkts_offset 0x10 | ||
3038 | #define XgRxPkts_WIDTH 32 | ||
3039 | #define XgRxPktsOK_offset 0x14 | ||
3040 | #define XgRxPktsOK_WIDTH 32 | ||
3041 | #define XgRxBroadcastPkts_offset 0x18 | ||
3042 | #define XgRxBroadcastPkts_WIDTH 32 | ||
3043 | #define XgRxMulticastPkts_offset 0x1C | ||
3044 | #define XgRxMulticastPkts_WIDTH 32 | ||
3045 | #define XgRxUnicastPkts_offset 0x20 | ||
3046 | #define XgRxUnicastPkts_WIDTH 32 | ||
3047 | #define XgRxUndersizePkts_offset 0x24 | ||
3048 | #define XgRxUndersizePkts_WIDTH 32 | ||
3049 | #define XgRxOversizePkts_offset 0x28 | ||
3050 | #define XgRxOversizePkts_WIDTH 32 | ||
3051 | #define XgRxJabberPkts_offset 0x2C | ||
3052 | #define XgRxJabberPkts_WIDTH 32 | ||
3053 | #define XgRxUndersizeFCSerrorPkts_offset 0x30 | ||
3054 | #define XgRxUndersizeFCSerrorPkts_WIDTH 32 | ||
3055 | #define XgRxDropEvents_offset 0x34 | ||
3056 | #define XgRxDropEvents_WIDTH 32 | ||
3057 | #define XgRxFCSerrorPkts_offset 0x38 | ||
3058 | #define XgRxFCSerrorPkts_WIDTH 32 | ||
3059 | #define XgRxAlignError_offset 0x3C | ||
3060 | #define XgRxAlignError_WIDTH 32 | ||
3061 | #define XgRxSymbolError_offset 0x40 | ||
3062 | #define XgRxSymbolError_WIDTH 32 | ||
3063 | #define XgRxInternalMACError_offset 0x44 | ||
3064 | #define XgRxInternalMACError_WIDTH 32 | ||
3065 | #define XgRxControlPkts_offset 0x48 | ||
3066 | #define XgRxControlPkts_WIDTH 32 | ||
3067 | #define XgRxPausePkts_offset 0x4C | ||
3068 | #define XgRxPausePkts_WIDTH 32 | ||
3069 | #define XgRxPkts64Octets_offset 0x50 | ||
3070 | #define XgRxPkts64Octets_WIDTH 32 | ||
3071 | #define XgRxPkts65to127Octets_offset 0x54 | ||
3072 | #define XgRxPkts65to127Octets_WIDTH 32 | ||
3073 | #define XgRxPkts128to255Octets_offset 0x58 | ||
3074 | #define XgRxPkts128to255Octets_WIDTH 32 | ||
3075 | #define XgRxPkts256to511Octets_offset 0x5C | ||
3076 | #define XgRxPkts256to511Octets_WIDTH 32 | ||
3077 | #define XgRxPkts512to1023Octets_offset 0x60 | ||
3078 | #define XgRxPkts512to1023Octets_WIDTH 32 | ||
3079 | #define XgRxPkts1024to15xxOctets_offset 0x64 | ||
3080 | #define XgRxPkts1024to15xxOctets_WIDTH 32 | ||
3081 | #define XgRxPkts15xxtoMaxOctets_offset 0x68 | ||
3082 | #define XgRxPkts15xxtoMaxOctets_WIDTH 32 | ||
3083 | #define XgRxLengthError_offset 0x6C | ||
3084 | #define XgRxLengthError_WIDTH 32 | ||
3085 | #define XgTxPkts_offset 0x80 | ||
3086 | #define XgTxPkts_WIDTH 32 | ||
3087 | #define XgTxOctets_offset 0x88 | ||
3088 | #define XgTxOctets_WIDTH 48 | ||
3089 | #define XgTxMulticastPkts_offset 0x90 | ||
3090 | #define XgTxMulticastPkts_WIDTH 32 | ||
3091 | #define XgTxBroadcastPkts_offset 0x94 | ||
3092 | #define XgTxBroadcastPkts_WIDTH 32 | ||
3093 | #define XgTxUnicastPkts_offset 0x98 | ||
3094 | #define XgTxUnicastPkts_WIDTH 32 | ||
3095 | #define XgTxControlPkts_offset 0x9C | ||
3096 | #define XgTxControlPkts_WIDTH 32 | ||
3097 | #define XgTxPausePkts_offset 0xA0 | ||
3098 | #define XgTxPausePkts_WIDTH 32 | ||
3099 | #define XgTxPkts64Octets_offset 0xA4 | ||
3100 | #define XgTxPkts64Octets_WIDTH 32 | ||
3101 | #define XgTxPkts65to127Octets_offset 0xA8 | ||
3102 | #define XgTxPkts65to127Octets_WIDTH 32 | ||
3103 | #define XgTxPkts128to255Octets_offset 0xAC | ||
3104 | #define XgTxPkts128to255Octets_WIDTH 32 | ||
3105 | #define XgTxPkts256to511Octets_offset 0xB0 | ||
3106 | #define XgTxPkts256to511Octets_WIDTH 32 | ||
3107 | #define XgTxPkts512to1023Octets_offset 0xB4 | ||
3108 | #define XgTxPkts512to1023Octets_WIDTH 32 | ||
3109 | #define XgTxPkts1024to15xxOctets_offset 0xB8 | ||
3110 | #define XgTxPkts1024to15xxOctets_WIDTH 32 | ||
3111 | #define XgTxPkts1519toMaxOctets_offset 0xBC | ||
3112 | #define XgTxPkts1519toMaxOctets_WIDTH 32 | ||
3113 | #define XgTxUndersizePkts_offset 0xC0 | ||
3114 | #define XgTxUndersizePkts_WIDTH 32 | ||
3115 | #define XgTxOversizePkts_offset 0xC4 | ||
3116 | #define XgTxOversizePkts_WIDTH 32 | ||
3117 | #define XgTxNonTcpUdpPkt_offset 0xC8 | ||
3118 | #define XgTxNonTcpUdpPkt_WIDTH 16 | ||
3119 | #define XgTxMacSrcErrPkt_offset 0xCC | ||
3120 | #define XgTxMacSrcErrPkt_WIDTH 16 | ||
3121 | #define XgTxIpSrcErrPkt_offset 0xD0 | ||
3122 | #define XgTxIpSrcErrPkt_WIDTH 16 | ||
3123 | #define XgDmaDone_offset 0xD4 | ||
3124 | #define XgDmaDone_WIDTH 32 | ||
3125 | |||
3126 | #define FALCON_STATS_NOT_DONE 0x00000000 | ||
3127 | #define FALCON_STATS_DONE 0xffffffff | ||
3128 | |||
3129 | /************************************************************************** | ||
3130 | * | ||
3131 | * Falcon non-volatile configuration | ||
3132 | * | ||
3133 | ************************************************************************** | ||
3134 | */ | ||
3135 | |||
/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of network ports */
	u8 port0_phy_addr;		/* PHY address for port 0 */
	u8 port0_phy_type;		/* PHY type code for port 0 */
	u8 port1_phy_addr;		/* PHY address for port 1 */
	u8 port1_phy_type;		/* PHY type code for port 1 */
	__le16 asic_sub_revision;	/* ASIC sub-revision */
	__le16 board_revision;		/* board hardware revision */
} __packed;
3146 | |||
/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	/* Bit-packed SPI device descriptions; the field layout is
	 * given by the SPI_DEV_TYPE_* definitions. */
	__le32 spi_device_type[2];
} __packed;
3151 | |||
3152 | /* Bit numbers for spi_device_type */ | ||
3153 | #define SPI_DEV_TYPE_SIZE_LBN 0 | ||
3154 | #define SPI_DEV_TYPE_SIZE_WIDTH 5 | ||
3155 | #define SPI_DEV_TYPE_ADDR_LEN_LBN 6 | ||
3156 | #define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2 | ||
3157 | #define SPI_DEV_TYPE_ERASE_CMD_LBN 8 | ||
3158 | #define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8 | ||
3159 | #define SPI_DEV_TYPE_ERASE_SIZE_LBN 16 | ||
3160 | #define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5 | ||
3161 | #define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24 | ||
3162 | #define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5 | ||
3163 | #define SPI_DEV_TYPE_FIELD(type, field) \ | ||
3164 | (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) | ||
3165 | |||
3166 | #define FALCON_NVCONFIG_OFFSET 0x300 | ||
3167 | |||
3168 | #define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | ||
/* Layout of the non-volatile board configuration block, stored at
 * FALCON_NVCONFIG_OFFSET (0x300).  The offsets in the trailing
 * comments are absolute addresses within the NVRAM image. */
struct falcon_nvconfig {
	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];				/* 0x310 */
	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	efx_oword_t hw_init_reg;			/* 0x350 */
	efx_oword_t nic_stat_reg;			/* 0x360 */
	efx_oword_t glb_ctl_reg;			/* 0x370 */
	efx_oword_t srm_cfg_reg;			/* 0x380 */
	efx_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;				/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
3187 | |||
3188 | #endif /* EFX_REGS_H */ | ||
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c new file mode 100644 index 00000000000..62e43649466 --- /dev/null +++ b/drivers/net/sfc/rx.c | |||
@@ -0,0 +1,749 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2005-2011 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/socket.h> | ||
12 | #include <linux/in.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/tcp.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <linux/prefetch.h> | ||
18 | #include <net/ip.h> | ||
19 | #include <net/checksum.h> | ||
20 | #include "net_driver.h" | ||
21 | #include "efx.h" | ||
22 | #include "nic.h" | ||
23 | #include "selftest.h" | ||
24 | #include "workarounds.h" | ||
25 | |||
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Pushing and popping descriptors are separated by the rx_queue
 *     size, so the watermarks should be ~rxd_size.
 *   - The performance win of using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2
92 | |||
93 | /* Offset of ethernet header within page */ | ||
94 | static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, | ||
95 | struct efx_rx_buffer *buf) | ||
96 | { | ||
97 | /* Offset is always within one page, so we don't need to consider | ||
98 | * the page order. | ||
99 | */ | ||
100 | return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) + | ||
101 | efx->type->rx_buffer_hash_size); | ||
102 | } | ||
103 | static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) | ||
104 | { | ||
105 | return PAGE_SIZE << efx->rx_buffer_order; | ||
106 | } | ||
107 | |||
108 | static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) | ||
109 | { | ||
110 | if (buf->is_page) | ||
111 | return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); | ||
112 | else | ||
113 | return ((u8 *)buf->u.skb->data + | ||
114 | efx->type->rx_buffer_hash_size); | ||
115 | } | ||
116 | |||
117 | static inline u32 efx_rx_buf_hash(const u8 *eh) | ||
118 | { | ||
119 | /* The ethernet header is always directly after any hash. */ | ||
120 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 | ||
121 | return __le32_to_cpup((const __le32 *)(eh - 4)); | ||
122 | #else | ||
123 | const u8 *data = eh - 4; | ||
124 | return ((u32)data[0] | | ||
125 | (u32)data[1] << 8 | | ||
126 | (u32)data[2] << 16 | | ||
127 | (u32)data[3] << 24); | ||
128 | #endif | ||
129 | } | ||
130 | |||
/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 * @rx_queue: Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers (the buffers added before the failure remain valid).
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		/* Next free slot in the descriptor ring */
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->is_page = false;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			/* Mapping failed: free the skb and clear the ring
			 * slot so it is not mistaken for a valid buffer. */
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		/* Counters are only advanced once the buffer is fully
		 * initialised and mapped. */
		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}
180 | |||
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 * @rx_queue: Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		/* The page-sharing state (refcount + mapped address) lives
		 * at the start of the page; buffer data follows it. */
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->is_page = true;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		/* On even iterations, when two buffers fit in half a page
		 * each, share this page with the next buffer: bump the page
		 * refcount and fill the second half via the split: label. */
		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
247 | |||
248 | static void efx_unmap_rx_buffer(struct efx_nic *efx, | ||
249 | struct efx_rx_buffer *rx_buf) | ||
250 | { | ||
251 | if (rx_buf->is_page && rx_buf->u.page) { | ||
252 | struct efx_rx_page_state *state; | ||
253 | |||
254 | state = page_address(rx_buf->u.page); | ||
255 | if (--state->refcnt == 0) { | ||
256 | pci_unmap_page(efx->pci_dev, | ||
257 | state->dma_addr, | ||
258 | efx_rx_buf_size(efx), | ||
259 | PCI_DMA_FROMDEVICE); | ||
260 | } | ||
261 | } else if (!rx_buf->is_page && rx_buf->u.skb) { | ||
262 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, | ||
263 | rx_buf->len, PCI_DMA_FROMDEVICE); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | static void efx_free_rx_buffer(struct efx_nic *efx, | ||
268 | struct efx_rx_buffer *rx_buf) | ||
269 | { | ||
270 | if (rx_buf->is_page && rx_buf->u.page) { | ||
271 | __free_pages(rx_buf->u.page, efx->rx_buffer_order); | ||
272 | rx_buf->u.page = NULL; | ||
273 | } else if (!rx_buf->is_page && rx_buf->u.skb) { | ||
274 | dev_kfree_skb_any(rx_buf->u.skb); | ||
275 | rx_buf->u.skb = NULL; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, | ||
280 | struct efx_rx_buffer *rx_buf) | ||
281 | { | ||
282 | efx_unmap_rx_buffer(rx_queue->efx, rx_buf); | ||
283 | efx_free_rx_buffer(rx_queue->efx, rx_buf); | ||
284 | } | ||
285 | |||
/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	/* Take a reference on both the shared page state and the page
	 * itself for the resurrected buffer */
	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	/* XOR with half the page size flips the DMA address between the
	 * two half-page buffers that share this page */
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->is_page = true;
	++rx_queue->added_count;
}
316 | |||
317 | /* Recycle the given rx buffer directly back into the rx_queue. There is | ||
318 | * always room to add this buffer, because we've just popped a buffer. */ | ||
319 | static void efx_recycle_rx_buffer(struct efx_channel *channel, | ||
320 | struct efx_rx_buffer *rx_buf) | ||
321 | { | ||
322 | struct efx_nic *efx = channel->efx; | ||
323 | struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); | ||
324 | struct efx_rx_buffer *new_buf; | ||
325 | unsigned index; | ||
326 | |||
327 | if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && | ||
328 | page_count(rx_buf->u.page) == 1) | ||
329 | efx_resurrect_rx_buffer(rx_queue, rx_buf); | ||
330 | |||
331 | index = rx_queue->added_count & rx_queue->ptr_mask; | ||
332 | new_buf = efx_rx_buffer(rx_queue, index); | ||
333 | |||
334 | memcpy(new_buf, rx_buf, sizeof(*new_buf)); | ||
335 | rx_buf->u.page = NULL; | ||
336 | ++rx_queue->added_count; | ||
337 | } | ||
338 | |||
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practise,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level (zero is treated as "unset") */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Refill happens in whole batches; give up if there is not at
	 * least one batch worth of space below the fast-fill limit */
	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->fast_fill_limit,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	/* Tell the hardware about any descriptors added since the last
	 * notification, even if the fill itself was skipped or failed */
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
401 | |||
402 | void efx_rx_slow_fill(unsigned long context) | ||
403 | { | ||
404 | struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; | ||
405 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | ||
406 | |||
407 | /* Post an event to cause NAPI to run and refill the queue */ | ||
408 | efx_nic_generate_fill_event(channel); | ||
409 | ++rx_queue->slow_fill_count; | ||
410 | } | ||
411 | |||
412 | static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | ||
413 | struct efx_rx_buffer *rx_buf, | ||
414 | int len, bool *discard, | ||
415 | bool *leak_packet) | ||
416 | { | ||
417 | struct efx_nic *efx = rx_queue->efx; | ||
418 | unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; | ||
419 | |||
420 | if (likely(len <= max_len)) | ||
421 | return; | ||
422 | |||
423 | /* The packet must be discarded, but this is only a fatal error | ||
424 | * if the caller indicated it was | ||
425 | */ | ||
426 | *discard = true; | ||
427 | |||
428 | if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { | ||
429 | if (net_ratelimit()) | ||
430 | netif_err(efx, rx_err, efx->net_dev, | ||
431 | " RX queue %d seriously overlength " | ||
432 | "RX event (0x%x > 0x%x+0x%x). Leaking\n", | ||
433 | efx_rx_queue_index(rx_queue), len, max_len, | ||
434 | efx->type->rx_buffer_padding); | ||
435 | /* If this buffer was skb-allocated, then the meta | ||
436 | * data at the end of the skb will be trashed. So | ||
437 | * we have no choice but to leak the fragment. | ||
438 | */ | ||
439 | *leak_packet = !rx_buf->is_page; | ||
440 | efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); | ||
441 | } else { | ||
442 | if (net_ratelimit()) | ||
443 | netif_err(efx, rx_err, efx->net_dev, | ||
444 | " RX queue %d overlength RX event " | ||
445 | "(0x%x > 0x%x)\n", | ||
446 | efx_rx_queue_index(rx_queue), len, max_len); | ||
447 | } | ||
448 | |||
449 | efx_rx_queue_channel(rx_queue)->n_rx_overlength++; | ||
450 | } | ||
451 | |||
/* Pass a received packet up through the generic GRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate GRO method
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh, bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the GRO engine */
	if (rx_buf->is_page) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		/* Ownership of the page moves to the skb built below */
		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Attach the page as the skb's single paged fragment */
		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(efx, rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		/* Skb-backed buffers only reach GRO when checksummed;
		 * see the caller's checksummed || is_page condition */
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->u.skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	/* Feed the GRO outcome back into the allocation-strategy score
	 * and the interrupt-moderation score */
	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}
512 | |||
/* Handle a received packet, first half: does not touch the payload.
 *
 * Pops the buffer at @index from the ring, validates the event length,
 * and pipelines delivery: the packet parked on the previous call is
 * passed to __efx_rx_packet() while this one is parked in
 * channel->rx_pkt, giving the prefetch below time to complete.
 */
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (checksummed ? " [SUMMED]" : ""),
		   (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	/* Deliver the previously parked packet, then park this one
	 * (NULL when this packet was discarded) */
	if (channel->rx_pkt)
		__efx_rx_packet(channel,
				channel->rx_pkt, channel->rx_pkt_csummed);
	channel->rx_pkt = rx_buf;
	channel->rx_pkt_csummed = checksummed;
}
573 | |||
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!rx_buf->is_page) {
		skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		/* Skip over the hardware hash prefix to reach the frame */
		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	/* Honour a runtime-disabled RX checksum offload */
	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		checksummed = false;

	/* Checksummed or page-backed packets go via GRO; the remainder
	 * (unchecksummed skb packets) are delivered directly below */
	if (likely(checksummed || rx_buf->is_page)) {
		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
630 | |||
631 | void efx_rx_strategy(struct efx_channel *channel) | ||
632 | { | ||
633 | enum efx_rx_alloc_method method = rx_alloc_method; | ||
634 | |||
635 | /* Only makes sense to use page based allocation if GRO is enabled */ | ||
636 | if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { | ||
637 | method = RX_ALLOC_METHOD_SKB; | ||
638 | } else if (method == RX_ALLOC_METHOD_AUTO) { | ||
639 | /* Constrain the rx_alloc_level */ | ||
640 | if (channel->rx_alloc_level < 0) | ||
641 | channel->rx_alloc_level = 0; | ||
642 | else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX) | ||
643 | channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; | ||
644 | |||
645 | /* Decide on the allocation method */ | ||
646 | method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ? | ||
647 | RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); | ||
648 | } | ||
649 | |||
650 | /* Push the option */ | ||
651 | channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE); | ||
652 | } | ||
653 | |||
654 | int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | ||
655 | { | ||
656 | struct efx_nic *efx = rx_queue->efx; | ||
657 | unsigned int entries; | ||
658 | int rc; | ||
659 | |||
660 | /* Create the smallest power-of-two aligned ring */ | ||
661 | entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); | ||
662 | EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); | ||
663 | rx_queue->ptr_mask = entries - 1; | ||
664 | |||
665 | netif_dbg(efx, probe, efx->net_dev, | ||
666 | "creating RX queue %d size %#x mask %#x\n", | ||
667 | efx_rx_queue_index(rx_queue), efx->rxq_entries, | ||
668 | rx_queue->ptr_mask); | ||
669 | |||
670 | /* Allocate RX buffers */ | ||
671 | rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer), | ||
672 | GFP_KERNEL); | ||
673 | if (!rx_queue->buffer) | ||
674 | return -ENOMEM; | ||
675 | |||
676 | rc = efx_nic_probe_rx(rx_queue); | ||
677 | if (rc) { | ||
678 | kfree(rx_queue->buffer); | ||
679 | rx_queue->buffer = NULL; | ||
680 | } | ||
681 | return rc; | ||
682 | } | ||
683 | |||
684 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | ||
685 | { | ||
686 | struct efx_nic *efx = rx_queue->efx; | ||
687 | unsigned int max_fill, trigger, limit; | ||
688 | |||
689 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, | ||
690 | "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); | ||
691 | |||
692 | /* Initialise ptr fields */ | ||
693 | rx_queue->added_count = 0; | ||
694 | rx_queue->notified_count = 0; | ||
695 | rx_queue->removed_count = 0; | ||
696 | rx_queue->min_fill = -1U; | ||
697 | |||
698 | /* Initialise limit fields */ | ||
699 | max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; | ||
700 | trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; | ||
701 | limit = max_fill * min(rx_refill_limit, 100U) / 100U; | ||
702 | |||
703 | rx_queue->max_fill = max_fill; | ||
704 | rx_queue->fast_fill_trigger = trigger; | ||
705 | rx_queue->fast_fill_limit = limit; | ||
706 | |||
707 | /* Set up RX descriptor ring */ | ||
708 | efx_nic_init_rx(rx_queue); | ||
709 | } | ||
710 | |||
711 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | ||
712 | { | ||
713 | int i; | ||
714 | struct efx_rx_buffer *rx_buf; | ||
715 | |||
716 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, | ||
717 | "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); | ||
718 | |||
719 | del_timer_sync(&rx_queue->slow_fill); | ||
720 | efx_nic_fini_rx(rx_queue); | ||
721 | |||
722 | /* Release RX buffers NB start at index 0 not current HW ptr */ | ||
723 | if (rx_queue->buffer) { | ||
724 | for (i = 0; i <= rx_queue->ptr_mask; i++) { | ||
725 | rx_buf = efx_rx_buffer(rx_queue, i); | ||
726 | efx_fini_rx_buffer(rx_queue, rx_buf); | ||
727 | } | ||
728 | } | ||
729 | } | ||
730 | |||
731 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | ||
732 | { | ||
733 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, | ||
734 | "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); | ||
735 | |||
736 | efx_nic_remove_rx(rx_queue); | ||
737 | |||
738 | kfree(rx_queue->buffer); | ||
739 | rx_queue->buffer = NULL; | ||
740 | } | ||
741 | |||
742 | |||
/* Expose the RX tuning knobs as module parameters: rx_alloc_method is
 * writable at runtime (0644), rx_refill_threshold is read-only (0444) */
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");
749 | |||
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c new file mode 100644 index 00000000000..822f6c2a6a7 --- /dev/null +++ b/drivers/net/sfc/selftest.c | |||
@@ -0,0 +1,761 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/netdevice.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/ethtool.h> | ||
17 | #include <linux/ip.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <linux/udp.h> | ||
20 | #include <linux/rtnetlink.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <asm/io.h> | ||
23 | #include "net_driver.h" | ||
24 | #include "efx.h" | ||
25 | #include "nic.h" | ||
26 | #include "selftest.h" | ||
27 | #include "workarounds.h" | ||
28 | |||
/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;		/* Ethernet header */
	struct iphdr ip;		/* IPv4 header */
	struct udphdr udp;		/* UDP header */
	__be16 iteration;		/* test iteration number (network order) */
	const char msg[64];		/* message and padding, compared verbatim on RX */
} __packed;
42 | |||
/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

/* Message text carried in each loopback test packet */
static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";
50 | |||
/* Interrupt mode names (indexed by enum efx_int_mode) */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX] = "MSI-X",
	[EFX_INT_MODE_MSI] = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
/* Name of the current interrupt mode, for log messages */
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
60 | |||
/**
 * efx_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in efx_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 *
 * @rx_good and @rx_bad are atomic because they are incremented from
 * the RX path (efx_loopback_rx_packet).
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};
80 | |||
81 | /************************************************************************** | ||
82 | * | ||
83 | * MII, NVRAM and register tests | ||
84 | * | ||
85 | **************************************************************************/ | ||
86 | |||
87 | static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) | ||
88 | { | ||
89 | int rc = 0; | ||
90 | |||
91 | if (efx->phy_op->test_alive) { | ||
92 | rc = efx->phy_op->test_alive(efx); | ||
93 | tests->phy_alive = rc ? -1 : 1; | ||
94 | } | ||
95 | |||
96 | return rc; | ||
97 | } | ||
98 | |||
99 | static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) | ||
100 | { | ||
101 | int rc = 0; | ||
102 | |||
103 | if (efx->type->test_nvram) { | ||
104 | rc = efx->type->test_nvram(efx); | ||
105 | tests->nvram = rc ? -1 : 1; | ||
106 | } | ||
107 | |||
108 | return rc; | ||
109 | } | ||
110 | |||
111 | static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | ||
112 | { | ||
113 | int rc = 0; | ||
114 | |||
115 | /* Test register access */ | ||
116 | if (efx->type->test_registers) { | ||
117 | rc = efx->type->test_registers(efx); | ||
118 | tests->registers = rc ? -1 : 1; | ||
119 | } | ||
120 | |||
121 | return rc; | ||
122 | } | ||
123 | |||
124 | /************************************************************************** | ||
125 | * | ||
126 | * Interrupt and event queue testing | ||
127 | * | ||
128 | **************************************************************************/ | ||
129 | |||
130 | /* Test generation and receipt of interrupts */ | ||
131 | static int efx_test_interrupts(struct efx_nic *efx, | ||
132 | struct efx_self_tests *tests) | ||
133 | { | ||
134 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); | ||
135 | tests->interrupt = -1; | ||
136 | |||
137 | /* Reset interrupt flag */ | ||
138 | efx->last_irq_cpu = -1; | ||
139 | smp_wmb(); | ||
140 | |||
141 | efx_nic_generate_interrupt(efx); | ||
142 | |||
143 | /* Wait for arrival of test interrupt. */ | ||
144 | netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); | ||
145 | schedule_timeout_uninterruptible(HZ / 10); | ||
146 | if (efx->last_irq_cpu >= 0) | ||
147 | goto success; | ||
148 | |||
149 | netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); | ||
150 | return -ETIMEDOUT; | ||
151 | |||
152 | success: | ||
153 | netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", | ||
154 | INT_MODE(efx), | ||
155 | efx->last_irq_cpu); | ||
156 | tests->interrupt = 1; | ||
157 | return 0; | ||
158 | } | ||
159 | |||
/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
			       struct efx_self_tests *tests)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr, count;

	/* Assume failure for each stage until it is observed */
	tests->eventq_dma[channel->channel] = -1;
	tests->eventq_int[channel->channel] = -1;
	tests->eventq_poll[channel->channel] = -1;

	read_ptr = channel->eventq_read_ptr;
	channel->efx->last_irq_cpu = -1;
	smp_wmb();

	efx_nic_generate_test_event(channel);

	/* Wait for arrival of interrupt */
	count = 0;
	do {
		schedule_timeout_uninterruptible(HZ / 100);

		/* The event handler advances eventq_read_ptr when it
		 * consumes the test event; ACCESS_ONCE forces a fresh
		 * read on each iteration */
		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
			goto eventq_ok;
	} while (++count < 2);

	netif_err(efx, drv, efx->net_dev,
		  "channel %d timed out waiting for event queue\n",
		  channel->channel);

	/* See if interrupt arrived */
	if (channel->efx->last_irq_cpu >= 0) {
		netif_err(efx, drv, efx->net_dev,
			  "channel %d saw interrupt on CPU%d "
			  "during event queue test\n", channel->channel,
			  raw_smp_processor_id());
		tests->eventq_int[channel->channel] = 1;
	}

	/* Check to see if event was received even if interrupt wasn't */
	if (efx_nic_event_present(channel)) {
		netif_err(efx, drv, efx->net_dev,
			  "channel %d event was generated, but "
			  "failed to trigger an interrupt\n", channel->channel);
		tests->eventq_dma[channel->channel] = 1;
	}

	return -ETIMEDOUT;
eventq_ok:
	netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
		  channel->channel);
	tests->eventq_dma[channel->channel] = 1;
	tests->eventq_int[channel->channel] = 1;
	tests->eventq_poll[channel->channel] = 1;
	return 0;
}
216 | |||
217 | static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, | ||
218 | unsigned flags) | ||
219 | { | ||
220 | int rc; | ||
221 | |||
222 | if (!efx->phy_op->run_tests) | ||
223 | return 0; | ||
224 | |||
225 | mutex_lock(&efx->mac_lock); | ||
226 | rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); | ||
227 | mutex_unlock(&efx->mac_lock); | ||
228 | return rc; | ||
229 | } | ||
230 | |||
231 | /************************************************************************** | ||
232 | * | ||
233 | * Loopback testing | ||
234 | * NB Only one loopback test can be executing concurrently. | ||
235 | * | ||
236 | **************************************************************************/ | ||
237 | |||
/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	/* saddr is used as an incrementing packet counter and the IP
	 * checksum may have been rewritten by offload, so normalise
	 * both before comparing the whole IP header below */
	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
	/* On debug builds, dump the first bad packet to aid diagnosis */
#ifdef EFX_ENABLE_DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}
331 | |||
332 | /* Initialise an efx_selftest_state for a new iteration */ | ||
333 | static void efx_iterate_state(struct efx_nic *efx) | ||
334 | { | ||
335 | struct efx_loopback_state *state = efx->loopback_selftest; | ||
336 | struct net_device *net_dev = efx->net_dev; | ||
337 | struct efx_loopback_payload *payload = &state->payload; | ||
338 | |||
339 | /* Initialise the layerII header */ | ||
340 | memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); | ||
341 | memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); | ||
342 | payload->header.h_proto = htons(ETH_P_IP); | ||
343 | |||
344 | /* saddr set later and used as incrementing count */ | ||
345 | payload->ip.daddr = htonl(INADDR_LOOPBACK); | ||
346 | payload->ip.ihl = 5; | ||
347 | payload->ip.check = htons(0xdead); | ||
348 | payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); | ||
349 | payload->ip.version = IPVERSION; | ||
350 | payload->ip.protocol = IPPROTO_UDP; | ||
351 | |||
352 | /* Initialise udp header */ | ||
353 | payload->udp.source = 0; | ||
354 | payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - | ||
355 | sizeof(struct iphdr)); | ||
356 | payload->udp.check = 0; /* checksum ignored */ | ||
357 | |||
358 | /* Fill out payload */ | ||
359 | payload->iteration = htons(ntohs(payload->iteration) + 1); | ||
360 | memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); | ||
361 | |||
362 | /* Fill out remaining state members */ | ||
363 | atomic_set(&state->rx_good, 0); | ||
364 | atomic_set(&state->rx_bad, 0); | ||
365 | smp_wmb(); | ||
366 | } | ||
367 | |||
/* Transmit state->packet_count copies of the loopback payload through
 * @tx_queue, giving each packet a distinct IP source address.
 *
 * Each skb is allocated with an extra reference (skb_get()) so that
 * efx_end_loopback() can later infer from the remaining refcount
 * whether TX completion has consumed the queue's reference.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EPIPE if
 * the queue refuses a packet.  On failure, skbs already handed to the
 * queue are left in state->skbs[] for the caller to clean up.
 */
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		/* Take the TX lock only when the device is registered,
		 * to serialise against the normal transmit path */
		if (efx_dev_registered(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		if (efx_dev_registered(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}
419 | |||
420 | static int efx_poll_loopback(struct efx_nic *efx) | ||
421 | { | ||
422 | struct efx_loopback_state *state = efx->loopback_selftest; | ||
423 | struct efx_channel *channel; | ||
424 | |||
425 | /* NAPI polling is not enabled, so process channels | ||
426 | * synchronously */ | ||
427 | efx_for_each_channel(channel, efx) { | ||
428 | if (channel->work_pending) | ||
429 | efx_process_channel_now(channel); | ||
430 | } | ||
431 | return atomic_read(&state->rx_good) == state->packet_count; | ||
432 | } | ||
433 | |||
434 | static int efx_end_loopback(struct efx_tx_queue *tx_queue, | ||
435 | struct efx_loopback_self_tests *lb_tests) | ||
436 | { | ||
437 | struct efx_nic *efx = tx_queue->efx; | ||
438 | struct efx_loopback_state *state = efx->loopback_selftest; | ||
439 | struct sk_buff *skb; | ||
440 | int tx_done = 0, rx_good, rx_bad; | ||
441 | int i, rc = 0; | ||
442 | |||
443 | if (efx_dev_registered(efx)) | ||
444 | netif_tx_lock_bh(efx->net_dev); | ||
445 | |||
446 | /* Count the number of tx completions, and decrement the refcnt. Any | ||
447 | * skbs not already completed will be free'd when the queue is flushed */ | ||
448 | for (i=0; i < state->packet_count; i++) { | ||
449 | skb = state->skbs[i]; | ||
450 | if (skb && !skb_shared(skb)) | ||
451 | ++tx_done; | ||
452 | dev_kfree_skb_any(skb); | ||
453 | } | ||
454 | |||
455 | if (efx_dev_registered(efx)) | ||
456 | netif_tx_unlock_bh(efx->net_dev); | ||
457 | |||
458 | /* Check TX completion and received packet counts */ | ||
459 | rx_good = atomic_read(&state->rx_good); | ||
460 | rx_bad = atomic_read(&state->rx_bad); | ||
461 | if (tx_done != state->packet_count) { | ||
462 | /* Don't free the skbs; they will be picked up on TX | ||
463 | * overflow or channel teardown. | ||
464 | */ | ||
465 | netif_err(efx, drv, efx->net_dev, | ||
466 | "TX queue %d saw only %d out of an expected %d " | ||
467 | "TX completion events in %s loopback test\n", | ||
468 | tx_queue->queue, tx_done, state->packet_count, | ||
469 | LOOPBACK_MODE(efx)); | ||
470 | rc = -ETIMEDOUT; | ||
471 | /* Allow to fall through so we see the RX errors as well */ | ||
472 | } | ||
473 | |||
474 | /* We may always be up to a flush away from our desired packet total */ | ||
475 | if (rx_good != state->packet_count) { | ||
476 | netif_dbg(efx, drv, efx->net_dev, | ||
477 | "TX queue %d saw only %d out of an expected %d " | ||
478 | "received packets in %s loopback test\n", | ||
479 | tx_queue->queue, rx_good, state->packet_count, | ||
480 | LOOPBACK_MODE(efx)); | ||
481 | rc = -ETIMEDOUT; | ||
482 | /* Fall through */ | ||
483 | } | ||
484 | |||
485 | /* Update loopback test structure */ | ||
486 | lb_tests->tx_sent[tx_queue->queue] += state->packet_count; | ||
487 | lb_tests->tx_done[tx_queue->queue] += tx_done; | ||
488 | lb_tests->rx_good += rx_good; | ||
489 | lb_tests->rx_bad += rx_bad; | ||
490 | |||
491 | return rc; | ||
492 | } | ||
493 | |||
494 | static int | ||
495 | efx_test_loopback(struct efx_tx_queue *tx_queue, | ||
496 | struct efx_loopback_self_tests *lb_tests) | ||
497 | { | ||
498 | struct efx_nic *efx = tx_queue->efx; | ||
499 | struct efx_loopback_state *state = efx->loopback_selftest; | ||
500 | int i, begin_rc, end_rc; | ||
501 | |||
502 | for (i = 0; i < 3; i++) { | ||
503 | /* Determine how many packets to send */ | ||
504 | state->packet_count = efx->txq_entries / 3; | ||
505 | state->packet_count = min(1 << (i << 2), state->packet_count); | ||
506 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | ||
507 | state->packet_count, GFP_KERNEL); | ||
508 | if (!state->skbs) | ||
509 | return -ENOMEM; | ||
510 | state->flush = false; | ||
511 | |||
512 | netif_dbg(efx, drv, efx->net_dev, | ||
513 | "TX queue %d testing %s loopback with %d packets\n", | ||
514 | tx_queue->queue, LOOPBACK_MODE(efx), | ||
515 | state->packet_count); | ||
516 | |||
517 | efx_iterate_state(efx); | ||
518 | begin_rc = efx_begin_loopback(tx_queue); | ||
519 | |||
520 | /* This will normally complete very quickly, but be | ||
521 | * prepared to wait up to 100 ms. */ | ||
522 | msleep(1); | ||
523 | if (!efx_poll_loopback(efx)) { | ||
524 | msleep(100); | ||
525 | efx_poll_loopback(efx); | ||
526 | } | ||
527 | |||
528 | end_rc = efx_end_loopback(tx_queue, lb_tests); | ||
529 | kfree(state->skbs); | ||
530 | |||
531 | if (begin_rc || end_rc) { | ||
532 | /* Wait a while to ensure there are no packets | ||
533 | * floating around after a failure. */ | ||
534 | schedule_timeout_uninterruptible(HZ / 10); | ||
535 | return begin_rc ? begin_rc : end_rc; | ||
536 | } | ||
537 | } | ||
538 | |||
539 | netif_dbg(efx, drv, efx->net_dev, | ||
540 | "TX queue %d passed %s loopback test with a burst length " | ||
541 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | ||
542 | state->packet_count); | ||
543 | |||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but | ||
548 | * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it | ||
549 | * to delay and retry. Therefore, it's safer to just poll directly. Wait | ||
550 | * for link up and any faults to dissipate. */ | ||
551 | static int efx_wait_for_link(struct efx_nic *efx) | ||
552 | { | ||
553 | struct efx_link_state *link_state = &efx->link_state; | ||
554 | int count, link_up_count = 0; | ||
555 | bool link_up; | ||
556 | |||
557 | for (count = 0; count < 40; count++) { | ||
558 | schedule_timeout_uninterruptible(HZ / 10); | ||
559 | |||
560 | if (efx->type->monitor != NULL) { | ||
561 | mutex_lock(&efx->mac_lock); | ||
562 | efx->type->monitor(efx); | ||
563 | mutex_unlock(&efx->mac_lock); | ||
564 | } else { | ||
565 | struct efx_channel *channel = efx_get_channel(efx, 0); | ||
566 | if (channel->work_pending) | ||
567 | efx_process_channel_now(channel); | ||
568 | } | ||
569 | |||
570 | mutex_lock(&efx->mac_lock); | ||
571 | link_up = link_state->up; | ||
572 | if (link_up) | ||
573 | link_up = !efx->mac_op->check_fault(efx); | ||
574 | mutex_unlock(&efx->mac_lock); | ||
575 | |||
576 | if (link_up) { | ||
577 | if (++link_up_count == 2) | ||
578 | return 0; | ||
579 | } else { | ||
580 | link_up_count = 0; | ||
581 | } | ||
582 | } | ||
583 | |||
584 | return -ETIMEDOUT; | ||
585 | } | ||
586 | |||
/* Run the loopback test in every mode selected by @loopback_modes,
 * on every TX queue of channel 0, recording results in @tests.
 *
 * Installs a temporary efx_loopback_state as efx->loopback_selftest;
 * while that pointer is set, received packets are diverted to
 * efx_loopback_rx_packet() and (when state->flush is set) dropped.
 * The state is always removed and freed before returning; the caller
 * is responsible for restoring the original loopback mode.
 *
 * Returns 0 on success or the first error encountered.
 */
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->queue &
					       EFX_TXQ_TYPE_OFFLOAD);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	return rc;
}
652 | |||
653 | /************************************************************************** | ||
654 | * | ||
655 | * Entry point | ||
656 | * | ||
657 | *************************************************************************/ | ||
658 | |||
/* Main self-test entry point (called from the ethtool self-test hook).
 *
 * Always runs the online (non-disruptive) tests: PHY aliveness, NVRAM,
 * interrupt generation and per-channel event queue checks.  If
 * ETH_TEST_FL_OFFLINE is set in @flags, additionally runs the
 * disruptive tests: the device is detached, queues are torn down for a
 * register test and chip reset, then PHY and loopback tests run before
 * the original PHY/loopback configuration is restored.
 *
 * Returns 0 if all tests pass, otherwise the first test failure code;
 * a failure to recover from the chip reset returns that error and
 * schedules a disabling reset.
 */
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	struct efx_channel *channel;
	int rc_test = 0, rc_reset = 0, rc;

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	efx_for_each_channel(channel, efx) {
		rc = efx_test_eventq_irq(channel, tests);
		if (rc && !rc_test)
			rc_test = rc;
	}

	if (rc_test)
		return rc_test;

	/* Online-only run: finish with the PHY test */
	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	netif_device_detach(efx->net_dev);

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}

	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	/* free up all consumers of SRAM (including all the queues) */
	efx_reset_down(efx, reset_method);

	rc = efx_test_chip(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	/* reset the chip to recover from the register test */
	rc_reset = efx->type->reset(efx, reset_method);

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;

	rc = efx_reset_up(efx, reset_method, rc_reset == 0);
	if (rc && !rc_reset)
		rc_reset = rc;

	if (rc_reset) {
		netif_err(efx, drv, efx->net_dev,
			  "Unable to recover from chip test\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc_reset;
	}

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	netif_device_attach(efx->net_dev);

	return rc_test;
}
761 | |||
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h new file mode 100644 index 00000000000..dba5456e70f --- /dev/null +++ b/drivers/net/sfc/selftest.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_SELFTEST_H | ||
12 | #define EFX_SELFTEST_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | |||
16 | /* | ||
17 | * Self tests | ||
18 | */ | ||
19 | |||
/* Per-loopback-mode packet counters, accumulated across bursts by the
 * loopback test in selftest.c */
struct efx_loopback_self_tests {
	int tx_sent[EFX_TXQ_TYPES];	/* packets queued, per TX queue */
	int tx_done[EFX_TXQ_TYPES];	/* TX completions seen, per TX queue */
	int rx_good;			/* packets received back intact */
	int rx_bad;			/* packets received but corrupted */
};
26 | |||
/* Maximum number of per-PHY test result slots */
#define EFX_MAX_PHY_TESTS 20

/* Efx self test results
 * For fields which are not counters, 1 indicates success and -1
 * indicates failure.
 */
struct efx_self_tests {
	/* online tests */
	int phy_alive;				/* PHY responds */
	int nvram;				/* NVRAM contents valid */
	int interrupt;				/* interrupt generation */
	int eventq_dma[EFX_MAX_CHANNELS];	/* event delivered by DMA */
	int eventq_int[EFX_MAX_CHANNELS];	/* event raised an IRQ */
	int eventq_poll[EFX_MAX_CHANNELS];	/* event seen by polling */
	/* offline tests */
	int registers;				/* register read/write test */
	int phy_ext[EFX_MAX_PHY_TESTS];		/* PHY-specific tests */
	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
46 | |||
/* Inspect a packet received while a loopback self-test is in progress
 * (implemented in selftest.c) */
extern void efx_loopback_rx_packet(struct efx_nic *efx,
				   const char *buf_ptr, int pkt_len);
/* Run the self-tests; @flags may include ETH_TEST_FL_OFFLINE to enable
 * the disruptive tests */
extern int efx_selftest(struct efx_nic *efx,
			struct efx_self_tests *tests,
			unsigned flags);
52 | |||
53 | #endif /* EFX_SELFTEST_H */ | ||
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c new file mode 100644 index 00000000000..2c3bd93fab5 --- /dev/null +++ b/drivers/net/sfc/siena.c | |||
@@ -0,0 +1,659 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/random.h> | ||
17 | #include "net_driver.h" | ||
18 | #include "bitfield.h" | ||
19 | #include "efx.h" | ||
20 | #include "nic.h" | ||
21 | #include "mac.h" | ||
22 | #include "spi.h" | ||
23 | #include "regs.h" | ||
24 | #include "io.h" | ||
25 | #include "phy.h" | ||
26 | #include "workarounds.h" | ||
27 | #include "mcdi.h" | ||
28 | #include "mcdi_pcol.h" | ||
29 | |||
30 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ | ||
31 | |||
32 | static void siena_init_wol(struct efx_nic *efx); | ||
33 | |||
34 | |||
/* Program the channel's interrupt moderation timer.  A non-zero
 * irq_moderation selects hold-off mode with a timer value of
 * irq_moderation - 1; zero disables the timer entirely. */
static void siena_push_irq_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;

	if (channel->irq_moderation)
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_CZ_TC_TIMER_MODE,
				     FFE_CZ_TIMER_MODE_INT_HLDOFF,
				     FRF_CZ_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	else
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_CZ_TC_TIMER_MODE,
				     FFE_CZ_TIMER_MODE_DIS,
				     FRF_CZ_TC_TIMER_VAL, 0);
	/* Write to the per-channel page of the timer command register */
	efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
53 | |||
/* Push the current multicast hash filter to the MC via MCDI.
 * Caller must hold efx->mac_lock.  The MCDI return value is
 * deliberately ignored (fire-and-forget). */
static void siena_push_multicast_hash(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
		     efx->multicast_hash.byte, sizeof(efx->multicast_hash),
		     NULL, 0, NULL);
}
62 | |||
63 | static int siena_mdio_write(struct net_device *net_dev, | ||
64 | int prtad, int devad, u16 addr, u16 value) | ||
65 | { | ||
66 | struct efx_nic *efx = netdev_priv(net_dev); | ||
67 | uint32_t status; | ||
68 | int rc; | ||
69 | |||
70 | rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, | ||
71 | addr, value, &status); | ||
72 | if (rc) | ||
73 | return rc; | ||
74 | if (status != MC_CMD_MDIO_STATUS_GOOD) | ||
75 | return -EIO; | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int siena_mdio_read(struct net_device *net_dev, | ||
81 | int prtad, int devad, u16 addr) | ||
82 | { | ||
83 | struct efx_nic *efx = netdev_priv(net_dev); | ||
84 | uint16_t value; | ||
85 | uint32_t status; | ||
86 | int rc; | ||
87 | |||
88 | rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, | ||
89 | addr, &value, &status); | ||
90 | if (rc) | ||
91 | return rc; | ||
92 | if (status != MC_CMD_MDIO_STATUS_GOOD) | ||
93 | return -EIO; | ||
94 | |||
95 | return (int)value; | ||
96 | } | ||
97 | |||
/* This call is responsible for hooking in the MAC and PHY operations.
 * Installs the MCDI PHY operations and MDIO accessors, probes the PHY,
 * and allocates the DMA buffer used for MAC statistics.
 * Returns 0 on success or a negative error code. */
static int siena_probe_port(struct efx_nic *efx)
{
	int rc;

	/* Hook in PHY operations table */
	efx->phy_op = &efx_mcdi_phy_ops;

	/* Set up MDIO structure for PHY */
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = siena_mdio_read;
	efx->mdio.mdio_write = siena_mdio_write;

	/* Fill out MDIO structure, loopback modes, and initial link state */
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  MC_CMD_MAC_NSTATS * sizeof(u64));
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	/* Tell the MC where to DMA MAC stats (see efx_mcdi_mac_stats) */
	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);

	return 0;
}
131 | |||
/* Undo siena_probe_port(): remove the PHY and free the stats buffer */
static void siena_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}
137 | |||
/* Register/mask table for the offline register test; each entry pairs
 * a register address with the mask of bits the test may exercise
 * (consumed by efx_nic_test_registers()). */
static const struct efx_nic_register_test siena_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_CZ_USR_EV_CFG,
	  EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_RX_RSS_TKEY,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
	{ FR_CZ_RX_RSS_IPV6_REG1,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
	{ FR_CZ_RX_RSS_IPV6_REG2,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
	{ FR_CZ_RX_RSS_IPV6_REG3,
	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};
166 | |||
/* Offline register test hook: run the generic register test over the
 * Siena register/mask table above */
static int siena_test_registers(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, siena_register_tests,
				      ARRAY_SIZE(siena_register_tests));
}
172 | |||
173 | /************************************************************************** | ||
174 | * | ||
175 | * Device reset | ||
176 | * | ||
177 | ************************************************************************** | ||
178 | */ | ||
179 | |||
/* Map a scheduled-reset reason to a reset method.  Siena services every
 * reason with a full (RESET_TYPE_ALL) reset; @reason is ignored. */
static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_ALL;
}
184 | |||
/* Map ethtool ETH_RESET_* flags to a Siena reset method, clearing from
 * *flags the bits the chosen reset will satisfy.  Returns a
 * reset_type value on success or -EINVAL if no supported reset covers
 * the requested flags (Siena has no invisible reset). */
static int siena_map_reset_flags(u32 *flags)
{
	enum {
		SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
				    ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				    ETH_RESET_PHY),
		SIENA_RESET_MC = (SIENA_RESET_PORT |
				  ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
	};

	/* Prefer the widest reset that is fully requested */
	if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
		*flags &= ~SIENA_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
		*flags &= ~SIENA_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}
209 | |||
210 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) | ||
211 | { | ||
212 | int rc; | ||
213 | |||
214 | /* Recover from a failed assertion pre-reset */ | ||
215 | rc = efx_mcdi_handle_assertion(efx); | ||
216 | if (rc) | ||
217 | return rc; | ||
218 | |||
219 | if (method == RESET_TYPE_WORLD) | ||
220 | return efx_mcdi_reset_mc(efx); | ||
221 | else | ||
222 | return efx_mcdi_reset_port(efx); | ||
223 | } | ||
224 | |||
/* Read the board configuration from the MC, filling in the permanent
 * MAC address.  Returns 0 or a negative MCDI error. */
static int siena_probe_nvconfig(struct efx_nic *efx)
{
	return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
}
229 | |||
230 | static int siena_probe_nic(struct efx_nic *efx) | ||
231 | { | ||
232 | struct siena_nic_data *nic_data; | ||
233 | bool already_attached = 0; | ||
234 | efx_oword_t reg; | ||
235 | int rc; | ||
236 | |||
237 | /* Allocate storage for hardware specific data */ | ||
238 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); | ||
239 | if (!nic_data) | ||
240 | return -ENOMEM; | ||
241 | efx->nic_data = nic_data; | ||
242 | |||
243 | if (efx_nic_fpga_ver(efx) != 0) { | ||
244 | netif_err(efx, probe, efx->net_dev, | ||
245 | "Siena FPGA not supported\n"); | ||
246 | rc = -ENODEV; | ||
247 | goto fail1; | ||
248 | } | ||
249 | |||
250 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); | ||
251 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | ||
252 | |||
253 | efx_mcdi_init(efx); | ||
254 | |||
255 | /* Recover from a failed assertion before probing */ | ||
256 | rc = efx_mcdi_handle_assertion(efx); | ||
257 | if (rc) | ||
258 | goto fail1; | ||
259 | |||
260 | /* Let the BMC know that the driver is now in charge of link and | ||
261 | * filter settings. We must do this before we reset the NIC */ | ||
262 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); | ||
263 | if (rc) { | ||
264 | netif_err(efx, probe, efx->net_dev, | ||
265 | "Unable to register driver with MCPU\n"); | ||
266 | goto fail2; | ||
267 | } | ||
268 | if (already_attached) | ||
269 | /* Not a fatal error */ | ||
270 | netif_err(efx, probe, efx->net_dev, | ||
271 | "Host already registered with MCPU\n"); | ||
272 | |||
273 | /* Now we can reset the NIC */ | ||
274 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); | ||
275 | if (rc) { | ||
276 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); | ||
277 | goto fail3; | ||
278 | } | ||
279 | |||
280 | siena_init_wol(efx); | ||
281 | |||
282 | /* Allocate memory for INT_KER */ | ||
283 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); | ||
284 | if (rc) | ||
285 | goto fail4; | ||
286 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | ||
287 | |||
288 | netif_dbg(efx, probe, efx->net_dev, | ||
289 | "INT_KER at %llx (virt %p phys %llx)\n", | ||
290 | (unsigned long long)efx->irq_status.dma_addr, | ||
291 | efx->irq_status.addr, | ||
292 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); | ||
293 | |||
294 | /* Read in the non-volatile configuration */ | ||
295 | rc = siena_probe_nvconfig(efx); | ||
296 | if (rc == -EINVAL) { | ||
297 | netif_err(efx, probe, efx->net_dev, | ||
298 | "NVRAM is invalid therefore using defaults\n"); | ||
299 | efx->phy_type = PHY_TYPE_NONE; | ||
300 | efx->mdio.prtad = MDIO_PRTAD_NONE; | ||
301 | } else if (rc) { | ||
302 | goto fail5; | ||
303 | } | ||
304 | |||
305 | return 0; | ||
306 | |||
307 | fail5: | ||
308 | efx_nic_free_buffer(efx, &efx->irq_status); | ||
309 | fail4: | ||
310 | fail3: | ||
311 | efx_mcdi_drv_attach(efx, false, NULL); | ||
312 | fail2: | ||
313 | fail1: | ||
314 | kfree(efx->nic_data); | ||
315 | return rc; | ||
316 | } | ||
317 | |||
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 * Returns 0 on success or a negative error code.
 */
static int siena_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Recover from a failed assertion post-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	/* Squash TX of packets of 16 bytes or less */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	efx_reado(efx, &temp, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
	/* Enable hash insertion. This is broken for the 'Falcon' hash
	 * if IPv6 hashing is also enabled, so also select Toeplitz
	 * TCP/IPv4 and IPv4 hashes. */
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
	efx_writeo(efx, &temp, FR_AZ_RX_CFG);

	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	/* Enable IPv6 RSS.  The key must be large enough to cover two
	 * full registers plus the high-order fragment in REG3, and the
	 * REG3 key field must start at bit 0 for the memcpy below. */
	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		return rc;

	/* Set destination of both TX and RX Flush events */
	EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
	efx_writeo(efx, &temp, FR_BZ_DP_CTRL);

	/* Set USREV_DIS (presumably disabling user-level events —
	 * confirm against the register spec) */
	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);

	efx_nic_init_common(efx);
	return 0;
}
389 | |||
/* Inverse of siena_probe_nic(): free the IRQ status buffer, reset the
 * hardware, detach from the management controller and release the
 * private NIC state.
 */
static void siena_remove_nic(struct efx_nic *efx)
{
	efx_nic_free_buffer(efx, &efx->irq_status);

	siena_reset_hw(efx, RESET_TYPE_ALL);

	/* Relinquish the device back to the BMC */
	if (efx_nic_has_mc(efx))
		efx_mcdi_drv_attach(efx, false, NULL);

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
404 | |||
405 | #define STATS_GENERATION_INVALID ((__force __le64)(-1)) | ||
406 | |||
/* Copy the latest MAC statistics snapshot from the stats DMA buffer
 * into efx->mac_stats.  Returns 0 on success (or if no snapshot has
 * been written yet, in which case the old values are kept), or
 * -EAGAIN if the MC was concurrently rewriting the buffer and the
 * caller should retry.
 */
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
	__le64 *dma_stats;
	struct efx_mac_stats *mac_stats;
	__le64 generation_start, generation_end;

	mac_stats = &efx->mac_stats;
	dma_stats = efx->stats_buffer.addr;

	/* GENERATION_END is written last by the MC; while it still holds
	 * the invalid marker no complete snapshot exists */
	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == STATS_GENERATION_INVALID)
		return 0;
	/* Read the generation word before any of the stats words */
	rmb();

/* Copy one little-endian 64-bit counter out of the DMA buffer */
#define MAC_STAT(M, D) \
	mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])

	MAC_STAT(tx_bytes, TX_BYTES);
	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
	/* Good byte count is derived, not reported by the MC */
	mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
				    mac_stats->tx_bad_bytes);
	MAC_STAT(tx_packets, TX_PKTS);
	MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
	MAC_STAT(tx_pause, TX_PAUSE_PKTS);
	MAC_STAT(tx_control, TX_CONTROL_PKTS);
	MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
	MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
	MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
	MAC_STAT(tx_lt64, TX_LT64_PKTS);
	MAC_STAT(tx_64, TX_64_PKTS);
	MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
	MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
	MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
	MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
	MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
	MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
	MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
	mac_stats->tx_collision = 0;
	MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
	MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
	MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
	MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
	MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
	/* Total collisions are also derived from the individual counts */
	mac_stats->tx_collision = (mac_stats->tx_single_collision +
				   mac_stats->tx_multiple_collision +
				   mac_stats->tx_excessive_collision +
				   mac_stats->tx_late_collision);
	MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
	MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
	MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
	MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
	MAC_STAT(rx_bytes, RX_BYTES);
	MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
	mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
				    mac_stats->rx_bad_bytes);
	MAC_STAT(rx_packets, RX_PKTS);
	MAC_STAT(rx_good, RX_GOOD_PKTS);
	MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
	MAC_STAT(rx_pause, RX_PAUSE_PKTS);
	MAC_STAT(rx_control, RX_CONTROL_PKTS);
	MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
	MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
	MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
	MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
	MAC_STAT(rx_64, RX_64_PKTS);
	MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
	MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
	MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
	MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
	MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
	MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
	MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
	/* Counters not reported by the MC are zeroed */
	mac_stats->rx_bad_lt64 = 0;
	mac_stats->rx_bad_64_to_15xx = 0;
	mac_stats->rx_bad_15xx_to_jumbo = 0;
	MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
	MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
	mac_stats->rx_missed = 0;
	MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
	MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
	MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
	MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
	MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
	mac_stats->rx_good_lt64 = 0;

	efx->n_rx_nodesc_drop_cnt =
		le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);

#undef MAC_STAT

	/* Finish reading the stats words before re-checking the
	 * generation; a mismatch means the MC updated the buffer
	 * while we were reading it */
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	return 0;
}
504 | |||
/* Refresh efx->mac_stats from the DMA buffer, retrying while the MC
 * is part-way through writing a new snapshot.
 */
static void siena_update_nic_stats(struct efx_nic *efx)
{
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us) */
	for (retry = 0; retry < 100; ++retry) {
		if (siena_try_update_nic_stats(efx) == 0)
			return;
		udelay(100);
	}

	/* Use the old values instead */
}
519 | |||
/* Start MAC statistics DMA from the MC.  The generation-end word is
 * invalidated first so siena_try_update_nic_stats() will not trust the
 * buffer until the MC has written a complete snapshot.
 */
static void siena_start_nic_stats(struct efx_nic *efx)
{
	__le64 *dma_stats = efx->stats_buffer.addr;

	dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;

	/* NOTE(review): trailing (1, 0) arguments presumably enable
	 * periodic DMA — confirm against efx_mcdi_mac_stats() */
	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
			   MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
}
529 | |||
/* Stop MAC statistics DMA (zero length, all flags cleared) */
static void siena_stop_nic_stats(struct efx_nic *efx)
{
	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}
534 | |||
535 | /************************************************************************** | ||
536 | * | ||
537 | * Wake on LAN | ||
538 | * | ||
539 | ************************************************************************** | ||
540 | */ | ||
541 | |||
542 | static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | ||
543 | { | ||
544 | struct siena_nic_data *nic_data = efx->nic_data; | ||
545 | |||
546 | wol->supported = WAKE_MAGIC; | ||
547 | if (nic_data->wol_filter_id != -1) | ||
548 | wol->wolopts = WAKE_MAGIC; | ||
549 | else | ||
550 | wol->wolopts = 0; | ||
551 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
552 | } | ||
553 | |||
554 | |||
555 | static int siena_set_wol(struct efx_nic *efx, u32 type) | ||
556 | { | ||
557 | struct siena_nic_data *nic_data = efx->nic_data; | ||
558 | int rc; | ||
559 | |||
560 | if (type & ~WAKE_MAGIC) | ||
561 | return -EINVAL; | ||
562 | |||
563 | if (type & WAKE_MAGIC) { | ||
564 | if (nic_data->wol_filter_id != -1) | ||
565 | efx_mcdi_wol_filter_remove(efx, | ||
566 | nic_data->wol_filter_id); | ||
567 | rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, | ||
568 | &nic_data->wol_filter_id); | ||
569 | if (rc) | ||
570 | goto fail; | ||
571 | |||
572 | pci_wake_from_d3(efx->pci_dev, true); | ||
573 | } else { | ||
574 | rc = efx_mcdi_wol_filter_reset(efx); | ||
575 | nic_data->wol_filter_id = -1; | ||
576 | pci_wake_from_d3(efx->pci_dev, false); | ||
577 | if (rc) | ||
578 | goto fail; | ||
579 | } | ||
580 | |||
581 | return 0; | ||
582 | fail: | ||
583 | netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", | ||
584 | __func__, type, rc); | ||
585 | return rc; | ||
586 | } | ||
587 | |||
588 | |||
589 | static void siena_init_wol(struct efx_nic *efx) | ||
590 | { | ||
591 | struct siena_nic_data *nic_data = efx->nic_data; | ||
592 | int rc; | ||
593 | |||
594 | rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); | ||
595 | |||
596 | if (rc != 0) { | ||
597 | /* If it failed, attempt to get into a synchronised | ||
598 | * state with MC by resetting any set WoL filters */ | ||
599 | efx_mcdi_wol_filter_reset(efx); | ||
600 | nic_data->wol_filter_id = -1; | ||
601 | } else if (nic_data->wol_filter_id != -1) { | ||
602 | pci_wake_from_d3(efx->pci_dev, true); | ||
603 | } | ||
604 | } | ||
605 | |||
606 | |||
607 | /************************************************************************** | ||
608 | * | ||
609 | * Revision-dependent attributes used by efx.c and nic.c | ||
610 | * | ||
611 | ************************************************************************** | ||
612 | */ | ||
613 | |||
/* Revision-specific methods and attributes for SFC9000-family (Siena)
 * A0 silicon, consumed by the generic efx.c and nic.c code */
const struct efx_nic_type siena_a0_nic_type = {
	.probe = siena_probe_nic,
	.remove = siena_remove_nic,
	.init = siena_init_nic,
	.fini = efx_port_dummy_op_void,
	.monitor = NULL,		/* no periodic hardware monitor */
	.map_reset_reason = siena_map_reset_reason,
	.map_reset_flags = siena_map_reset_flags,
	.reset = siena_reset_hw,
	.probe_port = siena_probe_port,
	.remove_port = siena_remove_port,
	.prepare_flush = efx_port_dummy_op_void,
	.update_stats = siena_update_nic_stats,
	.start_stats = siena_start_nic_stats,
	.stop_stats = siena_stop_nic_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = siena_push_irq_moderation,
	.push_multicast_hash = siena_push_multicast_hash,
	.reconfigure_port = efx_mcdi_phy_reconfigure,
	.get_wol = siena_get_wol,
	.set_wol = siena_set_wol,
	.resume_wol = siena_init_wol,	/* resync WoL state after resume */
	.test_registers = siena_test_registers,
	.test_nvram = efx_mcdi_nvram_test_all,
	.default_mac_ops = &efx_mcdi_mac_operations,

	.revision = EFX_REV_SIENA_A0,
	/* Map up to and including the MC shared-memory registers */
	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.tx_dc_base = 0x88000,
	.rx_dc_base = 0x68000,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
};
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h new file mode 100644 index 00000000000..71f2e3ebe1c --- /dev/null +++ b/drivers/net/sfc/spi.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005 Fen Systems Ltd. | ||
4 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_SPI_H | ||
12 | #define EFX_SPI_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | |||
16 | /************************************************************************** | ||
17 | * | ||
18 | * Basic SPI command set and bit definitions | ||
19 | * | ||
20 | *************************************************************************/ | ||
21 | |||
22 | #define SPI_WRSR 0x01 /* Write status register */ | ||
23 | #define SPI_WRITE 0x02 /* Write data to memory array */ | ||
24 | #define SPI_READ 0x03 /* Read data from memory array */ | ||
25 | #define SPI_WRDI 0x04 /* Reset write enable latch */ | ||
26 | #define SPI_RDSR 0x05 /* Read status register */ | ||
27 | #define SPI_WREN 0x06 /* Set write enable latch */ | ||
28 | #define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */ | ||
29 | |||
30 | #define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */ | ||
31 | #define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */ | ||
32 | #define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */ | ||
33 | #define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */ | ||
34 | #define SPI_STATUS_WEN 0x02 /* State of the write enable latch */ | ||
35 | #define SPI_STATUS_NRDY 0x01 /* Device busy flag */ | ||
36 | |||
37 | /** | ||
38 | * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device | ||
39 | * @device_id: Controller's id for the device | ||
40 | * @size: Size (in bytes) | ||
41 | * @addr_len: Number of address bytes in read/write commands | ||
42 | * @munge_address: Flag whether addresses should be munged. | ||
43 | * Some devices with 9-bit addresses (e.g. AT25040A EEPROM) | ||
44 | * use bit 3 of the command byte as address bit A8, rather | ||
45 | * than having a two-byte address. If this flag is set, then | ||
46 | * commands should be munged in this way. | ||
47 | * @erase_command: Erase command (or 0 if sector erase not needed). | ||
48 | * @erase_size: Erase sector size (in bytes) | ||
49 | * Erase commands affect sectors with this size and alignment. | ||
50 | * This must be a power of two. | ||
51 | * @block_size: Write block size (in bytes). | ||
52 | * Write commands are limited to blocks with this size and alignment. | ||
53 | */ | ||
struct efx_spi_device {
	int device_id;
	unsigned int size;	/* zero => device absent (efx_spi_present) */
	unsigned int addr_len;
	unsigned int munge_address:1;
	u8 erase_command;
	unsigned int erase_size;
	unsigned int block_size;
};
63 | |||
64 | static inline bool efx_spi_present(const struct efx_spi_device *spi) | ||
65 | { | ||
66 | return spi->size != 0; | ||
67 | } | ||
68 | |||
69 | int falcon_spi_cmd(struct efx_nic *efx, | ||
70 | const struct efx_spi_device *spi, unsigned int command, | ||
71 | int address, const void* in, void *out, size_t len); | ||
72 | int falcon_spi_wait_write(struct efx_nic *efx, | ||
73 | const struct efx_spi_device *spi); | ||
74 | int falcon_spi_read(struct efx_nic *efx, | ||
75 | const struct efx_spi_device *spi, loff_t start, | ||
76 | size_t len, size_t *retlen, u8 *buffer); | ||
77 | int falcon_spi_write(struct efx_nic *efx, | ||
78 | const struct efx_spi_device *spi, loff_t start, | ||
79 | size_t len, size_t *retlen, const u8 *buffer); | ||
80 | |||
81 | /* | ||
82 | * SFC4000 flash is partitioned into: | ||
83 | * 0-0x400 chip and board config (see falcon_hwdefs.h) | ||
84 | * 0x400-0x8000 unused (or may contain VPD if EEPROM not present) | ||
85 | * 0x8000-end boot code (mapped to PCI expansion ROM) | ||
86 | * SFC4000 small EEPROM (size < 0x400) is used for VPD only. | ||
87 | * SFC4000 large EEPROM (size >= 0x400) is partitioned into: | ||
88 | * 0-0x400 chip and board config | ||
89 | * configurable VPD | ||
90 | * 0x800-0x1800 boot config | ||
91 | * Aside from the chip and board config, all of these are optional and may | ||
92 | * be absent or truncated depending on the devices used. | ||
93 | */ | ||
94 | #define FALCON_NVCONFIG_END 0x400U | ||
95 | #define FALCON_FLASH_BOOTCODE_START 0x8000U | ||
96 | #define EFX_EEPROM_BOOTCONFIG_START 0x800U | ||
97 | #define EFX_EEPROM_BOOTCONFIG_END 0x1800U | ||
98 | |||
99 | #endif /* EFX_SPI_H */ | ||
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c new file mode 100644 index 00000000000..7b0fd89e7b8 --- /dev/null +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -0,0 +1,494 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2007-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include <linux/rtnetlink.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include "efx.h" | ||
15 | #include "mdio_10g.h" | ||
16 | #include "nic.h" | ||
17 | #include "phy.h" | ||
18 | #include "workarounds.h" | ||
19 | |||
20 | /* We expect these MMDs to be in the package. */ | ||
21 | #define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ | ||
22 | MDIO_DEVS_PCS | \ | ||
23 | MDIO_DEVS_PHYXS | \ | ||
24 | MDIO_DEVS_AN) | ||
25 | |||
26 | #define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ | ||
27 | (1 << LOOPBACK_PCS) | \ | ||
28 | (1 << LOOPBACK_PMAPMD) | \ | ||
29 | (1 << LOOPBACK_PHYXS_WS)) | ||
30 | |||
31 | /* We complain if we fail to see the link partner as 10G capable this many | ||
32 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) | ||
33 | */ | ||
34 | #define MAX_BAD_LP_TRIES (5) | ||
35 | |||
36 | /* Extended control register */ | ||
37 | #define PMA_PMD_XCONTROL_REG 49152 | ||
38 | #define PMA_PMD_EXT_GMII_EN_LBN 1 | ||
39 | #define PMA_PMD_EXT_GMII_EN_WIDTH 1 | ||
40 | #define PMA_PMD_EXT_CLK_OUT_LBN 2 | ||
41 | #define PMA_PMD_EXT_CLK_OUT_WIDTH 1 | ||
42 | #define PMA_PMD_LNPGA_POWERDOWN_LBN 8 | ||
43 | #define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 | ||
44 | #define PMA_PMD_EXT_CLK312_WIDTH 1 | ||
45 | #define PMA_PMD_EXT_LPOWER_LBN 12 | ||
46 | #define PMA_PMD_EXT_LPOWER_WIDTH 1 | ||
47 | #define PMA_PMD_EXT_ROBUST_LBN 14 | ||
48 | #define PMA_PMD_EXT_ROBUST_WIDTH 1 | ||
49 | #define PMA_PMD_EXT_SSR_LBN 15 | ||
50 | #define PMA_PMD_EXT_SSR_WIDTH 1 | ||
51 | |||
52 | /* extended status register */ | ||
53 | #define PMA_PMD_XSTATUS_REG 49153 | ||
54 | #define PMA_PMD_XSTAT_MDIX_LBN 14 | ||
55 | #define PMA_PMD_XSTAT_FLP_LBN (12) | ||
56 | |||
57 | /* LED control register */ | ||
58 | #define PMA_PMD_LED_CTRL_REG 49159 | ||
59 | #define PMA_PMA_LED_ACTIVITY_LBN (3) | ||
60 | |||
61 | /* LED function override register */ | ||
62 | #define PMA_PMD_LED_OVERR_REG 49161 | ||
63 | /* Bit positions for different LEDs (there are more but not wired on SFE4001)*/ | ||
64 | #define PMA_PMD_LED_LINK_LBN (0) | ||
65 | #define PMA_PMD_LED_SPEED_LBN (2) | ||
66 | #define PMA_PMD_LED_TX_LBN (4) | ||
67 | #define PMA_PMD_LED_RX_LBN (6) | ||
68 | /* Override settings */ | ||
69 | #define PMA_PMD_LED_AUTO (0) /* H/W control */ | ||
70 | #define PMA_PMD_LED_ON (1) | ||
71 | #define PMA_PMD_LED_OFF (2) | ||
72 | #define PMA_PMD_LED_FLASH (3) | ||
73 | #define PMA_PMD_LED_MASK 3 | ||
74 | /* All LEDs under hardware control */ | ||
75 | /* Green and Amber under hardware control, Red off */ | ||
76 | #define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) | ||
77 | |||
78 | #define PMA_PMD_SPEED_ENABLE_REG 49192 | ||
79 | #define PMA_PMD_100TX_ADV_LBN 1 | ||
80 | #define PMA_PMD_100TX_ADV_WIDTH 1 | ||
81 | #define PMA_PMD_1000T_ADV_LBN 2 | ||
82 | #define PMA_PMD_1000T_ADV_WIDTH 1 | ||
83 | #define PMA_PMD_10000T_ADV_LBN 3 | ||
84 | #define PMA_PMD_10000T_ADV_WIDTH 1 | ||
85 | #define PMA_PMD_SPEED_LBN 4 | ||
86 | #define PMA_PMD_SPEED_WIDTH 4 | ||
87 | |||
88 | /* Misc register defines */ | ||
89 | #define PCS_CLOCK_CTRL_REG 55297 | ||
90 | #define PLL312_RST_N_LBN 2 | ||
91 | |||
92 | #define PCS_SOFT_RST2_REG 55302 | ||
93 | #define SERDES_RST_N_LBN 13 | ||
94 | #define XGXS_RST_N_LBN 12 | ||
95 | |||
96 | #define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */ | ||
97 | #define CLK312_EN_LBN 3 | ||
98 | |||
99 | /* PHYXS registers */ | ||
100 | #define PHYXS_XCONTROL_REG 49152 | ||
101 | #define PHYXS_RESET_LBN 15 | ||
102 | #define PHYXS_RESET_WIDTH 1 | ||
103 | |||
104 | #define PHYXS_TEST1 (49162) | ||
105 | #define LOOPBACK_NEAR_LBN (8) | ||
106 | #define LOOPBACK_NEAR_WIDTH (1) | ||
107 | |||
108 | /* Boot status register */ | ||
109 | #define PCS_BOOT_STATUS_REG 53248 | ||
110 | #define PCS_BOOT_FATAL_ERROR_LBN 0 | ||
111 | #define PCS_BOOT_PROGRESS_LBN 1 | ||
112 | #define PCS_BOOT_PROGRESS_WIDTH 2 | ||
113 | #define PCS_BOOT_PROGRESS_INIT 0 | ||
114 | #define PCS_BOOT_PROGRESS_WAIT_MDIO 1 | ||
115 | #define PCS_BOOT_PROGRESS_CHECKSUM 2 | ||
116 | #define PCS_BOOT_PROGRESS_JUMP 3 | ||
117 | #define PCS_BOOT_DOWNLOAD_WAIT_LBN 3 | ||
118 | #define PCS_BOOT_CODE_STARTED_LBN 4 | ||
119 | |||
120 | /* 100M/1G PHY registers */ | ||
121 | #define GPHY_XCONTROL_REG 49152 | ||
122 | #define GPHY_ISOLATE_LBN 10 | ||
123 | #define GPHY_ISOLATE_WIDTH 1 | ||
124 | #define GPHY_DUPLEX_LBN 8 | ||
125 | #define GPHY_DUPLEX_WIDTH 1 | ||
126 | #define GPHY_LOOPBACK_NEAR_LBN 14 | ||
127 | #define GPHY_LOOPBACK_NEAR_WIDTH 1 | ||
128 | |||
129 | #define C22EXT_STATUS_REG 49153 | ||
130 | #define C22EXT_STATUS_LINK_LBN 2 | ||
131 | #define C22EXT_STATUS_LINK_WIDTH 1 | ||
132 | |||
133 | #define C22EXT_MSTSLV_CTRL 49161 | ||
134 | #define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8 | ||
135 | #define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9 | ||
136 | |||
137 | #define C22EXT_MSTSLV_STATUS 49162 | ||
138 | #define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10 | ||
139 | #define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11 | ||
140 | |||
141 | /* Time to wait between powering down the LNPGA and turning off the power | ||
142 | * rails */ | ||
143 | #define LNPGA_PDOWN_WAIT (HZ / 5) | ||
144 | |||
145 | struct tenxpress_phy_data { | ||
146 | enum efx_loopback_mode loopback_mode; | ||
147 | enum efx_phy_mode phy_mode; | ||
148 | int bad_lp_tries; | ||
149 | }; | ||
150 | |||
/* Basic clock and LED initialisation of the SFX7101; run at PHY init
 * time and again after a special software reset.  Always returns 0. */
static int tenxpress_init(struct efx_nic *efx)
{
	/* Enable 312.5 MHz clock */
	efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
		       1 << CLK312_EN_LBN);

	/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
	efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
			  1 << PMA_PMA_LED_ACTIVITY_LBN, true);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
		       SFX7101_PMA_PMD_LED_DEFAULT);

	return 0;
}
165 | |||
166 | static int tenxpress_phy_probe(struct efx_nic *efx) | ||
167 | { | ||
168 | struct tenxpress_phy_data *phy_data; | ||
169 | |||
170 | /* Allocate phy private storage */ | ||
171 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
172 | if (!phy_data) | ||
173 | return -ENOMEM; | ||
174 | efx->phy_data = phy_data; | ||
175 | phy_data->phy_mode = efx->phy_mode; | ||
176 | |||
177 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | ||
178 | efx->mdio.mode_support = MDIO_SUPPORTS_C45; | ||
179 | |||
180 | efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | ||
181 | |||
182 | efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | | ||
183 | ADVERTISED_10000baseT_Full); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
/* Bring the PHY out of reset and apply initial configuration.
 * Returns 0 on success or a negative error code from the MDIO helpers.
 */
static int tenxpress_phy_init(struct efx_nic *efx)
{
	int rc;

	/* Board-specific PHY setup first */
	falcon_board(efx)->type->init_phy(efx);

	if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
		/* Wait for the required MMDs to come out of reset and
		 * verify they are present */
		rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
		if (rc < 0)
			return rc;

		rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
		if (rc < 0)
			return rc;
	}

	rc = tenxpress_init(efx);
	if (rc < 0)
		return rc;

	/* Reinitialise flow control settings */
	efx_link_set_wanted_fc(efx, efx->wanted_fc);
	efx_mdio_an_reconfigure(efx);

	schedule_timeout_uninterruptible(HZ / 5); /* 200ms */

	/* Let XGXS and SerDes out of reset */
	falcon_reset_xaui(efx);

	return 0;
}
219 | |||
/* Perform a "special software reset" on the PHY. The caller is
 * responsible for saving and restoring the PHY hardware registers
 * properly, and masking/unmasking LASI.
 * Returns 0 on success or a negative error code.
 */
static int tenxpress_special_reset(struct efx_nic *efx)
{
	int rc, reg;

	/* The XGMAC clock is driven from the SFX7101 312MHz clock, so
	 * a special software reset can glitch the XGMAC sufficiently for stats
	 * requests to fail. */
	falcon_stop_nic_stats(efx);

	/* Initiate reset */
	reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
	reg |= (1 << PMA_PMD_EXT_SSR_LBN);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);

	mdelay(200);

	/* Wait for the blocks to come out of reset */
	rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
	if (rc < 0)
		goto out;

	/* Try and reconfigure the device */
	rc = tenxpress_init(efx);
	if (rc < 0)
		goto out;

	/* Wait for the XGXS state machine to churn */
	mdelay(10);
out:
	/* Always restart statistics, even if the reset failed */
	falcon_start_nic_stats(efx);
	return rc;
}
255 | |||
/* Detect a link partner that cannot autonegotiate 10GBASE-T.  Called
 * from the poll routine with the current link state: when the link is
 * down but autoneg has started (LP-able) without completing, count the
 * failure; after MAX_BAD_LP_TRIES consecutive failures, log a warning
 * and flash the RX (red) LED until the condition clears.
 */
static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
{
	struct tenxpress_phy_data *pd = efx->phy_data;
	bool bad_lp;
	int reg;

	if (link_ok) {
		bad_lp = false;
	} else {
		/* Check that AN has started but not completed. */
		reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
		if (!(reg & MDIO_AN_STAT1_LPABLE))
			return; /* LP status is unknown */
		bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
		if (bad_lp)
			pd->bad_lp_tries++;
	}

	/* Nothing to do if all is well and was previously so. */
	if (!pd->bad_lp_tries)
		return;

	/* Use the RX (red) LED as an error indicator once we've seen AN
	 * failure several times in a row, and also log a message. */
	if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
		reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
				    PMA_PMD_LED_OVERR_REG);
		/* Clear the current RX LED override bits before setting */
		reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
		if (!bad_lp) {
			reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
		} else {
			reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
			netif_err(efx, link, efx->net_dev,
				  "appears to be plugged into a port"
				  " that is not 10GBASE-T capable. The PHY"
				  " supports 10GBASE-T ONLY, so no link can"
				  " be established\n");
		}
		efx_mdio_write(efx, MDIO_MMD_PMAPMD,
			       PMA_PMD_LED_OVERR_REG, reg);
		/* bool->int: restart the count at 0 (recovered) or 1
		 * (still bad), so the warning is repeated only after
		 * several more consecutive failures */
		pd->bad_lp_tries = bad_lp;
	}
}
299 | |||
300 | static bool sfx7101_link_ok(struct efx_nic *efx) | ||
301 | { | ||
302 | return efx_mdio_links_ok(efx, | ||
303 | MDIO_DEVS_PMAPMD | | ||
304 | MDIO_DEVS_PCS | | ||
305 | MDIO_DEVS_PHYXS); | ||
306 | } | ||
307 | |||
308 | static void tenxpress_ext_loopback(struct efx_nic *efx) | ||
309 | { | ||
310 | efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1, | ||
311 | 1 << LOOPBACK_NEAR_LBN, | ||
312 | efx->loopback_mode == LOOPBACK_PHYXS); | ||
313 | } | ||
314 | |||
315 | static void tenxpress_low_power(struct efx_nic *efx) | ||
316 | { | ||
317 | efx_mdio_set_mmds_lpower( | ||
318 | efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER), | ||
319 | TENXPRESS_REQUIRED_DEVS); | ||
320 | } | ||
321 | |||
/* Push the current efx->phy_mode and efx->loopback_mode settings to
 * the PHY, performing a special software reset (plus XAUI reset) when
 * leaving an external loopback mode or returning to PHY_MODE_NORMAL.
 * Always returns 0.
 */
static int tenxpress_phy_reconfigure(struct efx_nic *efx)
{
	struct tenxpress_phy_data *phy_data = efx->phy_data;
	bool phy_mode_change, loop_reset;

	/* In OFF/SPECIAL modes the PHY is not reconfigured; just record
	 * the requested mode */
	if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
		phy_data->phy_mode = efx->phy_mode;
		return 0;
	}

	phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
			   phy_data->phy_mode != PHY_MODE_NORMAL);
	loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
		      LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));

	if (loop_reset || phy_mode_change) {
		tenxpress_special_reset(efx);
		falcon_reset_xaui(efx);
	}

	tenxpress_low_power(efx);
	efx_mdio_transmit_disable(efx);
	efx_mdio_phy_reconfigure(efx);
	tenxpress_ext_loopback(efx);
	efx_mdio_an_reconfigure(efx);

	/* Remember the settings just applied, for the next delta check */
	phy_data->loopback_mode = efx->loopback_mode;
	phy_data->phy_mode = efx->phy_mode;

	return 0;
}
353 | |||
354 | static void | ||
355 | tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); | ||
356 | |||
357 | /* Poll for link state changes */ | ||
358 | static bool tenxpress_phy_poll(struct efx_nic *efx) | ||
359 | { | ||
360 | struct efx_link_state old_state = efx->link_state; | ||
361 | |||
362 | efx->link_state.up = sfx7101_link_ok(efx); | ||
363 | efx->link_state.speed = 10000; | ||
364 | efx->link_state.fd = true; | ||
365 | efx->link_state.fc = efx_mdio_get_pause(efx); | ||
366 | |||
367 | sfx7101_check_bad_lp(efx, efx->link_state.up); | ||
368 | |||
369 | return !efx_link_state_equal(&efx->link_state, &old_state); | ||
370 | } | ||
371 | |||
/* Power down the SFX7101 LNPGA before board fini can remove power from
 * the PHY */
static void sfx7101_phy_fini(struct efx_nic *efx)
{
	int reg;

	/* Power down the LNPGA */
	reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);

	/* Waiting here ensures that the board fini, which can turn
	 * off the power to the PHY, won't get run until the LNPGA
	 * powerdown has been given long enough to complete. */
	schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
385 | |||
386 | static void tenxpress_phy_remove(struct efx_nic *efx) | ||
387 | { | ||
388 | kfree(efx->phy_data); | ||
389 | efx->phy_data = NULL; | ||
390 | } | ||
391 | |||
392 | |||
393 | /* Override the RX, TX and link LEDs */ | ||
394 | void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
395 | { | ||
396 | int reg; | ||
397 | |||
398 | switch (mode) { | ||
399 | case EFX_LED_OFF: | ||
400 | reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) | | ||
401 | (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) | | ||
402 | (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN); | ||
403 | break; | ||
404 | case EFX_LED_ON: | ||
405 | reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) | | ||
406 | (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) | | ||
407 | (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN); | ||
408 | break; | ||
409 | default: | ||
410 | reg = SFX7101_PMA_PMD_LED_DEFAULT; | ||
411 | break; | ||
412 | } | ||
413 | |||
414 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); | ||
415 | } | ||
416 | |||
417 | static const char *const sfx7101_test_names[] = { | ||
418 | "bist" | ||
419 | }; | ||
420 | |||
421 | static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index) | ||
422 | { | ||
423 | if (index < ARRAY_SIZE(sfx7101_test_names)) | ||
424 | return sfx7101_test_names[index]; | ||
425 | return NULL; | ||
426 | } | ||
427 | |||
428 | static int | ||
429 | sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags) | ||
430 | { | ||
431 | int rc; | ||
432 | |||
433 | if (!(flags & ETH_TEST_FL_OFFLINE)) | ||
434 | return 0; | ||
435 | |||
436 | /* BIST is automatically run after a special software reset */ | ||
437 | rc = tenxpress_special_reset(efx); | ||
438 | results[0] = rc ? -1 : 1; | ||
439 | |||
440 | efx_mdio_an_reconfigure(efx); | ||
441 | |||
442 | return rc; | ||
443 | } | ||
444 | |||
/* Fill in ethtool settings for the PHY.  The local and link-partner
 * 10GBASE-T abilities are read over MDIO and merged into the generic
 * clause-45 settings by mdio45_ethtool_gset_npage().
 */
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	u32 adv = 0, lpa = 0;
	int reg;

	/* Our advertised 10GBASE-T ability */
	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
	if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
		adv |= ADVERTISED_10000baseT_Full;
	/* Link partner's advertised 10GBASE-T ability */
	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
	if (reg & MDIO_AN_10GBT_STAT_LP10G)
		lpa |= ADVERTISED_10000baseT_Full;

	mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);

	/* In loopback, the PHY automatically brings up the correct interface,
	 * but doesn't advertise the correct speed. So override it */
	if (LOOPBACK_EXTERNAL(efx))
		ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
465 | |||
466 | static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
467 | { | ||
468 | if (!ecmd->autoneg) | ||
469 | return -EINVAL; | ||
470 | |||
471 | return efx_mdio_set_settings(efx, ecmd); | ||
472 | } | ||
473 | |||
/* Mirror the 10GBASE-T bit of @advertising into the AN control register */
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
	efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
			  MDIO_AN_10GBT_CTRL_ADV10G,
			  advertising & ADVERTISED_10000baseT_Full);
}
480 | |||
/* PHY operations table for the SFX7101 10GBASE-T PHY on Falcon boards */
const struct efx_phy_operations falcon_sfx7101_phy_ops = {
	.probe		  = tenxpress_phy_probe,
	.init             = tenxpress_phy_init,
	.reconfigure      = tenxpress_phy_reconfigure,
	.poll             = tenxpress_phy_poll,
	.fini             = sfx7101_phy_fini,
	.remove		  = tenxpress_phy_remove,
	.get_settings	  = tenxpress_get_settings,
	.set_settings	  = tenxpress_set_settings,
	.set_npage_adv    = sfx7101_set_npage_adv,
	.test_alive	  = efx_mdio_test_alive,
	.test_name	  = sfx7101_test_name,
	.run_tests	  = sfx7101_run_tests,
};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c new file mode 100644 index 00000000000..84eb99e0f8d --- /dev/null +++ b/drivers/net/sfc/tx.c | |||
@@ -0,0 +1,1212 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2005-2010 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | #include <linux/tcp.h> | ||
13 | #include <linux/ip.h> | ||
14 | #include <linux/in.h> | ||
15 | #include <linux/ipv6.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <net/ipv6.h> | ||
18 | #include <linux/if_ether.h> | ||
19 | #include <linux/highmem.h> | ||
20 | #include "net_driver.h" | ||
21 | #include "efx.h" | ||
22 | #include "nic.h" | ||
23 | #include "workarounds.h" | ||
24 | |||
/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)	((_efx)->txq_entries / 2u)	/* half the ring */
32 | |||
/* Release a single TX buffer: unmap its DMA range (single or page
 * mapping, as recorded when it was mapped) and free the skb if this
 * buffer carried one (i.e. it was the last buffer of a packet).
 */
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		/* unmap_len may cover several descriptors; the mapping
		 * ends at the end of this (final) buffer */
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}
58 | |||
/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_DATA()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		/* @next is used while the header sits on the free list;
		 * @unmap_len (non-zero) marks a heap-allocated header */
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};
78 | |||
79 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | ||
80 | struct sk_buff *skb); | ||
81 | static void efx_fini_tso(struct efx_tx_queue *tx_queue); | ||
82 | static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, | ||
83 | struct efx_tso_header *tsoh); | ||
84 | |||
/* Detach the TSO header (if any) from @buffer: standard-size headers
 * go back on the queue's free list, oversize heap-allocated headers
 * (unmap_len != 0) are unmapped and freed.
 */
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			/* Pool header - return to the free list */
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			/* Oversize header from the heap */
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}
98 | |||
99 | |||
100 | static inline unsigned | ||
101 | efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) | ||
102 | { | ||
103 | /* Depending on the NIC revision, we can use descriptor | ||
104 | * lengths up to 8K or 8K-1. However, since PCI Express | ||
105 | * devices must split read requests at 4K boundaries, there is | ||
106 | * little benefit from using descriptors that cross those | ||
107 | * boundaries and we keep things simple by not doing so. | ||
108 | */ | ||
109 | unsigned len = (~dma_addr & 0xfff) + 1; | ||
110 | |||
111 | /* Work around hardware bug for unaligned buffers. */ | ||
112 | if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) | ||
113 | len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); | ||
114 | |||
115 | return len; | ||
116 | } | ||
117 | |||
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	/* Segmentation-offload packets take the TSO path instead */
	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		/* skb_pad() frees the skb on failure */
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 because a completely full ring would be indistinguishable
	 * from an empty one with these counters */
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					/* Still no room - leave the queue
					 * stopped and ask the stack to
					 * retry later */
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			/* The slot must be clean before reuse */
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}
293 | |||
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			/* A zero-length slot was never filled by the xmit
			 * path, so this completion is bogus; schedule a
			 * reset to recover */
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		/* Reset the slot for reuse by the xmit path */
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
326 | |||
327 | /* Initiate a packet transmission. We use one channel per CPU | ||
328 | * (sharing when we have more CPUs than channels). On Falcon, the TX | ||
329 | * completion events will be directed back to the CPU that transmitted | ||
330 | * the packet, which should be cache-efficient. | ||
331 | * | ||
332 | * Context: non-blocking. | ||
333 | * Note that returning anything other than NETDEV_TX_OK will cause the | ||
334 | * OS to free the skb. | ||
335 | */ | ||
336 | netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, | ||
337 | struct net_device *net_dev) | ||
338 | { | ||
339 | struct efx_nic *efx = netdev_priv(net_dev); | ||
340 | struct efx_tx_queue *tx_queue; | ||
341 | unsigned index, type; | ||
342 | |||
343 | EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); | ||
344 | |||
345 | index = skb_get_queue_mapping(skb); | ||
346 | type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; | ||
347 | if (index >= efx->n_tx_channels) { | ||
348 | index -= efx->n_tx_channels; | ||
349 | type |= EFX_TXQ_TYPE_HIGHPRI; | ||
350 | } | ||
351 | tx_queue = efx_get_tx_queue(efx, index, type); | ||
352 | |||
353 | return efx_enqueue_skb(tx_queue, skb); | ||
354 | } | ||
355 | |||
/* Cache a pointer to the core netdev TX queue backing this hardware
 * queue.  The index computation maps (channel, type) back to a core
 * queue number.
 */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
367 | |||
/* Configure the number of traffic classes exposed via mqprio.
 * Rejected on NICs earlier than Falcon B0 or for more than
 * EFX_MAX_TX_TC classes.  Returns 0 or a negative error code.
 */
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	/* Each class maps to a contiguous block of n_tx_channels queues */
	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
424 | |||
/* Process TX completions up to and including @index, then restart the
 * core TX queue if it was stopped and enough descriptors have been
 * freed.
 */
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
			netif_tx_wake_queue(tx_queue->core_txq);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Record the read_count at which the queue was
			 * observed empty, tagged with the valid flag */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
458 | |||
459 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | ||
460 | { | ||
461 | struct efx_nic *efx = tx_queue->efx; | ||
462 | unsigned int entries; | ||
463 | int i, rc; | ||
464 | |||
465 | /* Create the smallest power-of-two aligned ring */ | ||
466 | entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); | ||
467 | EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); | ||
468 | tx_queue->ptr_mask = entries - 1; | ||
469 | |||
470 | netif_dbg(efx, probe, efx->net_dev, | ||
471 | "creating TX queue %d size %#x mask %#x\n", | ||
472 | tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); | ||
473 | |||
474 | /* Allocate software ring */ | ||
475 | tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer), | ||
476 | GFP_KERNEL); | ||
477 | if (!tx_queue->buffer) | ||
478 | return -ENOMEM; | ||
479 | for (i = 0; i <= tx_queue->ptr_mask; ++i) | ||
480 | tx_queue->buffer[i].continuation = true; | ||
481 | |||
482 | /* Allocate hardware ring */ | ||
483 | rc = efx_nic_probe_tx(tx_queue); | ||
484 | if (rc) | ||
485 | goto fail; | ||
486 | |||
487 | return 0; | ||
488 | |||
489 | fail: | ||
490 | kfree(tx_queue->buffer); | ||
491 | tx_queue->buffer = NULL; | ||
492 | return rc; | ||
493 | } | ||
494 | |||
495 | void efx_init_tx_queue(struct efx_tx_queue *tx_queue) | ||
496 | { | ||
497 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, | ||
498 | "initialising TX queue %d\n", tx_queue->queue); | ||
499 | |||
500 | tx_queue->insert_count = 0; | ||
501 | tx_queue->write_count = 0; | ||
502 | tx_queue->old_write_count = 0; | ||
503 | tx_queue->read_count = 0; | ||
504 | tx_queue->old_read_count = 0; | ||
505 | tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; | ||
506 | |||
507 | /* Set up TX descriptor ring */ | ||
508 | efx_nic_init_tx(tx_queue); | ||
509 | |||
510 | tx_queue->initialised = true; | ||
511 | } | ||
512 | |||
513 | void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) | ||
514 | { | ||
515 | struct efx_tx_buffer *buffer; | ||
516 | |||
517 | if (!tx_queue->buffer) | ||
518 | return; | ||
519 | |||
520 | /* Free any buffers left in the ring */ | ||
521 | while (tx_queue->read_count != tx_queue->write_count) { | ||
522 | buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; | ||
523 | efx_dequeue_buffer(tx_queue, buffer); | ||
524 | buffer->continuation = true; | ||
525 | buffer->len = 0; | ||
526 | |||
527 | ++tx_queue->read_count; | ||
528 | } | ||
529 | } | ||
530 | |||
/* Shut down a TX queue: flush the hardware ring, then release any
 * outstanding buffers and the TSO header cache.  Safe to call on a
 * queue that was never initialised.
 */
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}
549 | |||
550 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | ||
551 | { | ||
552 | if (!tx_queue->buffer) | ||
553 | return; | ||
554 | |||
555 | netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, | ||
556 | "destroying TX queue %d\n", tx_queue->queue); | ||
557 | efx_nic_remove_tx(tx_queue); | ||
558 | |||
559 | kfree(tx_queue->buffer); | ||
560 | tx_queue->buffer = NULL; | ||
561 | } | ||
562 | |||
563 | |||
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

/* Address of the packet-header data within a header buffer */
#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

/* Offsets of the various protocol headers within the linear skb data */
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
597 | |||
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	/* Per-packet constants */
	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};
634 | |||
635 | |||
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	/* TSO is only attempted for TCP over IPv4 or IPv6 */
	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	/* The full TCP header must lie within the linear area */
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
672 | |||
673 | |||
674 | /* | ||
675 | * Allocate a page worth of efx_tso_header structures, and string them | ||
676 | * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. | ||
677 | */ | ||
678 | static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) | ||
679 | { | ||
680 | |||
681 | struct pci_dev *pci_dev = tx_queue->efx->pci_dev; | ||
682 | struct efx_tso_header *tsoh; | ||
683 | dma_addr_t dma_addr; | ||
684 | u8 *base_kva, *kva; | ||
685 | |||
686 | base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); | ||
687 | if (base_kva == NULL) { | ||
688 | netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, | ||
689 | "Unable to allocate page for TSO headers\n"); | ||
690 | return -ENOMEM; | ||
691 | } | ||
692 | |||
693 | /* pci_alloc_consistent() allocates pages. */ | ||
694 | EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); | ||
695 | |||
696 | for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { | ||
697 | tsoh = (struct efx_tso_header *)kva; | ||
698 | tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); | ||
699 | tsoh->next = tx_queue->tso_headers_free; | ||
700 | tx_queue->tso_headers_free = tsoh; | ||
701 | } | ||
702 | |||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | |||
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	/* Headers are carved out of a single page, so the page base
	 * can be recovered by masking any header's address */
	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	/* Unlink every free-list entry that lives in this page */
	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
729 | |||
/* Allocate an oversize TSO header from the heap and DMA-map its
 * buffer area.  unmap_len is set non-zero so efx_tsoh_free() knows to
 * free it rather than return it to the free list.  Returns NULL on
 * allocation or mapping failure.
 */
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}
751 | |||
752 | static void | ||
753 | efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) | ||
754 | { | ||
755 | pci_unmap_single(tx_queue->efx->pci_dev, | ||
756 | tsoh->dma_addr, tsoh->unmap_len, | ||
757 | PCI_DMA_TODEVICE); | ||
758 | kfree(tsoh); | ||
759 | } | ||
760 | |||
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.  On failure the core queue is left stopped and
 * *@final_buffer is set to NULL.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	/* Counts are free-running; unsigned subtraction gives the number
	 * of descriptors currently in flight. */
	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				/* Genuinely full: leave the queue stopped
				 * and report failure to the caller. */
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		/* Claim the next ring slot; the ring size is a power of
		 * two so masking the free-running count yields the index. */
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		/* The slot must be clean before we reuse it */
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		/* Limit each descriptor to what the NIC can DMA in one go */
		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}
842 | |||
843 | |||
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 *
 * Ownership of @tsoh passes to the ring slot; it is released later via
 * efx_tsoh_free() when the slot is recycled.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	/* The slot must be clean before we reuse it */
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}
869 | |||
870 | |||
/* Remove descriptors put into a tx_queue.  Used to back out a partially
 * enqueued packet when the queue fills or a mapping fails.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			/* unmap_len may cover more than this descriptor;
			 * the mapping's start is recovered from its end
			 * (dma_addr + len). */
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		/* Return the slot to its clean state */
		buffer->len = 0;
		buffer->continuation = true;
	}
}
901 | |||
902 | |||
/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	/* IPv6 has no IP ID field, so only latch it for IPv4 */
	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	/* TSO should never be asked to segment URG/SYN/RST frames */
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	/* packet_space is re-initialised per output packet by
	 * tso_start_new_packet(); out_len is the total payload left */
	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
928 | |||
929 | static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | ||
930 | skb_frag_t *frag) | ||
931 | { | ||
932 | st->unmap_addr = pci_map_page(efx->pci_dev, frag->page, | ||
933 | frag->page_offset, frag->size, | ||
934 | PCI_DMA_TODEVICE); | ||
935 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { | ||
936 | st->unmap_single = false; | ||
937 | st->unmap_len = frag->size; | ||
938 | st->in_len = frag->size; | ||
939 | st->dma_addr = st->unmap_addr; | ||
940 | return 0; | ||
941 | } | ||
942 | return -ENOMEM; | ||
943 | } | ||
944 | |||
945 | static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, | ||
946 | const struct sk_buff *skb) | ||
947 | { | ||
948 | int hl = st->header_len; | ||
949 | int len = skb_headlen(skb) - hl; | ||
950 | |||
951 | st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, | ||
952 | len, PCI_DMA_TODEVICE); | ||
953 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { | ||
954 | st->unmap_single = true; | ||
955 | st->unmap_len = len; | ||
956 | st->in_len = len; | ||
957 | st->dma_addr = st->unmap_addr; | ||
958 | return 0; | ||
959 | } | ||
960 | return -ENOMEM; | ||
961 | } | ||
962 | |||
963 | |||
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	/* Nothing left in this fragment, or no room left in this packet */
	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	/* Send as much of the fragment as fits in the current packet */
	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	/* Advance regardless of rc; the counters above were already
	 * decremented, and on failure the caller unwinds the queue. */
	st->dma_addr += n;
	return rc;
}
1015 | |||
1016 | |||
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer.  Standard-sized headers
	 * come from the per-queue block pool; oversized ones fall back
	 * to an individual heap allocation. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst, so it
		 * keeps the original FIN/PSH flags. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us
		 * to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		/* IPv6 payload_len excludes the fixed IPv6 header */
		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* New packet gets a fresh payload budget of one MSS */
	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
1096 | |||
1097 | |||
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the linear area; frag_i == -1 marks
		 * that no page fragment has been consumed yet. */
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			/* Queue full: do NOT free the skb, so the stack
			 * can retry the transmit later. */
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	/* Back out any descriptors already pushed for this skb */
	efx_enqueue_unwind(tx_queue);
	return rc2;
}
1193 | |||
1194 | |||
1195 | /* | ||
1196 | * Free up all TSO datastructures associated with tx_queue. This | ||
1197 | * routine should be called only once the tx_queue is both empty and | ||
1198 | * will no longer be used. | ||
1199 | */ | ||
1200 | static void efx_fini_tso(struct efx_tx_queue *tx_queue) | ||
1201 | { | ||
1202 | unsigned i; | ||
1203 | |||
1204 | if (tx_queue->buffer) { | ||
1205 | for (i = 0; i <= tx_queue->ptr_mask; ++i) | ||
1206 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | ||
1207 | } | ||
1208 | |||
1209 | while (tx_queue->tso_headers_free != NULL) | ||
1210 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | ||
1211 | tx_queue->efx->pci_dev); | ||
1212 | } | ||
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c new file mode 100644 index 00000000000..7c21b334a75 --- /dev/null +++ b/drivers/net/sfc/txc43128_phy.c | |||
@@ -0,0 +1,560 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Driver for Transwitch/Mysticom CX4 retimer | ||
12 | * see www.transwitch.com, part is TXC-43128 | ||
13 | */ | ||
14 | |||
15 | #include <linux/delay.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include "efx.h" | ||
18 | #include "mdio_10g.h" | ||
19 | #include "phy.h" | ||
20 | #include "nic.h" | ||
21 | |||
22 | /* We expect these MMDs to be in the package */ | ||
23 | #define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \ | ||
24 | MDIO_DEVS_PMAPMD | \ | ||
25 | MDIO_DEVS_PHYXS) | ||
26 | |||
27 | #define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \ | ||
28 | (1 << LOOPBACK_PMAPMD) | \ | ||
29 | (1 << LOOPBACK_PHYXS_WS)) | ||
30 | |||
31 | /************************************************************************** | ||
32 | * | ||
33 | * Compile-time config | ||
34 | * | ||
35 | ************************************************************************** | ||
36 | */ | ||
37 | #define TXCNAME "TXC43128" | ||
38 | /* Total length of time we'll wait for the PHY to come out of reset (ms) */ | ||
39 | #define TXC_MAX_RESET_TIME 500 | ||
40 | /* Interval between checks (ms) */ | ||
41 | #define TXC_RESET_WAIT 10 | ||
42 | /* How long to run BIST (us) */ | ||
43 | #define TXC_BIST_DURATION 50 | ||
44 | |||
45 | /************************************************************************** | ||
46 | * | ||
47 | * Register definitions | ||
48 | * | ||
49 | ************************************************************************** | ||
50 | */ | ||
51 | |||
52 | /* Command register */ | ||
53 | #define TXC_GLRGS_GLCMD 0xc004 | ||
54 | /* Useful bits in command register */ | ||
55 | /* Lane power-down */ | ||
56 | #define TXC_GLCMD_L01PD_LBN 5 | ||
57 | #define TXC_GLCMD_L23PD_LBN 6 | ||
58 | /* Limited SW reset: preserves configuration but | ||
59 | * initiates a logic reset. Self-clearing */ | ||
60 | #define TXC_GLCMD_LMTSWRST_LBN 14 | ||
61 | |||
62 | /* Signal Quality Control */ | ||
63 | #define TXC_GLRGS_GSGQLCTL 0xc01a | ||
64 | /* Enable bit */ | ||
65 | #define TXC_GSGQLCT_SGQLEN_LBN 15 | ||
66 | /* Lane selection */ | ||
67 | #define TXC_GSGQLCT_LNSL_LBN 13 | ||
68 | #define TXC_GSGQLCT_LNSL_WIDTH 2 | ||
69 | |||
70 | /* Analog TX control */ | ||
71 | #define TXC_ALRGS_ATXCTL 0xc040 | ||
72 | /* Lane power-down */ | ||
73 | #define TXC_ATXCTL_TXPD3_LBN 15 | ||
74 | #define TXC_ATXCTL_TXPD2_LBN 14 | ||
75 | #define TXC_ATXCTL_TXPD1_LBN 13 | ||
76 | #define TXC_ATXCTL_TXPD0_LBN 12 | ||
77 | |||
78 | /* Amplitude on lanes 0, 1 */ | ||
79 | #define TXC_ALRGS_ATXAMP0 0xc041 | ||
80 | /* Amplitude on lanes 2, 3 */ | ||
81 | #define TXC_ALRGS_ATXAMP1 0xc042 | ||
82 | /* Bit position of value for lane 0 (or 2) */ | ||
83 | #define TXC_ATXAMP_LANE02_LBN 3 | ||
84 | /* Bit position of value for lane 1 (or 3) */ | ||
85 | #define TXC_ATXAMP_LANE13_LBN 11 | ||
86 | |||
87 | #define TXC_ATXAMP_1280_mV 0 | ||
88 | #define TXC_ATXAMP_1200_mV 8 | ||
89 | #define TXC_ATXAMP_1120_mV 12 | ||
90 | #define TXC_ATXAMP_1060_mV 14 | ||
91 | #define TXC_ATXAMP_0820_mV 25 | ||
92 | #define TXC_ATXAMP_0720_mV 26 | ||
93 | #define TXC_ATXAMP_0580_mV 27 | ||
94 | #define TXC_ATXAMP_0440_mV 28 | ||
95 | |||
96 | #define TXC_ATXAMP_0820_BOTH \ | ||
97 | ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \ | ||
98 | | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN)) | ||
99 | |||
100 | #define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */ | ||
101 | |||
102 | /* Preemphasis on lanes 0, 1 */ | ||
103 | #define TXC_ALRGS_ATXPRE0 0xc043 | ||
104 | /* Preemphasis on lanes 2, 3 */ | ||
105 | #define TXC_ALRGS_ATXPRE1 0xc044 | ||
106 | |||
107 | #define TXC_ATXPRE_NONE 0 | ||
108 | #define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */ | ||
109 | |||
110 | #define TXC_ALRGS_ARXCTL 0xc045 | ||
111 | /* Lane power-down */ | ||
112 | #define TXC_ARXCTL_RXPD3_LBN 15 | ||
113 | #define TXC_ARXCTL_RXPD2_LBN 14 | ||
114 | #define TXC_ARXCTL_RXPD1_LBN 13 | ||
115 | #define TXC_ARXCTL_RXPD0_LBN 12 | ||
116 | |||
117 | /* Main control */ | ||
118 | #define TXC_MRGS_CTL 0xc340 | ||
119 | /* Bits in main control */ | ||
120 | #define TXC_MCTL_RESET_LBN 15 /* Self clear */ | ||
121 | #define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */ | ||
122 | #define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */ | ||
123 | |||
124 | /* GPIO output */ | ||
125 | #define TXC_GPIO_OUTPUT 0xc346 | ||
126 | #define TXC_GPIO_DIR 0xc348 | ||
127 | |||
128 | /* Vendor-specific BIST registers */ | ||
129 | #define TXC_BIST_CTL 0xc280 | ||
130 | #define TXC_BIST_TXFRMCNT 0xc281 | ||
131 | #define TXC_BIST_RX0FRMCNT 0xc282 | ||
132 | #define TXC_BIST_RX1FRMCNT 0xc283 | ||
133 | #define TXC_BIST_RX2FRMCNT 0xc284 | ||
134 | #define TXC_BIST_RX3FRMCNT 0xc285 | ||
135 | #define TXC_BIST_RX0ERRCNT 0xc286 | ||
136 | #define TXC_BIST_RX1ERRCNT 0xc287 | ||
137 | #define TXC_BIST_RX2ERRCNT 0xc288 | ||
138 | #define TXC_BIST_RX3ERRCNT 0xc289 | ||
139 | |||
/* BIST type (controls bit pattern in test) */
141 | #define TXC_BIST_CTRL_TYPE_LBN 10 | ||
142 | #define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */ | ||
143 | #define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */ | ||
144 | #define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */ | ||
145 | #define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */ | ||
146 | /* Set this to 1 for 10 bit and 0 for 8 bit */ | ||
147 | #define TXC_BIST_CTRL_B10EN_LBN 12 | ||
148 | /* Enable BIST (write 0 to disable) */ | ||
149 | #define TXC_BIST_CTRL_ENAB_LBN 13 | ||
150 | /* Stop BIST (self-clears when stop complete) */ | ||
151 | #define TXC_BIST_CTRL_STOP_LBN 14 | ||
152 | /* Start BIST (cleared by writing 1 to STOP) */ | ||
153 | #define TXC_BIST_CTRL_STRT_LBN 15 | ||
154 | |||
155 | /* Mt. Diablo test configuration */ | ||
156 | #define TXC_MTDIABLO_CTRL 0xc34f | ||
157 | #define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10 | ||
158 | |||
/* Per-PHY private state, attached to efx->phy_data by txc43128_phy_probe() */
struct txc43128_data {
	/* Timestamp (jiffies) for the bug-10934 link-down reset workaround;
	 * presumably compared against BUG10934_RESET_INTERVAL -- the
	 * consuming code is elsewhere in this file */
	unsigned long bug10934_timer;
	/* Snapshot of efx->phy_mode (initialised in probe) */
	enum efx_phy_mode phy_mode;
	/* NOTE(review): appears to cache the last applied loopback mode;
	 * updated outside this excerpt */
	enum efx_loopback_mode loopback_mode;
};
164 | |||
165 | /* The PHY sometimes needs a reset to bring the link back up. So long as | ||
166 | * it reports link down, we reset it every 5 seconds. | ||
167 | */ | ||
168 | #define BUG10934_RESET_INTERVAL (5 * HZ) | ||
169 | |||
170 | /* Perform a reset that doesn't clear configuration changes */ | ||
171 | static void txc_reset_logic(struct efx_nic *efx); | ||
172 | |||
173 | /* Set the output value of a gpio */ | ||
174 | void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on) | ||
175 | { | ||
176 | efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on); | ||
177 | } | ||
178 | |||
179 | /* Set up the GPIO direction register */ | ||
180 | void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir) | ||
181 | { | ||
182 | efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir); | ||
183 | } | ||
184 | |||
185 | /* Reset the PMA/PMD MMD. The documentation is explicit that this does a | ||
186 | * global reset (it's less clear what reset of other MMDs does).*/ | ||
187 | static int txc_reset_phy(struct efx_nic *efx) | ||
188 | { | ||
189 | int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD, | ||
190 | TXC_MAX_RESET_TIME / TXC_RESET_WAIT, | ||
191 | TXC_RESET_WAIT); | ||
192 | if (rc < 0) | ||
193 | goto fail; | ||
194 | |||
195 | /* Check that all the MMDs we expect are present and responding. */ | ||
196 | rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS); | ||
197 | if (rc < 0) | ||
198 | goto fail; | ||
199 | |||
200 | return 0; | ||
201 | |||
202 | fail: | ||
203 | netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n"); | ||
204 | return rc; | ||
205 | } | ||
206 | |||
/* Run a single BIST on one MMD.  Returns 0 on pass or -EIO if any lane
 * recorded errors or received no frames.  The register write sequence
 * follows the vendor application note and must not be reordered.
 */
static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
{
	int ctrl, bctl;
	int lane;
	int rc = 0;

	/* Set PMA to test into loopback using Mt Diablo reg as per app note */
	ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
	ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
	efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);

	/* The BIST app. note lists these as 3 distinct steps. */
	/* Set the BIST type */
	bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);

	/* Set the BSTEN bit in the BIST Control register to enable */
	bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);

	/* Set the BSTRT bit in the BIST Control register */
	efx_mdio_write(efx, mmd, TXC_BIST_CTL,
		       bctl | (1 << TXC_BIST_CTRL_STRT_LBN));

	/* Let the test run for the configured duration */
	udelay(TXC_BIST_DURATION);

	/* Set the BSTOP bit in the BIST Control register */
	bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);

	/* The STOP bit should go off when things have stopped.
	 * NOTE(review): this poll has no timeout, so a wedged PHY would
	 * hang here indefinitely -- consider bounding the retries. */
	while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
		bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);

	/* Check all the error counts are 0 and all the frame counts are
	 * non-zero; the per-lane counter registers are consecutive */
	for (lane = 0; lane < 4; lane++) {
		int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
		if (count != 0) {
			netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
				  "Lane %d had %d errs\n", lane, count);
			rc = -EIO;
		}
		count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
		if (count == 0) {
			netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
				  "Lane %d got 0 frames\n", lane);
			rc = -EIO;
		}
	}

	if (rc == 0)
		netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");

	/* Disable BIST */
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);

	/* Turn off loopback */
	ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
	efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);

	return rc;
}
272 | |||
273 | static int txc_bist(struct efx_nic *efx) | ||
274 | { | ||
275 | return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD); | ||
276 | } | ||
277 | |||
/* Push the non-configurable defaults into the PHY.  This must be
 * done after every full reset.  Writes are ordered as: host-side
 * amplitude/preemphasis, line-side defaults, LEDs, then a logic reset
 * as the databook recommends after configuration changes. */
static void txc_apply_defaults(struct efx_nic *efx)
{
	int mctrl;

	/* Turn amplitude down and preemphasis off on the host side
	 * (PHY<->MAC) as this is believed less likely to upset Falcon
	 * and no adverse effects have been noted.  It probably also
	 * saves a picowatt or two */

	/* Turn off preemphasis */
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);

	/* Turn down the amplitude */
	efx_mdio_write(efx, MDIO_MMD_PHYXS,
		       TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
	efx_mdio_write(efx, MDIO_MMD_PHYXS,
		       TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);

	/* Set the line side amplitude and preemphasis to the databook
	 * defaults as an erratum causes them to be 0 on at least some
	 * PHY rev.s */
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);

	/* Set up the LEDs */
	mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);

	/* Set the Green and Red LEDs to their default modes */
	mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);

	/* Databook recommends doing this after configuration changes */
	txc_reset_logic(efx);

	/* Let the board-specific hook finish PHY bring-up */
	falcon_board(efx)->type->init_phy(efx);
}
323 | |||
324 | static int txc43128_phy_probe(struct efx_nic *efx) | ||
325 | { | ||
326 | struct txc43128_data *phy_data; | ||
327 | |||
328 | /* Allocate phy private storage */ | ||
329 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
330 | if (!phy_data) | ||
331 | return -ENOMEM; | ||
332 | efx->phy_data = phy_data; | ||
333 | phy_data->phy_mode = efx->phy_mode; | ||
334 | |||
335 | efx->mdio.mmds = TXC_REQUIRED_DEVS; | ||
336 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
337 | |||
338 | efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
/* Initialisation entry point for this PHY driver: reset the hardware,
 * self-test it, then program the non-configurable defaults.
 * Returns 0 on success or a negative error code.
 */
static int txc43128_phy_init(struct efx_nic *efx)
{
	int rc;

	rc = txc_reset_phy(efx);
	if (rc >= 0)
		rc = txc_bist(efx);
	if (rc < 0)
		return rc;

	txc_apply_defaults(efx);
	return 0;
}
360 | |||
361 | /* Set the lane power down state in the global registers */ | ||
362 | static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd) | ||
363 | { | ||
364 | int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN); | ||
365 | int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); | ||
366 | |||
367 | if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) | ||
368 | ctl &= ~pd; | ||
369 | else | ||
370 | ctl |= pd; | ||
371 | |||
372 | efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl); | ||
373 | } | ||
374 | |||
375 | /* Set the lane power down state in the analog control registers */ | ||
376 | static void txc_analog_lane_power(struct efx_nic *efx, int mmd) | ||
377 | { | ||
378 | int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN) | ||
379 | | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN); | ||
380 | int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN) | ||
381 | | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN); | ||
382 | int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL); | ||
383 | int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL); | ||
384 | |||
385 | if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) { | ||
386 | txctl &= ~txpd; | ||
387 | rxctl &= ~rxpd; | ||
388 | } else { | ||
389 | txctl |= txpd; | ||
390 | rxctl |= rxpd; | ||
391 | } | ||
392 | |||
393 | efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl); | ||
394 | efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl); | ||
395 | } | ||
396 | |||
397 | static void txc_set_power(struct efx_nic *efx) | ||
398 | { | ||
399 | /* According to the data book, all the MMDs can do low power */ | ||
400 | efx_mdio_set_mmds_lpower(efx, | ||
401 | !!(efx->phy_mode & PHY_MODE_LOW_POWER), | ||
402 | TXC_REQUIRED_DEVS); | ||
403 | |||
404 | /* Global register bank is in PCS, PHY XS. These control the host | ||
405 | * side and line side settings respectively. */ | ||
406 | txc_glrgs_lane_power(efx, MDIO_MMD_PCS); | ||
407 | txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS); | ||
408 | |||
409 | /* Analog register bank in PMA/PMD, PHY XS */ | ||
410 | txc_analog_lane_power(efx, MDIO_MMD_PMAPMD); | ||
411 | txc_analog_lane_power(efx, MDIO_MMD_PHYXS); | ||
412 | } | ||
413 | |||
414 | static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) | ||
415 | { | ||
416 | int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); | ||
417 | int tries = 50; | ||
418 | |||
419 | val |= (1 << TXC_GLCMD_LMTSWRST_LBN); | ||
420 | efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); | ||
421 | while (tries--) { | ||
422 | val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); | ||
423 | if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) | ||
424 | break; | ||
425 | udelay(1); | ||
426 | } | ||
427 | if (!tries) | ||
428 | netif_info(efx, hw, efx->net_dev, | ||
429 | TXCNAME " Logic reset timed out!\n"); | ||
430 | } | ||
431 | |||
/* Perform a logic reset. This preserves the configuration registers
 * and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
{
	/* The data sheet claims we can do the logic reset on either the
	 * PCS or the PHYXS and the result is a reset of both host- and
	 * line-side logic. */
	txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
441 | |||
/* Return true iff all required MMDs report link up */
static bool txc43128_phy_read_link(struct efx_nic *efx)
{
	return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
446 | |||
/* Push the current efx->phy_mode and efx->loopback_mode settings to the
 * hardware, resetting the PHY where the data sheet requires it.
 * Always returns 0. */
static int txc43128_phy_reconfigure(struct efx_nic *efx)
{
	struct txc43128_data *phy_data = efx->phy_data;
	/* Bits that differ between the requested mode and what was last
	 * programmed (stored in phy_data by the previous call/probe) */
	enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
	bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);

	/* Re-enabling TX after it was disabled: do a full PHY reset and
	 * reapply defaults, then reset the XAUI link.  Clear the bit so
	 * the logic-reset decision below isn't triggered twice. */
	if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
		txc_reset_phy(efx);
		txc_apply_defaults(efx);
		falcon_reset_xaui(efx);
		mode_change &= ~PHY_MODE_TX_DISABLED;
	}

	efx_mdio_transmit_disable(efx);
	efx_mdio_phy_reconfigure(efx);
	if (mode_change & PHY_MODE_LOW_POWER)
		txc_set_power(efx);

	/* The data sheet claims this is required after every reconfiguration
	 * (note at end of 7.1), but we mustn't do it when nothing changes as
	 * it glitches the link, and reconfigure gets called on link change,
	 * so we get an IRQ storm on link up. */
	if (loop_change || mode_change)
		txc_reset_logic(efx);

	/* Remember what we programmed, for the next delta computation */
	phy_data->phy_mode = efx->phy_mode;
	phy_data->loopback_mode = efx->loopback_mode;

	return 0;
}
477 | |||
/* Shut down the PHY driver: disable LASI link-event generation.
 * The private data is freed separately in txc43128_phy_remove(). */
static void txc43128_phy_fini(struct efx_nic *efx)
{
	/* Disable link events */
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
483 | |||
/* Free the private state allocated by txc43128_phy_probe() */
static void txc43128_phy_remove(struct efx_nic *efx)
{
	kfree(efx->phy_data);
	efx->phy_data = NULL;	/* guard against use-after-free */
}
489 | |||
490 | /* Periodic callback: this exists mainly to poll link status as we | ||
491 | * don't use LASI interrupts */ | ||
492 | static bool txc43128_phy_poll(struct efx_nic *efx) | ||
493 | { | ||
494 | struct txc43128_data *data = efx->phy_data; | ||
495 | bool was_up = efx->link_state.up; | ||
496 | |||
497 | efx->link_state.up = txc43128_phy_read_link(efx); | ||
498 | efx->link_state.speed = 10000; | ||
499 | efx->link_state.fd = true; | ||
500 | efx->link_state.fc = efx->wanted_fc; | ||
501 | |||
502 | if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) { | ||
503 | data->bug10934_timer = jiffies; | ||
504 | } else { | ||
505 | if (time_after_eq(jiffies, (data->bug10934_timer + | ||
506 | BUG10934_RESET_INTERVAL))) { | ||
507 | data->bug10934_timer = jiffies; | ||
508 | txc_reset_logic(efx); | ||
509 | } | ||
510 | } | ||
511 | |||
512 | return efx->link_state.up != was_up; | ||
513 | } | ||
514 | |||
/* Names of the self-tests reported through ethtool; indices must match
 * the results[] slots filled in by txc43128_run_tests() */
static const char *txc43128_test_names[] = {
	"bist"
};
518 | |||
519 | static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index) | ||
520 | { | ||
521 | if (index < ARRAY_SIZE(txc43128_test_names)) | ||
522 | return txc43128_test_names[index]; | ||
523 | return NULL; | ||
524 | } | ||
525 | |||
/* Run the PHY self-tests.  Only the offline BIST is implemented; the
 * online case returns immediately.  results[0] is set to 1 on pass,
 * -1 on failure; returns 0 or a negative error code. */
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
	int rc;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return 0;

	rc = txc_reset_phy(efx);
	if (rc < 0)
		return rc;

	rc = txc_bist(efx);
	/* BIST disturbs the configuration; restore the defaults even if
	 * the test failed */
	txc_apply_defaults(efx);
	results[0] = rc ? -1 : 1;
	return rc;
}
542 | |||
/* Fill in ethtool settings from the standard MDIO registers */
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	mdio45_ethtool_gset(&efx->mdio, ecmd);
}
547 | |||
/* PHY operations table for the TXC43128 on Falcon boards */
const struct efx_phy_operations falcon_txc_phy_ops = {
	.probe		= txc43128_phy_probe,
	.init		= txc43128_phy_init,
	.reconfigure	= txc43128_phy_reconfigure,
	.poll		= txc43128_phy_poll,
	.fini		= txc43128_phy_fini,
	.remove		= txc43128_phy_remove,
	.get_settings	= txc43128_get_settings,
	.set_settings	= efx_mdio_set_settings,
	.test_alive	= efx_mdio_test_alive,
	.run_tests	= txc43128_run_tests,
	.test_name	= txc43128_test_name,
};
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h new file mode 100644 index 00000000000..e4dd3a7f304 --- /dev/null +++ b/drivers/net/sfc/workarounds.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2006-2010 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
#ifndef EFX_WORKAROUNDS_H
#define EFX_WORKAROUNDS_H

/*
 * Hardware workarounds.
 * Bug numbers are from Solarflare's Bugzilla.
 *
 * Each EFX_WORKAROUND_<bug> macro expands to a predicate taking an
 * efx_nic pointer; it evaluates true when the workaround applies to
 * that NIC revision.
 */

/* Predicates selecting which hardware revisions a workaround covers */
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1

/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
/* RX PCIe double split performance issue */
#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
 * or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
/* Transmit flow control may get disabled */
#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA

/* Workarounds specific to Falcon A-series silicon */
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
/* Unaligned read request >512 bytes after aligning may break TSORT */
#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
/* iSCSI parsing errors */
#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
/* RX events go missing */
#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
/* RX_RESET on A1 */
#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
/* Increase filter depth to avoid RX_RESET */
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
/* Flushes may never complete */
#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A

#endif /* EFX_WORKAROUNDS_H */