author    Ben Hutchings <bhutchings@solarflare.com>  2008-04-27 07:55:59 -0400
committer Jeff Garzik <jgarzik@redhat.com>           2008-04-29 01:42:43 -0400
commit    8ceee660aacb29721e26f08e336c58dc4847d1bd (patch)
tree      158122642e6f21fe85d072c50d6185a0d0cf6834 /drivers/net
parent    358c12953b88c5a06a57c33eb27c753b2e7934d1 (diff)
New driver "sfc" for Solarstorm SFC4000 controller.
The driver supports the 10Xpress PHY and XFP modules on our reference
designs SFE4001 and SFE4002 and the SMC models SMC10GPCIe-XFP and
SMC10GPCIe-10BT.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/Kconfig              |    1
-rw-r--r-- | drivers/net/Makefile             |    2
-rw-r--r-- | drivers/net/sfc/Kconfig          |   12
-rw-r--r-- | drivers/net/sfc/Makefile         |    5
-rw-r--r-- | drivers/net/sfc/bitfield.h       |  508
-rw-r--r-- | drivers/net/sfc/boards.c         |  167
-rw-r--r-- | drivers/net/sfc/boards.h         |   26
-rw-r--r-- | drivers/net/sfc/efx.c            | 2208
-rw-r--r-- | drivers/net/sfc/efx.h            |   67
-rw-r--r-- | drivers/net/sfc/enum.h           |   50
-rw-r--r-- | drivers/net/sfc/ethtool.c        |  460
-rw-r--r-- | drivers/net/sfc/ethtool.h        |   27
-rw-r--r-- | drivers/net/sfc/falcon.c         | 2722
-rw-r--r-- | drivers/net/sfc/falcon.h         |  130
-rw-r--r-- | drivers/net/sfc/falcon_hwdefs.h  | 1135
-rw-r--r-- | drivers/net/sfc/falcon_io.h      |  243
-rw-r--r-- | drivers/net/sfc/falcon_xmac.c    |  585
-rw-r--r-- | drivers/net/sfc/gmii.h           |  195
-rw-r--r-- | drivers/net/sfc/i2c-direct.c     |  381
-rw-r--r-- | drivers/net/sfc/i2c-direct.h     |   91
-rw-r--r-- | drivers/net/sfc/mac.h            |   33
-rw-r--r-- | drivers/net/sfc/mdio_10g.c       |  282
-rw-r--r-- | drivers/net/sfc/mdio_10g.h       |  232
-rw-r--r-- | drivers/net/sfc/net_driver.h     |  883
-rw-r--r-- | drivers/net/sfc/phy.h            |   48
-rw-r--r-- | drivers/net/sfc/rx.c             |  875
-rw-r--r-- | drivers/net/sfc/rx.h             |   29
-rw-r--r-- | drivers/net/sfc/sfe4001.c        |  252
-rw-r--r-- | drivers/net/sfc/spi.h            |   71
-rw-r--r-- | drivers/net/sfc/tenxpress.c      |  434
-rw-r--r-- | drivers/net/sfc/tx.c             |  452
-rw-r--r-- | drivers/net/sfc/tx.h             |   24
-rw-r--r-- | drivers/net/sfc/workarounds.h    |   56
-rw-r--r-- | drivers/net/sfc/xenpack.h        |   62
-rw-r--r-- | drivers/net/sfc/xfp_phy.c        |  132
35 files changed, 12880 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 45c3a208d93f..50b36b408ca1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2592,6 +2592,7 @@ config BNX2X
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2x. This is recommended.
 
+source "drivers/net/sfc/Kconfig"
 
 endif # NETDEV_10000
 
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4d71729e85e5..371cb0785b27 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -252,3 +252,5 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_NETXEN_NIC) += netxen/
 obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_SFC) += sfc/
+
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
new file mode 100644
index 000000000000..dbad95c295bd
--- /dev/null
+++ b/drivers/net/sfc/Kconfig
@@ -0,0 +1,12 @@
1config SFC
2 tristate "Solarflare Solarstorm SFC4000 support"
3 depends on PCI && INET
4 select MII
5 select INET_LRO
6 select CRC32
7 help
8 This driver supports 10-gigabit Ethernet cards based on
9 the Solarflare Communications Solarstorm SFC4000 controller.
10
11 To compile this driver as a module, choose M here. The module
12 will be called sfc.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644
index 000000000000..0f023447eafd
--- /dev/null
+++ b/drivers/net/sfc/Makefile
@@ -0,0 +1,5 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
2 i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
3 tenxpress.o boards.o sfe4001.o
4
5obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
new file mode 100644
index 000000000000..2806201644cc
--- /dev/null
+++ b/drivers/net/sfc/bitfield.h
@@ -0,0 +1,508 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_BITFIELD_H
12#define EFX_BITFIELD_H
13
14/*
15 * Efx bitfield access
16 *
17 * Efx NICs make extensive use of bitfields up to 128 bits
18 * wide. Since there is no native 128-bit datatype on most systems,
19 * and since 64-bit datatypes are inefficient on 32-bit systems and
20 * vice versa, we wrap accesses in a way that uses the most efficient
21 * datatype.
22 *
23 * The NICs are PCI devices and therefore little-endian. Since most
24 * of the quantities that we deal with are DMAed to/from host memory,
25 * we define our datatypes (efx_oword_t, efx_qword_t and
26 * efx_dword_t) to be little-endian.
27 */
28
29/* Lowest bit numbers and widths */
30#define EFX_DUMMY_FIELD_LBN 0
31#define EFX_DUMMY_FIELD_WIDTH 0
32#define EFX_DWORD_0_LBN 0
33#define EFX_DWORD_0_WIDTH 32
34#define EFX_DWORD_1_LBN 32
35#define EFX_DWORD_1_WIDTH 32
36#define EFX_DWORD_2_LBN 64
37#define EFX_DWORD_2_WIDTH 32
38#define EFX_DWORD_3_LBN 96
39#define EFX_DWORD_3_WIDTH 32
40
41/* Specified attribute (e.g. LBN) of the specified field */
42#define EFX_VAL(field, attribute) field ## _ ## attribute
43/* Low bit number of the specified field */
44#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
45/* Bit width of the specified field */
46#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
47/* High bit number of the specified field */
48#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
49/* Mask equal in width to the specified field.
50 *
51 * For example, a field with width 5 would have a mask of 0x1f.
52 *
53 * The maximum width mask that can be generated is 64 bits.
54 */
55#define EFX_MASK64(field) \
56 (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \
57 (((((u64) 1) << EFX_WIDTH(field))) - 1))
58
59/* Mask equal in width to the specified field.
60 *
61 * For example, a field with width 5 would have a mask of 0x1f.
62 *
63 * The maximum width mask that can be generated is 32 bits. Use
64 * EFX_MASK64 for higher width fields.
65 */
66#define EFX_MASK32(field) \
67 (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \
68 (((((u32) 1) << EFX_WIDTH(field))) - 1))
69
70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
71typedef union efx_dword {
72 __le32 u32[1];
73} efx_dword_t;
74
75/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
76typedef union efx_qword {
77 __le64 u64[1];
78 __le32 u32[2];
79 efx_dword_t dword[2];
80} efx_qword_t;
81
82/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
83typedef union efx_oword {
84 __le64 u64[2];
85 efx_qword_t qword[2];
86 __le32 u32[4];
87 efx_dword_t dword[4];
88} efx_oword_t;
89
90/* Format string and value expanders for printk */
91#define EFX_DWORD_FMT "%08x"
92#define EFX_QWORD_FMT "%08x:%08x"
93#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
94#define EFX_DWORD_VAL(dword) \
95 ((unsigned int) le32_to_cpu((dword).u32[0]))
96#define EFX_QWORD_VAL(qword) \
97 ((unsigned int) le32_to_cpu((qword).u32[1])), \
98 ((unsigned int) le32_to_cpu((qword).u32[0]))
99#define EFX_OWORD_VAL(oword) \
100 ((unsigned int) le32_to_cpu((oword).u32[3])), \
101 ((unsigned int) le32_to_cpu((oword).u32[2])), \
102 ((unsigned int) le32_to_cpu((oword).u32[1])), \
103 ((unsigned int) le32_to_cpu((oword).u32[0]))
104
105/*
106 * Extract bit field portion [low,high) from the native-endian element
107 * which contains bits [min,max).
108 *
109 * For example, suppose "element" represents the high 32 bits of a
110 * 64-bit value, and we wish to extract the bits belonging to the bit
111 * field occupying bits 28-45 of this 64-bit value.
112 *
113 * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
114 *
115 * ( element ) << 4
116 *
117 * The result will contain the relevant bits filled in in the range
118 * [0,high-low), with garbage in bits [high-low+1,...).
119 */
120#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
121 (((low > max) || (high < min)) ? 0 : \
122 ((low > min) ? \
123 ((native_element) >> (low - min)) : \
124 ((native_element) << (min - low))))
125
126/*
127 * Extract bit field portion [low,high) from the 64-bit little-endian
128 * element which contains bits [min,max)
129 */
130#define EFX_EXTRACT64(element, min, max, low, high) \
131 EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
132
133/*
134 * Extract bit field portion [low,high) from the 32-bit little-endian
135 * element which contains bits [min,max)
136 */
137#define EFX_EXTRACT32(element, min, max, low, high) \
138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
139
140#define EFX_EXTRACT_OWORD64(oword, low, high) \
141 (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
143
144#define EFX_EXTRACT_QWORD64(qword, low, high) \
145 EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
146
147#define EFX_EXTRACT_OWORD32(oword, low, high) \
148 (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
149 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
150 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
151 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
152
153#define EFX_EXTRACT_QWORD32(qword, low, high) \
154 (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
155 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
156
157#define EFX_EXTRACT_DWORD(dword, low, high) \
158 EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
159
160#define EFX_OWORD_FIELD64(oword, field) \
161 (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
162 & EFX_MASK64(field))
163
164#define EFX_QWORD_FIELD64(qword, field) \
165 (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
166 & EFX_MASK64(field))
167
168#define EFX_OWORD_FIELD32(oword, field) \
169 (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
170 & EFX_MASK32(field))
171
172#define EFX_QWORD_FIELD32(qword, field) \
173 (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
174 & EFX_MASK32(field))
175
176#define EFX_DWORD_FIELD(dword, field) \
177 (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
178 & EFX_MASK32(field))
179
180#define EFX_OWORD_IS_ZERO64(oword) \
181 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
182
183#define EFX_QWORD_IS_ZERO64(qword) \
184 (((qword).u64[0]) == (__force __le64) 0)
185
186#define EFX_OWORD_IS_ZERO32(oword) \
187 (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
188 == (__force __le32) 0)
189
190#define EFX_QWORD_IS_ZERO32(qword) \
191 (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
192
193#define EFX_DWORD_IS_ZERO(dword) \
194 (((dword).u32[0]) == (__force __le32) 0)
195
196#define EFX_OWORD_IS_ALL_ONES64(oword) \
197 (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
198
199#define EFX_QWORD_IS_ALL_ONES64(qword) \
200 ((qword).u64[0] == ~((__force __le64) 0))
201
202#define EFX_OWORD_IS_ALL_ONES32(oword) \
203 (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
204 == ~((__force __le32) 0))
205
206#define EFX_QWORD_IS_ALL_ONES32(qword) \
207 (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
208
209#define EFX_DWORD_IS_ALL_ONES(dword) \
210 ((dword).u32[0] == ~((__force __le32) 0))
211
212#if BITS_PER_LONG == 64
213#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
214#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
215#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
216#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
217#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
218#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
219#else
220#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
221#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
222#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
223#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
224#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
225#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
226#endif
227
228/*
229 * Construct bit field portion
230 *
231 * Creates the portion of the bit field [low,high) that lies within
232 * the range [min,max).
233 */
234#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
235 (((low > max) || (high < min)) ? 0 : \
236 ((low > min) ? \
237 (((u64) (value)) << (low - min)) : \
238 (((u64) (value)) >> (min - low))))
239
240#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
241 (((low > max) || (high < min)) ? 0 : \
242 ((low > min) ? \
243 (((u32) (value)) << (low - min)) : \
244 (((u32) (value)) >> (min - low))))
245
246#define EFX_INSERT_NATIVE(min, max, low, high, value) \
247 ((((max - min) >= 32) || ((high - low) >= 32)) ? \
248 EFX_INSERT_NATIVE64(min, max, low, high, value) : \
249 EFX_INSERT_NATIVE32(min, max, low, high, value))
250
251/*
252 * Construct bit field portion
253 *
254 * Creates the portion of the named bit field that lies within the
255 * range [min,max).
256 */
257#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
258 EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
259 EFX_HIGH_BIT(field), value)
260
261/*
262 * Construct bit field
263 *
264 * Creates the portion of the named bit fields that lie within the
265 * range [min,max).
266 */
267#define EFX_INSERT_FIELDS_NATIVE(min, max, \
268 field1, value1, \
269 field2, value2, \
270 field3, value3, \
271 field4, value4, \
272 field5, value5, \
273 field6, value6, \
274 field7, value7, \
275 field8, value8, \
276 field9, value9, \
277 field10, value10) \
278 (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
279 EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
280 EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
281 EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
282 EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
283 EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
284 EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
285 EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
286 EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
287 EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
288
289#define EFX_INSERT_FIELDS64(...) \
290 cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
291
292#define EFX_INSERT_FIELDS32(...) \
293 cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
294
295#define EFX_POPULATE_OWORD64(oword, ...) do { \
296 (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
297 (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
298 } while (0)
299
300#define EFX_POPULATE_QWORD64(qword, ...) do { \
301 (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
302 } while (0)
303
304#define EFX_POPULATE_OWORD32(oword, ...) do { \
305 (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
306 (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
307 (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
308 (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
309 } while (0)
310
311#define EFX_POPULATE_QWORD32(qword, ...) do { \
312 (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
313 (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
314 } while (0)
315
316#define EFX_POPULATE_DWORD(dword, ...) do { \
317 (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
318 } while (0)
319
320#if BITS_PER_LONG == 64
321#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
322#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
323#else
324#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
325#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
326#endif
327
328/* Populate an octword field with various numbers of arguments */
329#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
330#define EFX_POPULATE_OWORD_9(oword, ...) \
331 EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
332#define EFX_POPULATE_OWORD_8(oword, ...) \
333 EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
334#define EFX_POPULATE_OWORD_7(oword, ...) \
335 EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
336#define EFX_POPULATE_OWORD_6(oword, ...) \
337 EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
338#define EFX_POPULATE_OWORD_5(oword, ...) \
339 EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
340#define EFX_POPULATE_OWORD_4(oword, ...) \
341 EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
342#define EFX_POPULATE_OWORD_3(oword, ...) \
343 EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
344#define EFX_POPULATE_OWORD_2(oword, ...) \
345 EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
346#define EFX_POPULATE_OWORD_1(oword, ...) \
347 EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
348#define EFX_ZERO_OWORD(oword) \
349 EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
350#define EFX_SET_OWORD(oword) \
351 EFX_POPULATE_OWORD_4(oword, \
352 EFX_DWORD_0, 0xffffffff, \
353 EFX_DWORD_1, 0xffffffff, \
354 EFX_DWORD_2, 0xffffffff, \
355 EFX_DWORD_3, 0xffffffff)
356
357/* Populate a quadword field with various numbers of arguments */
358#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
359#define EFX_POPULATE_QWORD_9(qword, ...) \
360 EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
361#define EFX_POPULATE_QWORD_8(qword, ...) \
362 EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
363#define EFX_POPULATE_QWORD_7(qword, ...) \
364 EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
365#define EFX_POPULATE_QWORD_6(qword, ...) \
366 EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
367#define EFX_POPULATE_QWORD_5(qword, ...) \
368 EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
369#define EFX_POPULATE_QWORD_4(qword, ...) \
370 EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
371#define EFX_POPULATE_QWORD_3(qword, ...) \
372 EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
373#define EFX_POPULATE_QWORD_2(qword, ...) \
374 EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
375#define EFX_POPULATE_QWORD_1(qword, ...) \
376 EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
377#define EFX_ZERO_QWORD(qword) \
378 EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
379#define EFX_SET_QWORD(qword) \
380 EFX_POPULATE_QWORD_2(qword, \
381 EFX_DWORD_0, 0xffffffff, \
382 EFX_DWORD_1, 0xffffffff)
383
384/* Populate a dword field with various numbers of arguments */
385#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
386#define EFX_POPULATE_DWORD_9(dword, ...) \
387 EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
388#define EFX_POPULATE_DWORD_8(dword, ...) \
389 EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
390#define EFX_POPULATE_DWORD_7(dword, ...) \
391 EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
392#define EFX_POPULATE_DWORD_6(dword, ...) \
393 EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
394#define EFX_POPULATE_DWORD_5(dword, ...) \
395 EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
396#define EFX_POPULATE_DWORD_4(dword, ...) \
397 EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
398#define EFX_POPULATE_DWORD_3(dword, ...) \
399 EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
400#define EFX_POPULATE_DWORD_2(dword, ...) \
401 EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
402#define EFX_POPULATE_DWORD_1(dword, ...) \
403 EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
404#define EFX_ZERO_DWORD(dword) \
405 EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
406#define EFX_SET_DWORD(dword) \
407 EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
408
409/*
410 * Modify a named field within an already-populated structure. Used
411 * for read-modify-write operations.
412 *
413 */
414
415#define EFX_INVERT_OWORD(oword) do { \
416 (oword).u64[0] = ~((oword).u64[0]); \
417 (oword).u64[1] = ~((oword).u64[1]); \
418 } while (0)
419
420#define EFX_INSERT_FIELD64(...) \
421 cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
422
423#define EFX_INSERT_FIELD32(...) \
424 cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
425
426#define EFX_INPLACE_MASK64(min, max, field) \
427 EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
428
429#define EFX_INPLACE_MASK32(min, max, field) \
430 EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
431
432#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \
433 (oword).u64[0] = (((oword).u64[0] \
434 & ~EFX_INPLACE_MASK64(0, 63, field)) \
435 | EFX_INSERT_FIELD64(0, 63, field, value)); \
436 (oword).u64[1] = (((oword).u64[1] \
437 & ~EFX_INPLACE_MASK64(64, 127, field)) \
438 | EFX_INSERT_FIELD64(64, 127, field, value)); \
439 } while (0)
440
441#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \
442 (qword).u64[0] = (((qword).u64[0] \
443 & ~EFX_INPLACE_MASK64(0, 63, field)) \
444 | EFX_INSERT_FIELD64(0, 63, field, value)); \
445 } while (0)
446
447#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \
448 (oword).u32[0] = (((oword).u32[0] \
449 & ~EFX_INPLACE_MASK32(0, 31, field)) \
450 | EFX_INSERT_FIELD32(0, 31, field, value)); \
451 (oword).u32[1] = (((oword).u32[1] \
452 & ~EFX_INPLACE_MASK32(32, 63, field)) \
453 | EFX_INSERT_FIELD32(32, 63, field, value)); \
454 (oword).u32[2] = (((oword).u32[2] \
455 & ~EFX_INPLACE_MASK32(64, 95, field)) \
456 | EFX_INSERT_FIELD32(64, 95, field, value)); \
457 (oword).u32[3] = (((oword).u32[3] \
458 & ~EFX_INPLACE_MASK32(96, 127, field)) \
459 | EFX_INSERT_FIELD32(96, 127, field, value)); \
460 } while (0)
461
462#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \
463 (qword).u32[0] = (((qword).u32[0] \
464 & ~EFX_INPLACE_MASK32(0, 31, field)) \
465 | EFX_INSERT_FIELD32(0, 31, field, value)); \
466 (qword).u32[1] = (((qword).u32[1] \
467 & ~EFX_INPLACE_MASK32(32, 63, field)) \
468 | EFX_INSERT_FIELD32(32, 63, field, value)); \
469 } while (0)
470
471#define EFX_SET_DWORD_FIELD(dword, field, value) do { \
472 (dword).u32[0] = (((dword).u32[0] \
473 & ~EFX_INPLACE_MASK32(0, 31, field)) \
474 | EFX_INSERT_FIELD32(0, 31, field, value)); \
475 } while (0)
476
477#if BITS_PER_LONG == 64
478#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
479#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
480#else
481#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
482#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
483#endif
484
485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
486 if (FALCON_REV(efx) >= FALCON_REV_B0) { \
487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
488 } else { \
489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
490 } \
491} while (0)
492
493#define EFX_QWORD_FIELD_VER(efx, qword, field) \
494 (FALCON_REV(efx) >= FALCON_REV_B0 ? \
495 EFX_QWORD_FIELD((qword), field##_B0) : \
496 EFX_QWORD_FIELD((qword), field##_A1))
497
498/* Used to avoid compiler warnings about shift range exceeding width
499 * of the data types when dma_addr_t is only 32 bits wide.
500 */
501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
502#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
505 ~((u64) 0) : ~((u32) 0))
506#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
507
508#endif /* EFX_BITFIELD_H */
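
Taken as a whole, bitfield.h lets callers pack and unpack hardware fields by
name, given _LBN/_WIDTH definitions such as those in falcon_hwdefs.h. A
minimal usage sketch follows; the EXAMPLE_* field names and the helper
function are hypothetical, for illustration only:

	#define EXAMPLE_BUF_ADDR_LBN    0
	#define EXAMPLE_BUF_ADDR_WIDTH  48
	#define EXAMPLE_BUF_SIZE_LBN    48
	#define EXAMPLE_BUF_SIZE_WIDTH  12

	static void example_pack_unpack(efx_qword_t *desc, u64 addr,
					unsigned int size)
	{
		/* Pack both fields in one write; the EFX_POPULATE_QWORD_2
		 * wrapper pads the remaining slots with EFX_DUMMY_FIELD. */
		EFX_POPULATE_QWORD_2(*desc,
				     EXAMPLE_BUF_ADDR, addr,
				     EXAMPLE_BUF_SIZE, size);

		/* Read a field back by name; EFX_QWORD_FIELD resolves to the
		 * 32-bit or 64-bit implementation to match BITS_PER_LONG. */
		WARN_ON(EFX_QWORD_FIELD(*desc, EXAMPLE_BUF_SIZE) != size);
	}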
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
new file mode 100644
index 000000000000..eecaa6d58584
--- /dev/null
+++ b/drivers/net/sfc/boards.c
@@ -0,0 +1,167 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14
15/* Macros for unpacking the board revision */
16/* The revision info is in host byte order. */
17#define BOARD_TYPE(_rev) (_rev >> 8)
18#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
19#define BOARD_MINOR(_rev) (_rev & 0xf)
20
 21/* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */
22#define BLINK_INTERVAL (HZ/2)
23
24static void blink_led_timer(unsigned long context)
25{
26 struct efx_nic *efx = (struct efx_nic *)context;
27 struct efx_blinker *bl = &efx->board_info.blinker;
28 efx->board_info.set_fault_led(efx, bl->state);
29 bl->state = !bl->state;
30 if (bl->resubmit) {
31 bl->timer.expires = jiffies + BLINK_INTERVAL;
32 add_timer(&bl->timer);
33 }
34}
35
36static void board_blink(struct efx_nic *efx, int blink)
37{
38 struct efx_blinker *blinker = &efx->board_info.blinker;
39
40 /* The rtnl mutex serialises all ethtool ioctls, so
41 * nothing special needs doing here. */
42 if (blink) {
43 blinker->resubmit = 1;
44 blinker->state = 0;
45 setup_timer(&blinker->timer, blink_led_timer,
46 (unsigned long)efx);
47 blinker->timer.expires = jiffies + BLINK_INTERVAL;
48 add_timer(&blinker->timer);
49 } else {
50 blinker->resubmit = 0;
51 if (blinker->timer.function)
52 del_timer_sync(&blinker->timer);
53 efx->board_info.set_fault_led(efx, 0);
54 }
55}
56
57/*****************************************************************************
58 * Support for the SFE4002
59 *
60 */
61/****************************************************************************/
62/* LED allocations. Note that on rev A0 boards the schematic and the reality
63 * differ: red and green are swapped. Below is the fixed (A1) layout (there
64 * are only 3 A0 boards in existence, so no real reason to make this
65 * conditional).
66 */
67#define SFE4002_FAULT_LED (2) /* Red */
68#define SFE4002_RX_LED (0) /* Green */
69#define SFE4002_TX_LED (1) /* Amber */
70
71static int sfe4002_init_leds(struct efx_nic *efx)
72{
73 /* Set the TX and RX LEDs to reflect status and activity, and the
74 * fault LED off */
75 xfp_set_led(efx, SFE4002_TX_LED,
76 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
77 xfp_set_led(efx, SFE4002_RX_LED,
78 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
79 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
80 efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
81 return 0;
82}
83
84static void sfe4002_fault_led(struct efx_nic *efx, int state)
85{
86 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
87 QUAKE_LED_OFF);
88}
89
90static int sfe4002_init(struct efx_nic *efx)
91{
92 efx->board_info.init_leds = sfe4002_init_leds;
93 efx->board_info.set_fault_led = sfe4002_fault_led;
94 efx->board_info.blink = board_blink;
95 return 0;
96}
97
98/* This will get expanded as board-specific details get moved out of the
99 * PHY drivers. */
100struct efx_board_data {
101 const char *ref_model;
102 const char *gen_type;
103 int (*init) (struct efx_nic *nic);
104};
105
106static int dummy_init(struct efx_nic *nic)
107{
108 return 0;
109}
110
111static struct efx_board_data board_data[] = {
112 [EFX_BOARD_INVALID] =
113 {NULL, NULL, dummy_init},
114 [EFX_BOARD_SFE4001] =
115 {"SFE4001", "10GBASE-T adapter", sfe4001_poweron},
116 [EFX_BOARD_SFE4002] =
117 {"SFE4002", "XFP adapter", sfe4002_init},
118};
119
120int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
121{
122 int rc = 0;
123 struct efx_board_data *data;
124
125 if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
126 EFX_ERR(efx, "squashing unknown board type %d\n",
127 BOARD_TYPE(revision_info));
128 revision_info = 0;
129 }
130
131 if (BOARD_TYPE(revision_info) == 0) {
132 efx->board_info.major = 0;
133 efx->board_info.minor = 0;
 134 /* For early boards that don't have revision info, there is
135 * only 1 board for each PHY type, so we can work it out, with
136 * the exception of the PHY-less boards. */
137 switch (efx->phy_type) {
138 case PHY_TYPE_10XPRESS:
139 efx->board_info.type = EFX_BOARD_SFE4001;
140 break;
141 case PHY_TYPE_XFP:
142 efx->board_info.type = EFX_BOARD_SFE4002;
143 break;
144 default:
145 efx->board_info.type = 0;
146 break;
147 }
148 } else {
149 efx->board_info.type = BOARD_TYPE(revision_info);
150 efx->board_info.major = BOARD_MAJOR(revision_info);
151 efx->board_info.minor = BOARD_MINOR(revision_info);
152 }
153
154 data = &board_data[efx->board_info.type];
155
156 /* Report the board model number or generic type for recognisable
157 * boards. */
158 if (efx->board_info.type != 0)
159 EFX_INFO(efx, "board is %s rev %c%d\n",
160 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
161 ? data->ref_model : data->gen_type,
162 'A' + efx->board_info.major, efx->board_info.minor);
163
164 efx->board_info.init = data->init;
165
166 return rc;
167}
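
To make the revision unpacking concrete, here is a worked example with a
hypothetical revision word (the helper below is illustrative, not part of
the driver):

	static void example_decode_revision(void)
	{
		u16 rev = 0x0211;	/* hypothetical revision word */

		BUG_ON(BOARD_TYPE(rev) != EFX_BOARD_SFE4002); /* 0x0211 >> 8 == 2 */
		BUG_ON(BOARD_MAJOR(rev) != 1);	/* (0x0211 >> 4) & 0xf == 1 */
		BUG_ON(BOARD_MINOR(rev) != 1);	/* 0x0211 & 0xf == 1 */

		/* efx_set_board_info() would report this as "board is
		 * SFE4002 rev B1", since the major revision maps to a
		 * letter via 'A' + major. */
	}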
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
new file mode 100644
index 000000000000..f56341d428e1
--- /dev/null
+++ b/drivers/net/sfc/boards.h
@@ -0,0 +1,26 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_INVALID = 0,
16 EFX_BOARD_SFE4001 = 1, /* SFE4001 (10GBASE-T) */
17 EFX_BOARD_SFE4002 = 2,
18 /* Insert new types before here */
19 EFX_BOARD_MAX
20};
21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_poweron(struct efx_nic *efx);
24extern void sfe4001_poweroff(struct efx_nic *efx);
25
26#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
new file mode 100644
index 000000000000..59edcf793c19
--- /dev/null
+++ b/drivers/net/sfc/efx.c
@@ -0,0 +1,2208 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/notifier.h>
17#include <linux/ip.h>
18#include <linux/tcp.h>
19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h>
22#include "net_driver.h"
23#include "gmii.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h"
28#include "mdio_10g.h"
29#include "falcon.h"
30#include "workarounds.h"
31#include "mac.h"
32
33#define EFX_MAX_MTU (9 * 1024)
34
35/* RX slow fill workqueue. If memory allocation fails in the fast path,
36 * a work item is pushed onto this work queue to retry the allocation later,
37 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
38 * workqueue, there is nothing to be gained in making it per NIC
39 */
40static struct workqueue_struct *refill_workqueue;
41
42/**************************************************************************
43 *
44 * Configurable values
45 *
46 *************************************************************************/
47
48/*
49 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
50 *
51 * This sets the default for new devices. It can be controlled later
52 * using ethtool.
53 */
54static int lro = 1;
55module_param(lro, int, 0644);
56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
57
58/*
59 * Use separate channels for TX and RX events
60 *
61 * Set this to 1 to use separate channels for TX and RX. It allows us to
62 * apply a higher level of interrupt moderation to TX events.
63 *
64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
65 * is not written
66 */
67static unsigned int separate_tx_and_rx_channels = 1;
68
69/* This is the weight assigned to each of the (per-channel) virtual
70 * NAPI devices.
71 */
72static int napi_weight = 64;
73
74/* This is the time (in jiffies) between invocations of the hardware
75 * monitor, which checks for known hardware bugs and resets the
76 * hardware and driver as necessary.
77 */
78unsigned int efx_monitor_interval = 1 * HZ;
79
80/* This controls whether or not the hardware monitor will trigger a
81 * reset when it detects an error condition.
82 */
83static unsigned int monitor_reset = 1;
84
85/* This controls whether or not the driver will initialise devices
86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
87 * such devices will be initialised with a random locally-generated
88 * MAC address. This allows for loading the sfc_mtd driver to
89 * reprogram the flash, even if the flash contents (including the MAC
90 * address) have previously been erased.
91 */
92static unsigned int allow_bad_hwaddr;
93
94/* Initial interrupt moderation settings. They can be modified after
95 * module load with ethtool.
96 *
97 * The default for RX should strike a balance between increasing the
98 * round-trip latency and reducing overhead.
99 */
100static unsigned int rx_irq_mod_usec = 60;
101
102/* Initial interrupt moderation settings. They can be modified after
103 * module load with ethtool.
104 *
105 * This default is chosen to ensure that a 10G link does not go idle
106 * while a TX queue is stopped after it has become full. A queue is
107 * restarted when it drops below half full. The time this takes (assuming
108 * worst case 3 descriptors per packet and 1024 descriptors) is
109 * 512 / 3 * 1.2 = 205 usec.
110 */
111static unsigned int tx_irq_mod_usec = 150;
112
113/* This is the first interrupt mode to try out of:
114 * 0 => MSI-X
115 * 1 => MSI
116 * 2 => legacy
117 */
118static unsigned int interrupt_mode;
119
120/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
121 * i.e. the number of CPUs among which we may distribute simultaneous
122 * interrupt handling.
123 *
124 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
125 * The default (0) means to assign an interrupt to each package (level II cache)
126 */
127static unsigned int rss_cpus;
128module_param(rss_cpus, uint, 0444);
129MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
130
131/**************************************************************************
132 *
133 * Utility functions and prototypes
134 *
135 *************************************************************************/
136static void efx_remove_channel(struct efx_channel *channel);
137static void efx_remove_port(struct efx_nic *efx);
138static void efx_fini_napi(struct efx_nic *efx);
139static void efx_fini_channels(struct efx_nic *efx);
140
141#define EFX_ASSERT_RESET_SERIALISED(efx) \
142 do { \
143 if ((efx->state == STATE_RUNNING) || \
144 (efx->state == STATE_RESETTING)) \
145 ASSERT_RTNL(); \
146 } while (0)
147
148/**************************************************************************
149 *
150 * Event queue processing
151 *
152 *************************************************************************/
153
154/* Process channel's event queue
155 *
156 * This function is responsible for processing the event queue of a
157 * single channel. The caller must guarantee that this function will
158 * never be concurrently called more than once on the same channel,
159 * though different channels may be being processed concurrently.
160 */
161static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
162{
163 int rxdmaqs;
164 struct efx_rx_queue *rx_queue;
165
166 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
167 !channel->enabled))
168 return rx_quota;
169
170 rxdmaqs = falcon_process_eventq(channel, &rx_quota);
171
172 /* Deliver last RX packet. */
173 if (channel->rx_pkt) {
174 __efx_rx_packet(channel, channel->rx_pkt,
175 channel->rx_pkt_csummed);
176 channel->rx_pkt = NULL;
177 }
178
179 efx_flush_lro(channel);
180 efx_rx_strategy(channel);
181
182 /* Refill descriptor rings as necessary */
183 rx_queue = &channel->efx->rx_queue[0];
184 while (rxdmaqs) {
185 if (rxdmaqs & 0x01)
186 efx_fast_push_rx_descriptors(rx_queue);
187 rx_queue++;
188 rxdmaqs >>= 1;
189 }
190
191 return rx_quota;
192}
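/* Worked example (illustrative value): if falcon_process_eventq() returns
 * rxdmaqs = 0x5, the refill loop above pushes descriptors to rx_queue[0],
 * skips rx_queue[1], and refills rx_queue[2] before the mask is exhausted. */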
193
194/* Mark channel as finished processing
195 *
196 * Note that since we will not receive further interrupts for this
197 * channel before we finish processing and call the eventq_read_ack()
198 * method, there is no need to use the interrupt hold-off timers.
199 */
200static inline void efx_channel_processed(struct efx_channel *channel)
201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race
203 * with finishing processing, a new interrupt will be raised.
204 */
205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */
207 falcon_eventq_read_ack(channel);
208}
209
210/* NAPI poll handler
211 *
212 * NAPI guarantees serialisation of polls of the same device, which
213 * provides the guarantee required by efx_process_channel().
214 */
215static int efx_poll(struct napi_struct *napi, int budget)
216{
217 struct efx_channel *channel =
218 container_of(napi, struct efx_channel, napi_str);
219 struct net_device *napi_dev = channel->napi_dev;
220 int unused;
221 int rx_packets;
222
223 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
224 channel->channel, raw_smp_processor_id());
225
226 unused = efx_process_channel(channel, budget);
227 rx_packets = (budget - unused);
228
229 if (rx_packets < budget) {
230 /* There is no race here; although napi_disable() will
231 * only wait for netif_rx_complete(), this isn't a problem
232 * since efx_channel_processed() will have no effect if
233 * interrupts have already been disabled.
234 */
235 netif_rx_complete(napi_dev, napi);
236 efx_channel_processed(channel);
237 }
238
239 return rx_packets;
240}
241
242/* Process the eventq of the specified channel immediately on this CPU
243 *
244 * Disable hardware generated interrupts, wait for any existing
 245 * processing to finish, then directly poll (and ack) the eventq.
246 * Finally reenable NAPI and interrupts.
247 *
248 * Since we are touching interrupts the caller should hold the suspend lock
249 */
250void efx_process_channel_now(struct efx_channel *channel)
251{
252 struct efx_nic *efx = channel->efx;
253
254 BUG_ON(!channel->used_flags);
255 BUG_ON(!channel->enabled);
256
257 /* Disable interrupts and wait for ISRs to complete */
258 falcon_disable_interrupts(efx);
259 if (efx->legacy_irq)
260 synchronize_irq(efx->legacy_irq);
261 if (channel->has_interrupt && channel->irq)
262 synchronize_irq(channel->irq);
263
264 /* Wait for any NAPI processing to complete */
265 napi_disable(&channel->napi_str);
266
267 /* Poll the channel */
268 (void) efx_process_channel(channel, efx->type->evq_size);
269
270 /* Ack the eventq. This may cause an interrupt to be generated
 271 * when interrupts are re-enabled */
272 efx_channel_processed(channel);
273
274 napi_enable(&channel->napi_str);
275 falcon_enable_interrupts(efx);
276}
277
278/* Create event queue
279 * Event queue memory allocations are done only once. If the channel
280 * is reset, the memory buffer will be reused; this guards against
281 * errors during channel reset and also simplifies interrupt handling.
282 */
283static int efx_probe_eventq(struct efx_channel *channel)
284{
285 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
286
287 return falcon_probe_eventq(channel);
288}
289
290/* Prepare channel's event queue */
291static int efx_init_eventq(struct efx_channel *channel)
292{
293 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
294
295 channel->eventq_read_ptr = 0;
296
297 return falcon_init_eventq(channel);
298}
299
300static void efx_fini_eventq(struct efx_channel *channel)
301{
302 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
303
304 falcon_fini_eventq(channel);
305}
306
307static void efx_remove_eventq(struct efx_channel *channel)
308{
309 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
310
311 falcon_remove_eventq(channel);
312}
313
314/**************************************************************************
315 *
316 * Channel handling
317 *
318 *************************************************************************/
319
320/* Setup per-NIC RX buffer parameters.
321 * Calculate the rx buffer allocation parameters required to support
322 * the current MTU, including padding for header alignment and overruns.
323 */
324static void efx_calc_rx_buffer_params(struct efx_nic *efx)
325{
326 unsigned int order, len;
327
328 len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
329 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
330 efx->type->rx_buffer_padding);
331
332 /* Calculate page-order */
333 for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
334 ;
335
336 efx->rx_buffer_len = len;
337 efx->rx_buffer_order = order;
338}
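/* Worked example (assuming 4 KB pages): with the standard 1500-byte MTU the
 * aligned, padded frame length fits in one page, so order = 0. A 9000-byte
 * jumbo MTU exceeds two pages (8192 bytes), so order rounds up to 2 and
 * each buffer allocation spans 16 KB. */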
339
340static int efx_probe_channel(struct efx_channel *channel)
341{
342 struct efx_tx_queue *tx_queue;
343 struct efx_rx_queue *rx_queue;
344 int rc;
345
346 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
347
348 rc = efx_probe_eventq(channel);
349 if (rc)
350 goto fail1;
351
352 efx_for_each_channel_tx_queue(tx_queue, channel) {
353 rc = efx_probe_tx_queue(tx_queue);
354 if (rc)
355 goto fail2;
356 }
357
358 efx_for_each_channel_rx_queue(rx_queue, channel) {
359 rc = efx_probe_rx_queue(rx_queue);
360 if (rc)
361 goto fail3;
362 }
363
364 channel->n_rx_frm_trunc = 0;
365
366 return 0;
367
368 fail3:
369 efx_for_each_channel_rx_queue(rx_queue, channel)
370 efx_remove_rx_queue(rx_queue);
371 fail2:
372 efx_for_each_channel_tx_queue(tx_queue, channel)
373 efx_remove_tx_queue(tx_queue);
374 fail1:
375 return rc;
376}
377
378
379/* Channels are shutdown and reinitialised whilst the NIC is running
380 * to propagate configuration changes (mtu, checksum offload), or
381 * to clear hardware error conditions
382 */
383static int efx_init_channels(struct efx_nic *efx)
384{
385 struct efx_tx_queue *tx_queue;
386 struct efx_rx_queue *rx_queue;
387 struct efx_channel *channel;
388 int rc = 0;
389
390 efx_calc_rx_buffer_params(efx);
391
392 /* Initialise the channels */
393 efx_for_each_channel(channel, efx) {
394 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
395
396 rc = efx_init_eventq(channel);
397 if (rc)
398 goto err;
399
400 efx_for_each_channel_tx_queue(tx_queue, channel) {
401 rc = efx_init_tx_queue(tx_queue);
402 if (rc)
403 goto err;
404 }
405
406 /* The rx buffer allocation strategy is MTU dependent */
407 efx_rx_strategy(channel);
408
409 efx_for_each_channel_rx_queue(rx_queue, channel) {
410 rc = efx_init_rx_queue(rx_queue);
411 if (rc)
412 goto err;
413 }
414
415 WARN_ON(channel->rx_pkt != NULL);
416 efx_rx_strategy(channel);
417 }
418
419 return 0;
420
421 err:
422 EFX_ERR(efx, "failed to initialise channel %d\n",
423 channel ? channel->channel : -1);
424 efx_fini_channels(efx);
425 return rc;
426}
427
428/* This enables event queue processing and packet transmission.
429 *
430 * Note that this function is not allowed to fail, since that would
431 * introduce too much complexity into the suspend/resume path.
432 */
433static void efx_start_channel(struct efx_channel *channel)
434{
435 struct efx_rx_queue *rx_queue;
436
437 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
438
439 if (!(channel->efx->net_dev->flags & IFF_UP))
440 netif_napi_add(channel->napi_dev, &channel->napi_str,
441 efx_poll, napi_weight);
442
443 channel->work_pending = 0;
444 channel->enabled = 1;
445 smp_wmb(); /* ensure channel updated before first interrupt */
446
447 napi_enable(&channel->napi_str);
448
449 /* Load up RX descriptors */
450 efx_for_each_channel_rx_queue(rx_queue, channel)
451 efx_fast_push_rx_descriptors(rx_queue);
452}
453
454/* This disables event queue processing and packet transmission.
455 * This function does not guarantee that all queue processing
456 * (e.g. RX refill) is complete.
457 */
458static void efx_stop_channel(struct efx_channel *channel)
459{
460 struct efx_rx_queue *rx_queue;
461
462 if (!channel->enabled)
463 return;
464
465 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
466
467 channel->enabled = 0;
468 napi_disable(&channel->napi_str);
469
470 /* Ensure that any worker threads have exited or will be no-ops */
471 efx_for_each_channel_rx_queue(rx_queue, channel) {
472 spin_lock_bh(&rx_queue->add_lock);
473 spin_unlock_bh(&rx_queue->add_lock);
474 }
475}
476
477static void efx_fini_channels(struct efx_nic *efx)
478{
479 struct efx_channel *channel;
480 struct efx_tx_queue *tx_queue;
481 struct efx_rx_queue *rx_queue;
482
483 EFX_ASSERT_RESET_SERIALISED(efx);
484 BUG_ON(efx->port_enabled);
485
486 efx_for_each_channel(channel, efx) {
487 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
488
489 efx_for_each_channel_rx_queue(rx_queue, channel)
490 efx_fini_rx_queue(rx_queue);
491 efx_for_each_channel_tx_queue(tx_queue, channel)
492 efx_fini_tx_queue(tx_queue);
493 }
494
495 /* Do the event queues last so that we can handle flush events
496 * for all DMA queues. */
497 efx_for_each_channel(channel, efx) {
498 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
499
500 efx_fini_eventq(channel);
501 }
502}
503
504static void efx_remove_channel(struct efx_channel *channel)
505{
506 struct efx_tx_queue *tx_queue;
507 struct efx_rx_queue *rx_queue;
508
509 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
510
511 efx_for_each_channel_rx_queue(rx_queue, channel)
512 efx_remove_rx_queue(rx_queue);
513 efx_for_each_channel_tx_queue(tx_queue, channel)
514 efx_remove_tx_queue(tx_queue);
515 efx_remove_eventq(channel);
516
517 channel->used_flags = 0;
518}
519
520void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
521{
522 queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
523}
524
525/**************************************************************************
526 *
527 * Port handling
528 *
529 **************************************************************************/
530
531/* This ensures that the kernel is kept informed (via
532 * netif_carrier_on/off) of the link status, and also maintains the
 533 * corresponding stopped/started state of the port's TX queue.
534 */
535static void efx_link_status_changed(struct efx_nic *efx)
536{
537 int carrier_ok;
538
539 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
540 * that no events are triggered between unregister_netdev() and the
541 * driver unloading. A more general condition is that NETDEV_CHANGE
542 * can only be generated between NETDEV_UP and NETDEV_DOWN */
543 if (!netif_running(efx->net_dev))
544 return;
545
546 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
547 if (efx->link_up != carrier_ok) {
548 efx->n_link_state_changes++;
549
550 if (efx->link_up)
551 netif_carrier_on(efx->net_dev);
552 else
553 netif_carrier_off(efx->net_dev);
554 }
555
556 /* Status message for kernel log */
557 if (efx->link_up) {
558 struct mii_if_info *gmii = &efx->mii;
559 unsigned adv, lpa;
560 /* NONE here means direct XAUI from the controller, with no
561 * MDIO-attached device we can query. */
562 if (efx->phy_type != PHY_TYPE_NONE) {
563 adv = gmii_advertised(gmii);
564 lpa = gmii_lpa(gmii);
565 } else {
566 lpa = GM_LPA_10000 | LPA_DUPLEX;
567 adv = lpa;
568 }
569 EFX_INFO(efx, "link up at %dMbps %s-duplex "
570 "(adv %04x lpa %04x) (MTU %d)%s\n",
571 (efx->link_options & GM_LPA_10000 ? 10000 :
572 (efx->link_options & GM_LPA_1000 ? 1000 :
573 (efx->link_options & GM_LPA_100 ? 100 :
574 10))),
575 (efx->link_options & GM_LPA_DUPLEX ?
576 "full" : "half"),
577 adv, lpa,
578 efx->net_dev->mtu,
579 (efx->promiscuous ? " [PROMISC]" : ""));
580 } else {
581 EFX_INFO(efx, "link down\n");
582 }
583
584}
585
586/* This call reinitialises the MAC to pick up new PHY settings. The
587 * caller must hold the mac_lock */
588static void __efx_reconfigure_port(struct efx_nic *efx)
589{
590 WARN_ON(!mutex_is_locked(&efx->mac_lock));
591
592 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
593 raw_smp_processor_id());
594
595 falcon_reconfigure_xmac(efx);
596
597 /* Inform kernel of loss/gain of carrier */
598 efx_link_status_changed(efx);
599}
600
601/* Reinitialise the MAC to pick up new PHY settings, even if the port is
602 * disabled. */
603void efx_reconfigure_port(struct efx_nic *efx)
604{
605 EFX_ASSERT_RESET_SERIALISED(efx);
606
607 mutex_lock(&efx->mac_lock);
608 __efx_reconfigure_port(efx);
609 mutex_unlock(&efx->mac_lock);
610}
611
612/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
613 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
614 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
615static void efx_reconfigure_work(struct work_struct *data)
616{
617 struct efx_nic *efx = container_of(data, struct efx_nic,
618 reconfigure_work);
619
620 mutex_lock(&efx->mac_lock);
621 if (efx->port_enabled)
622 __efx_reconfigure_port(efx);
623 mutex_unlock(&efx->mac_lock);
624}
625
626static int efx_probe_port(struct efx_nic *efx)
627{
628 int rc;
629
630 EFX_LOG(efx, "create port\n");
631
632 /* Connect up MAC/PHY operations table and read MAC address */
633 rc = falcon_probe_port(efx);
634 if (rc)
635 goto err;
636
637 /* Sanity check MAC address */
638 if (is_valid_ether_addr(efx->mac_address)) {
639 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
640 } else {
641 DECLARE_MAC_BUF(mac);
642
643 EFX_ERR(efx, "invalid MAC address %s\n",
644 print_mac(mac, efx->mac_address));
645 if (!allow_bad_hwaddr) {
646 rc = -EINVAL;
647 goto err;
648 }
649 random_ether_addr(efx->net_dev->dev_addr);
650 EFX_INFO(efx, "using locally-generated MAC %s\n",
651 print_mac(mac, efx->net_dev->dev_addr));
652 }
653
654 return 0;
655
656 err:
657 efx_remove_port(efx);
658 return rc;
659}
660
661static int efx_init_port(struct efx_nic *efx)
662{
663 int rc;
664
665 EFX_LOG(efx, "init port\n");
666
667 /* Initialise the MAC and PHY */
668 rc = falcon_init_xmac(efx);
669 if (rc)
670 return rc;
671
672 efx->port_initialized = 1;
673
674 /* Reconfigure port to program MAC registers */
675 falcon_reconfigure_xmac(efx);
676
677 return 0;
678}
679
680/* Allow efx_reconfigure_port() to be scheduled, and close the window
681 * between efx_stop_port and efx_flush_all whereby a previously scheduled
682 * efx_reconfigure_port() may have been cancelled */
683static void efx_start_port(struct efx_nic *efx)
684{
685 EFX_LOG(efx, "start port\n");
686 BUG_ON(efx->port_enabled);
687
688 mutex_lock(&efx->mac_lock);
689 efx->port_enabled = 1;
690 __efx_reconfigure_port(efx);
691 mutex_unlock(&efx->mac_lock);
692}
693
694/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
695 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
696 * efx_reconfigure_work can still be scheduled via NAPI processing
697 * until efx_flush_all() is called */
698static void efx_stop_port(struct efx_nic *efx)
699{
700 EFX_LOG(efx, "stop port\n");
701
702 mutex_lock(&efx->mac_lock);
703 efx->port_enabled = 0;
704 mutex_unlock(&efx->mac_lock);
705
706 /* Serialise against efx_set_multicast_list() */
707 if (NET_DEV_REGISTERED(efx)) {
708 netif_tx_lock_bh(efx->net_dev);
709 netif_tx_unlock_bh(efx->net_dev);
710 }
711}
712
713static void efx_fini_port(struct efx_nic *efx)
714{
715 EFX_LOG(efx, "shut down port\n");
716
717 if (!efx->port_initialized)
718 return;
719
720 falcon_fini_xmac(efx);
721 efx->port_initialized = 0;
722
723 efx->link_up = 0;
724 efx_link_status_changed(efx);
725}
726
727static void efx_remove_port(struct efx_nic *efx)
728{
729 EFX_LOG(efx, "destroying port\n");
730
731 falcon_remove_port(efx);
732}
733
734/**************************************************************************
735 *
736 * NIC handling
737 *
738 **************************************************************************/
739
740/* This configures the PCI device to enable I/O and DMA. */
741static int efx_init_io(struct efx_nic *efx)
742{
743 struct pci_dev *pci_dev = efx->pci_dev;
744 dma_addr_t dma_mask = efx->type->max_dma_mask;
745 int rc;
746
747 EFX_LOG(efx, "initialising I/O\n");
748
749 rc = pci_enable_device(pci_dev);
750 if (rc) {
751 EFX_ERR(efx, "failed to enable PCI device\n");
752 goto fail1;
753 }
754
755 pci_set_master(pci_dev);
756
757 /* Set the PCI DMA mask. Try all possibilities from our
758 * genuine mask down to 32 bits, because some architectures
759 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
 760 * masks even though they reject 46 bit masks.
761 */
762 while (dma_mask > 0x7fffffffUL) {
763 if (pci_dma_supported(pci_dev, dma_mask) &&
764 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
765 break;
766 dma_mask >>= 1;
767 }
768 if (rc) {
769 EFX_ERR(efx, "could not find a suitable DMA mask\n");
770 goto fail2;
771 }
772 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
773 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
774 if (rc) {
775 /* pci_set_consistent_dma_mask() is not *allowed* to
776 * fail with a mask that pci_set_dma_mask() accepted,
777 * but just in case...
778 */
779 EFX_ERR(efx, "failed to set consistent DMA mask\n");
780 goto fail2;
781 }
782
783 efx->membase_phys = pci_resource_start(efx->pci_dev,
784 efx->type->mem_bar);
785 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
786 if (rc) {
787 EFX_ERR(efx, "request for memory BAR failed\n");
788 rc = -EIO;
789 goto fail3;
790 }
791 efx->membase = ioremap_nocache(efx->membase_phys,
792 efx->type->mem_map_size);
793 if (!efx->membase) {
794 EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
795 efx->type->mem_bar, efx->membase_phys,
796 efx->type->mem_map_size);
797 rc = -ENOMEM;
798 goto fail4;
799 }
800 EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
801 efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
802 efx->membase);
803
804 return 0;
805
806 fail4:
807 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
808 fail3:
809 efx->membase_phys = 0UL;
810 fail2:
811 pci_disable_device(efx->pci_dev);
812 fail1:
813 return rc;
814}
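/* Worked example (illustrative platform): Falcon's genuine mask is 46 bits
 * wide. On a system whose IOMMU rejects 46-bit masks but accepts 40-bit
 * ones, the loop above halves the mask six times before pci_set_dma_mask()
 * succeeds, and that same 40-bit mask is then used for coherent
 * allocations. */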
815
816static void efx_fini_io(struct efx_nic *efx)
817{
818 EFX_LOG(efx, "shutting down I/O\n");
819
820 if (efx->membase) {
821 iounmap(efx->membase);
822 efx->membase = NULL;
823 }
824
825 if (efx->membase_phys) {
826 pci_release_region(efx->pci_dev, efx->type->mem_bar);
827 efx->membase_phys = 0UL;
828 }
829
830 pci_disable_device(efx->pci_dev);
831}
832
833/* Probe the number and type of interrupts we are able to obtain. */
834static void efx_probe_interrupts(struct efx_nic *efx)
835{
836 int max_channel = efx->type->phys_addr_channels - 1;
837 struct msix_entry xentries[EFX_MAX_CHANNELS];
838 int rc, i;
839
840 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
841 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
842
843 efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
844 efx->rss_queues = min(efx->rss_queues, max_channel + 1);
845 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
846
 847 /* Request the maximum number of MSI-X interrupts, and fill out
 848 * the channel interrupt information from the allowed allocation */
849 for (i = 0; i < efx->rss_queues; i++)
850 xentries[i].entry = i;
851 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
852 if (rc > 0) {
853 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
854 efx->rss_queues = rc;
855 rc = pci_enable_msix(efx->pci_dev, xentries,
856 efx->rss_queues);
857 }
858
859 if (rc == 0) {
860 for (i = 0; i < efx->rss_queues; i++) {
861 efx->channel[i].has_interrupt = 1;
862 efx->channel[i].irq = xentries[i].vector;
863 }
864 } else {
865 /* Fall back to single channel MSI */
866 efx->interrupt_mode = EFX_INT_MODE_MSI;
867 EFX_ERR(efx, "could not enable MSI-X\n");
868 }
869 }
870
871 /* Try single interrupt MSI */
872 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
873 efx->rss_queues = 1;
874 rc = pci_enable_msi(efx->pci_dev);
875 if (rc == 0) {
876 efx->channel[0].irq = efx->pci_dev->irq;
877 efx->channel[0].has_interrupt = 1;
878 } else {
879 EFX_ERR(efx, "could not enable MSI\n");
880 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
881 }
882 }
883
884 /* Assume legacy interrupts */
885 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
886 efx->rss_queues = 1;
887 /* Every channel is interruptible */
888 for (i = 0; i < EFX_MAX_CHANNELS; i++)
889 efx->channel[i].has_interrupt = 1;
890 efx->legacy_irq = efx->pci_dev->irq;
891 }
892}
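/* The MSI-X -> MSI -> legacy cascade above relies on the
 * pci_enable_msix() contract of this kernel era: 0 means success, a
 * positive return is the number of vectors actually available (so the
 * caller retries with that count), and a negative return is a hard
 * error. Condensed sketch of the fallback ladder (illustrative only;
 * use_legacy_irq is a hypothetical helper):
 *
 *	rc = pci_enable_msix(pdev, entries, want);
 *	if (rc > 0)
 *		rc = pci_enable_msix(pdev, entries, rc);
 *	if (rc)
 *		rc = pci_enable_msi(pdev);
 *	if (rc)
 *		use_legacy_irq(pdev->irq);
 */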
893
894static void efx_remove_interrupts(struct efx_nic *efx)
895{
896 struct efx_channel *channel;
897
898 /* Remove MSI/MSI-X interrupts */
899 efx_for_each_channel_with_interrupt(channel, efx)
900 channel->irq = 0;
901 pci_disable_msi(efx->pci_dev);
902 pci_disable_msix(efx->pci_dev);
903
904 /* Remove legacy interrupt */
905 efx->legacy_irq = 0;
906}
907
908/* Select the number of used resources.
909 * Should be called after probe_interrupts().
910 */
911static void efx_select_used(struct efx_nic *efx)
912{
913 struct efx_tx_queue *tx_queue;
914 struct efx_rx_queue *rx_queue;
915 int i;
916
917 /* TX queues. One per port per channel with TX capability
918 * (more than one per port won't work on Linux, due to
919 * out-of-order issues... but will be fine on Solaris)
920 */
921 tx_queue = &efx->tx_queue[0];
922
923 /* Perform this for each channel with TX capabilities.
924 * At the moment, we only support a single TX queue
925 */
926 tx_queue->used = 1;
927 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
928 tx_queue->channel = &efx->channel[1];
929 else
930 tx_queue->channel = &efx->channel[0];
931 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
932 tx_queue++;
933
934 /* RX queues. Each has a dedicated channel. */
935 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
936 rx_queue = &efx->rx_queue[i];
937
938 if (i < efx->rss_queues) {
939 rx_queue->used = 1;
940 /* If we allow multiple RX queues per channel
941 * we need to decide that here
942 */
943 rx_queue->channel = &efx->channel[rx_queue->queue];
944 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
945 rx_queue++;
946 }
947 }
948}
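/* Worked example of the mapping above, assuming legacy interrupts (so
 * rss_queues == 1) and separate_tx_and_rx_channels set:
 *
 *	channel[0]: RX queue 0
 *	channel[1]: TX queue 0
 *
 * With MSI or MSI-X the single TX queue instead shares channel[0]
 * with RX queue 0, and each further RX queue i maps to channel[i].
 */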
949
950static int efx_probe_nic(struct efx_nic *efx)
951{
952 int rc;
953
954 EFX_LOG(efx, "creating NIC\n");
955
956 /* Carry out hardware-type specific initialisation */
957 rc = falcon_probe_nic(efx);
958 if (rc)
959 return rc;
960
961 /* Determine the number of channels and RX queues by trying to hook
962 * in MSI-X interrupts. */
963 efx_probe_interrupts(efx);
964
965 /* Determine number of RX queues and TX queues */
966 efx_select_used(efx);
967
968 /* Initialise the interrupt moderation settings */
969 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
970
971 return 0;
972}
973
974static void efx_remove_nic(struct efx_nic *efx)
975{
976 EFX_LOG(efx, "destroying NIC\n");
977
978 efx_remove_interrupts(efx);
979 falcon_remove_nic(efx);
980}
981
982/**************************************************************************
983 *
984 * NIC startup/shutdown
985 *
986 *************************************************************************/
987
988static int efx_probe_all(struct efx_nic *efx)
989{
990 struct efx_channel *channel;
991 int rc;
992
993 /* Create NIC */
994 rc = efx_probe_nic(efx);
995 if (rc) {
996 EFX_ERR(efx, "failed to create NIC\n");
997 goto fail1;
998 }
999
1000 /* Create port */
1001 rc = efx_probe_port(efx);
1002 if (rc) {
1003 EFX_ERR(efx, "failed to create port\n");
1004 goto fail2;
1005 }
1006
1007 /* Create channels */
1008 efx_for_each_channel(channel, efx) {
1009 rc = efx_probe_channel(channel);
1010 if (rc) {
1011 EFX_ERR(efx, "failed to create channel %d\n",
1012 channel->channel);
1013 goto fail3;
1014 }
1015 }
1016
1017 return 0;
1018
1019 fail3:
1020 efx_for_each_channel(channel, efx)
1021 efx_remove_channel(channel);
1022 efx_remove_port(efx);
1023 fail2:
1024 efx_remove_nic(efx);
1025 fail1:
1026 return rc;
1027}
1028
1029/* Called after previous invocation(s) of efx_stop_all, restarts the
1030 * port, kernel transmit queue, NAPI processing and hardware interrupts,
1031 * and ensures that the port is scheduled to be reconfigured.
1032 * This function is safe to call multiple times when the NIC is in any
1033 * state. */
1034static void efx_start_all(struct efx_nic *efx)
1035{
1036 struct efx_channel *channel;
1037
1038 EFX_ASSERT_RESET_SERIALISED(efx);
1039
1040 /* Check that it is appropriate to restart the interface. All
1041 * of these flags are safe to read under just the rtnl lock */
1042 if (efx->port_enabled)
1043 return;
1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1045 return;
1046 if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
1047 return;
1048
1049 /* Mark the port as enabled so port reconfigurations can start, then
1050 * restart the transmit interface early so the watchdog timer stops */
1051 efx_start_port(efx);
1052 efx_wake_queue(efx);
1053
1054 efx_for_each_channel(channel, efx)
1055 efx_start_channel(channel);
1056
1057 falcon_enable_interrupts(efx);
1058
1059 /* Start hardware monitor if we're in RUNNING */
1060 if (efx->state == STATE_RUNNING)
1061 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1062 efx_monitor_interval);
1063}
1064
1065/* Flush all delayed work. Should only be called when no more delayed work
1066 * will be scheduled. This doesn't flush pending online resets (efx_reset),
1067 * since we're holding the rtnl_lock at this point. */
1068static void efx_flush_all(struct efx_nic *efx)
1069{
1070 struct efx_rx_queue *rx_queue;
1071
1072 /* Make sure the hardware monitor is stopped */
1073 cancel_delayed_work_sync(&efx->monitor_work);
1074
1075 /* Ensure that all RX slow refills are complete. */
1076 efx_for_each_rx_queue(rx_queue, efx) {
1077 cancel_delayed_work_sync(&rx_queue->work);
1078 }
1079
1080 /* Stop scheduled port reconfigurations */
1081 cancel_work_sync(&efx->reconfigure_work);
1082
1083}
1084
1085/* Quiesce hardware and software without bringing the link down.
1086 * Safe to call multiple times when the NIC and interface are in any
1087 * state. The caller is guaranteed to subsequently be in a position
1088 * to modify any hardware and software state they see fit without
1089 * taking locks. */
1090static void efx_stop_all(struct efx_nic *efx)
1091{
1092 struct efx_channel *channel;
1093
1094 EFX_ASSERT_RESET_SERIALISED(efx);
1095
1096 /* port_enabled can be read safely under the rtnl lock */
1097 if (!efx->port_enabled)
1098 return;
1099
1100 /* Disable interrupts and wait for ISR to complete */
1101 falcon_disable_interrupts(efx);
1102 if (efx->legacy_irq)
1103 synchronize_irq(efx->legacy_irq);
1104 efx_for_each_channel_with_interrupt(channel, efx)
1105 if (channel->irq)
1106 synchronize_irq(channel->irq);
1107
1108 /* Stop all NAPI processing and synchronous rx refills */
1109 efx_for_each_channel(channel, efx)
1110 efx_stop_channel(channel);
1111
1112 /* Stop all asynchronous port reconfigurations. Since all
1113 * event processing has already been stopped, there is no
1114 * window to lose PHY events */
1115 efx_stop_port(efx);
1116
1117 /* Flush reconfigure_work, refill_workqueue, monitor_work */
1118 efx_flush_all(efx);
1119
1120 /* Isolate the MAC from the TX and RX engines, so that queue
1121 * flushes will complete in a timely fashion. */
1122 falcon_deconfigure_mac_wrapper(efx);
1123 falcon_drain_tx_fifo(efx);
1124
1125 /* Stop the kernel transmit interface late, so the watchdog
1126 * timer isn't ticking over the flush */
1127 efx_stop_queue(efx);
1128 if (NET_DEV_REGISTERED(efx)) {
1129 netif_tx_lock_bh(efx->net_dev);
1130 netif_tx_unlock_bh(efx->net_dev);
1131 }
1132}
1133
1134static void efx_remove_all(struct efx_nic *efx)
1135{
1136 struct efx_channel *channel;
1137
1138 efx_for_each_channel(channel, efx)
1139 efx_remove_channel(channel);
1140 efx_remove_port(efx);
1141 efx_remove_nic(efx);
1142}
1143
1144/* A convenience function to safely flush all the queues */
1145int efx_flush_queues(struct efx_nic *efx)
1146{
1147 int rc;
1148
1149 EFX_ASSERT_RESET_SERIALISED(efx);
1150
1151 efx_stop_all(efx);
1152
1153 efx_fini_channels(efx);
1154 rc = efx_init_channels(efx);
1155 if (rc) {
1156 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1157 return rc;
1158 }
1159
1160 efx_start_all(efx);
1161
1162 return 0;
1163}
1164
1165/**************************************************************************
1166 *
1167 * Interrupt moderation
1168 *
1169 **************************************************************************/
1170
1171/* Set interrupt moderation parameters */
1172void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
1173{
1174 struct efx_tx_queue *tx_queue;
1175 struct efx_rx_queue *rx_queue;
1176
1177 EFX_ASSERT_RESET_SERIALISED(efx);
1178
1179 efx_for_each_tx_queue(tx_queue, efx)
1180 tx_queue->channel->irq_moderation = tx_usecs;
1181
1182 efx_for_each_rx_queue(rx_queue, efx)
1183 rx_queue->channel->irq_moderation = rx_usecs;
1184}
1185
1186/**************************************************************************
1187 *
1188 * Hardware monitor
1189 *
1190 **************************************************************************/
1191
1192/* Run periodically off the general workqueue. Serialised against
1193 * efx_reconfigure_port via the mac_lock */
1194static void efx_monitor(struct work_struct *data)
1195{
1196 struct efx_nic *efx = container_of(data, struct efx_nic,
1197 monitor_work.work);
1198 int rc = 0;
1199
1200 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1201 raw_smp_processor_id());
1202
1203
1204 /* If the mac_lock is already held then it is likely a port
1205 * reconfiguration is already in place, which will likely do
1206 * most of the work of check_hw() anyway. */
1207 if (!mutex_trylock(&efx->mac_lock)) {
1208 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1209 efx_monitor_interval);
1210 return;
1211 }
1212
1213 if (efx->port_enabled)
1214 rc = falcon_check_xmac(efx);
1215 mutex_unlock(&efx->mac_lock);
1216
1217 if (rc) {
1218 if (monitor_reset) {
1219 EFX_ERR(efx, "hardware monitor detected a fault: "
1220 "triggering reset\n");
1221 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1222 } else {
1223 EFX_ERR(efx, "hardware monitor detected a fault, "
1224 "skipping reset\n");
1225 }
1226 }
1227
1228 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1229 efx_monitor_interval);
1230}
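/* The mutex_trylock()-and-requeue pattern above keeps the shared
 * workqueue from sleeping on mac_lock. Generic sketch of the idiom
 * (illustrative only, hypothetical names):
 *
 *	static void example_monitor(struct work_struct *data)
 *	{
 *		if (!mutex_trylock(&state_lock)) {
 *			queue_delayed_work(wq, &dwork, interval);
 *			return;
 *		}
 *		check_hardware();
 *		mutex_unlock(&state_lock);
 *		queue_delayed_work(wq, &dwork, interval);
 *	}
 */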
1231
1232/**************************************************************************
1233 *
1234 * ioctls
1235 *
1236 *************************************************************************/
1237
1238/* Net device ioctl
1239 * Context: process, rtnl_lock() held.
1240 */
1241static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1242{
1243 struct efx_nic *efx = net_dev->priv;
1244
1245 EFX_ASSERT_RESET_SERIALISED(efx);
1246
1247 return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
1248}
1249
1250/**************************************************************************
1251 *
1252 * NAPI interface
1253 *
1254 **************************************************************************/
1255
1256static int efx_init_napi(struct efx_nic *efx)
1257{
1258 struct efx_channel *channel;
1259 int rc;
1260
1261 efx_for_each_channel(channel, efx) {
1262 channel->napi_dev = efx->net_dev;
1263 rc = efx_lro_init(&channel->lro_mgr, efx);
1264 if (rc)
1265 goto err;
1266 }
1267 return 0;
1268 err:
1269 efx_fini_napi(efx);
1270 return rc;
1271}
1272
1273static void efx_fini_napi(struct efx_nic *efx)
1274{
1275 struct efx_channel *channel;
1276
1277 efx_for_each_channel(channel, efx) {
1278 efx_lro_fini(&channel->lro_mgr);
1279 channel->napi_dev = NULL;
1280 }
1281}
1282
1283/**************************************************************************
1284 *
1285 * Kernel netpoll interface
1286 *
1287 *************************************************************************/
1288
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290
1291/* Although in the common case interrupts will be disabled, this is not
1292 * guaranteed. However, all our work happens inside the NAPI callback,
1293 * so no locking is required.
1294 */
1295static void efx_netpoll(struct net_device *net_dev)
1296{
1297 struct efx_nic *efx = net_dev->priv;
1298 struct efx_channel *channel;
1299
1300 efx_for_each_channel_with_interrupt(channel, efx)
1301 efx_schedule_channel(channel);
1302}
1303
1304#endif
1305
1306/**************************************************************************
1307 *
1308 * Kernel net device interface
1309 *
1310 *************************************************************************/
1311
1312/* Context: process, rtnl_lock() held. */
1313static int efx_net_open(struct net_device *net_dev)
1314{
1315 struct efx_nic *efx = net_dev->priv;
1316 EFX_ASSERT_RESET_SERIALISED(efx);
1317
1318 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1319 raw_smp_processor_id());
1320
1321 efx_start_all(efx);
1322 return 0;
1323}
1324
1325/* Context: process, rtnl_lock() held.
1326 * Note that the kernel will ignore our return code; this method
1327 * should really return void.
1328 */
1329static int efx_net_stop(struct net_device *net_dev)
1330{
1331 struct efx_nic *efx = net_dev->priv;
1332 int rc;
1333
1334 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1335 raw_smp_processor_id());
1336
1337 /* Stop the device and flush all the channels */
1338 efx_stop_all(efx);
1339 efx_fini_channels(efx);
1340 rc = efx_init_channels(efx);
1341 if (rc)
1342 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1343
1344 return 0;
1345}
1346
1347/* Context: process, dev_base_lock held, non-blocking. */
1348static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1349{
1350 struct efx_nic *efx = net_dev->priv;
1351 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1352 struct net_device_stats *stats = &net_dev->stats;
1353
1354 if (!spin_trylock(&efx->stats_lock))
1355 return stats;
1356 if (efx->state == STATE_RUNNING) {
1357 falcon_update_stats_xmac(efx);
1358 falcon_update_nic_stats(efx);
1359 }
1360 spin_unlock(&efx->stats_lock);
1361
1362 stats->rx_packets = mac_stats->rx_packets;
1363 stats->tx_packets = mac_stats->tx_packets;
1364 stats->rx_bytes = mac_stats->rx_bytes;
1365 stats->tx_bytes = mac_stats->tx_bytes;
1366 stats->multicast = mac_stats->rx_multicast;
1367 stats->collisions = mac_stats->tx_collision;
1368 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1369 mac_stats->rx_length_error);
1370 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1371 stats->rx_crc_errors = mac_stats->rx_bad;
1372 stats->rx_frame_errors = mac_stats->rx_align_error;
1373 stats->rx_fifo_errors = mac_stats->rx_overflow;
1374 stats->rx_missed_errors = mac_stats->rx_missed;
1375 stats->tx_window_errors = mac_stats->tx_late_collision;
1376
1377 stats->rx_errors = (stats->rx_length_errors +
1378 stats->rx_over_errors +
1379 stats->rx_crc_errors +
1380 stats->rx_frame_errors +
1381 stats->rx_fifo_errors +
1382 stats->rx_missed_errors +
1383 mac_stats->rx_symbol_error);
1384 stats->tx_errors = (stats->tx_window_errors +
1385 mac_stats->tx_bad);
1386
1387 return stats;
1388}
1389
1390/* Context: netif_tx_lock held, BHs disabled. */
1391static void efx_watchdog(struct net_device *net_dev)
1392{
1393 struct efx_nic *efx = net_dev->priv;
1394
1395 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1396 atomic_read(&efx->netif_stop_count), efx->port_enabled,
1397 monitor_reset ? "resetting channels" : "skipping reset");
1398
1399 if (monitor_reset)
1400 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1401}
1402
1403
1404/* Context: process, rtnl_lock() held. */
1405static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1406{
1407 struct efx_nic *efx = net_dev->priv;
1408 int rc = 0;
1409
1410 EFX_ASSERT_RESET_SERIALISED(efx);
1411
1412 if (new_mtu > EFX_MAX_MTU)
1413 return -EINVAL;
1414
1415 efx_stop_all(efx);
1416
1417 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1418
1419 efx_fini_channels(efx);
1420 net_dev->mtu = new_mtu;
1421 rc = efx_init_channels(efx);
1422 if (rc)
1423 goto fail;
1424
1425 efx_start_all(efx);
1426 return rc;
1427
1428 fail:
1429 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1430 return rc;
1431}
1432
1433static int efx_set_mac_address(struct net_device *net_dev, void *data)
1434{
1435 struct efx_nic *efx = net_dev->priv;
1436 struct sockaddr *addr = data;
1437 char *new_addr = addr->sa_data;
1438
1439 EFX_ASSERT_RESET_SERIALISED(efx);
1440
1441 if (!is_valid_ether_addr(new_addr)) {
1442 DECLARE_MAC_BUF(mac);
1443 EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
1444 print_mac(mac, new_addr));
1445 return -EINVAL;
1446 }
1447
1448 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1449
1450 /* Reconfigure the MAC */
1451 efx_reconfigure_port(efx);
1452
1453 return 0;
1454}
1455
1456/* Context: netif_tx_lock held, BHs disabled. */
1457static void efx_set_multicast_list(struct net_device *net_dev)
1458{
1459 struct efx_nic *efx = net_dev->priv;
1460 struct dev_mc_list *mc_list = net_dev->mc_list;
1461 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1462 int promiscuous;
1463 u32 crc;
1464 int bit;
1465 int i;
1466
1467 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
1468 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1469 if (efx->promiscuous != promiscuous) {
1470 efx->promiscuous = promiscuous;
1471 /* Close the window between efx_stop_port() and efx_flush_all()
1472 * by only queuing work when the port is enabled. */
1473 if (efx->port_enabled)
1474 queue_work(efx->workqueue, &efx->reconfigure_work);
1475 }
1476
1477 /* Build multicast hash table */
1478 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1479 memset(mc_hash, 0xff, sizeof(*mc_hash));
1480 } else {
1481 memset(mc_hash, 0x00, sizeof(*mc_hash));
1482 for (i = 0; i < net_dev->mc_count; i++) {
1483 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1484 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1485 set_bit_le(bit, mc_hash->byte);
1486 mc_list = mc_list->next;
1487 }
1488 }
1489
1490 /* Create and activate new global multicast hash table */
1491 falcon_set_multicast_hash(efx);
1492}
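/* Worked example of the hash computation above: each multicast address
 * selects one bit of the EFX_MCAST_HASH_ENTRIES-bit filter from the
 * low-order bits of its little-endian CRC-32. The address below is a
 * hypothetical example (the IPv4 all-hosts group), illustrative only:
 *
 *	static const u8 addr[ETH_ALEN] =
 *		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u32 crc = ether_crc_le(ETH_ALEN, addr);
 *	int bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
 *	set_bit_le(bit, mc_hash->byte);
 */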
1493
1494static int efx_netdev_event(struct notifier_block *this,
1495 unsigned long event, void *ptr)
1496{
1497 struct net_device *net_dev = (struct net_device *)ptr;
1498
1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1500 struct efx_nic *efx = net_dev->priv;
1501
1502 strcpy(efx->name, net_dev->name);
1503 }
1504
1505 return NOTIFY_DONE;
1506}
1507
1508static struct notifier_block efx_netdev_notifier = {
1509 .notifier_call = efx_netdev_event,
1510};
1511
1512static int efx_register_netdev(struct efx_nic *efx)
1513{
1514 struct net_device *net_dev = efx->net_dev;
1515 int rc;
1516
1517 net_dev->watchdog_timeo = 5 * HZ;
1518 net_dev->irq = efx->pci_dev->irq;
1519 net_dev->open = efx_net_open;
1520 net_dev->stop = efx_net_stop;
1521 net_dev->get_stats = efx_net_stats;
1522 net_dev->tx_timeout = &efx_watchdog;
1523 net_dev->hard_start_xmit = efx_hard_start_xmit;
1524 net_dev->do_ioctl = efx_ioctl;
1525 net_dev->change_mtu = efx_change_mtu;
1526 net_dev->set_mac_address = efx_set_mac_address;
1527 net_dev->set_multicast_list = efx_set_multicast_list;
1528#ifdef CONFIG_NET_POLL_CONTROLLER
1529 net_dev->poll_controller = efx_netpoll;
1530#endif
1531 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1532 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1533
1534 /* Always start with carrier off; PHY events will detect the link */
1535 netif_carrier_off(efx->net_dev);
1536
1537 /* Clear MAC statistics */
1538 falcon_update_stats_xmac(efx);
1539 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1540
1541 rc = register_netdev(net_dev);
1542 if (rc) {
1543 EFX_ERR(efx, "could not register net dev\n");
1544 return rc;
1545 }
1546 strcpy(efx->name, net_dev->name);
1547
1548 return 0;
1549}
1550
1551static void efx_unregister_netdev(struct efx_nic *efx)
1552{
1553 struct efx_tx_queue *tx_queue;
1554
1555 if (!efx->net_dev)
1556 return;
1557
1558 BUG_ON(efx->net_dev->priv != efx);
1559
1560 /* Free up any skbs still remaining. This must happen before we
1561 * try to unregister the netdev, as running their destructors may
1562 * be needed to get the device reference count to zero. */
1563 efx_for_each_tx_queue(tx_queue, efx)
1564 efx_release_tx_buffers(tx_queue);
1565
1566 if (NET_DEV_REGISTERED(efx)) {
1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1568 unregister_netdev(efx->net_dev);
1569 }
1570}
1571
1572/**************************************************************************
1573 *
1574 * Device reset and suspend
1575 *
1576 **************************************************************************/
1577
1578/* The final hardware and software teardown before reset. */
1579static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1580{
1581 int rc;
1582
1583 EFX_ASSERT_RESET_SERIALISED(efx);
1584
1585 rc = falcon_xmac_get_settings(efx, ecmd);
1586 if (rc) {
1587 EFX_ERR(efx, "could not back up PHY settings\n");
1588 goto fail;
1589 }
1590
1591 efx_fini_channels(efx);
1592 return 0;
1593
1594 fail:
1595 return rc;
1596}
1597
1598/* The first part of software initialisation after a hardware reset.
1599 * This function does not handle serialisation with the kernel; it
1600 * assumes the caller has done this. */
1601static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1602{
1603 int rc;
1604
1605 rc = efx_init_channels(efx);
1606 if (rc)
1607 goto fail1;
1608
1609 /* Restore MAC and PHY settings. */
1610 rc = falcon_xmac_set_settings(efx, ecmd);
1611 if (rc) {
1612 EFX_ERR(efx, "could not restore PHY settings\n");
1613 goto fail2;
1614 }
1615
1616 return 0;
1617
1618 fail2:
1619 efx_fini_channels(efx);
1620 fail1:
1621 return rc;
1622}
1623
1624/* Reset the NIC as transparently as possible. Do not reset the PHY.
1625 * Note that the reset may fail, in which case the card will be left
1626 * in a most-probably-unusable state.
1627 *
1628 * This function will sleep. You cannot reset from within an atomic
1629 * state; use efx_schedule_reset() instead.
1630 *
1631 * Grabs the rtnl_lock.
1632 */
1633static int efx_reset(struct efx_nic *efx)
1634{
1635 struct ethtool_cmd ecmd;
1636 enum reset_type method = efx->reset_pending;
1637 int rc;
1638
1639 /* Serialise with kernel interfaces */
1640 rtnl_lock();
1641
1642 /* If we're not RUNNING then don't reset. Leave the reset_pending
1643 * flag set so that efx_pci_probe_main will be retried */
1644 if (efx->state != STATE_RUNNING) {
1645 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1646 goto unlock_rtnl;
1647 }
1648
1649 efx->state = STATE_RESETTING;
1650 EFX_INFO(efx, "resetting (%d)\n", method);
1651
1652 /* The net_dev->get_stats handler is quite slow, and will fail
1653 * if a fetch is pending over reset. Serialise against it. */
1654 spin_lock(&efx->stats_lock);
1655 spin_unlock(&efx->stats_lock);
1656
1657 efx_stop_all(efx);
1658 mutex_lock(&efx->mac_lock);
1659
1660 rc = efx_reset_down(efx, &ecmd);
1661 if (rc)
1662 goto fail1;
1663
1664 rc = falcon_reset_hw(efx, method);
1665 if (rc) {
1666 EFX_ERR(efx, "failed to reset hardware\n");
1667 goto fail2;
1668 }
1669
1670 /* Allow resets to be rescheduled. */
1671 efx->reset_pending = RESET_TYPE_NONE;
1672
1673 /* Reinitialise bus-mastering, which may have been turned off before
1674 * the reset was scheduled. This is still appropriate, even in the
1675 * RESET_TYPE_DISABLE case, since this driver generally assumes the
1676 * hardware can respond to requests. */
1677 pci_set_master(efx->pci_dev);
1678
1679 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1680 * case so the driver can talk to external SRAM */
1681 rc = falcon_init_nic(efx);
1682 if (rc) {
1683 EFX_ERR(efx, "failed to initialise NIC\n");
1684 goto fail3;
1685 }
1686
1687 /* Leave device stopped if necessary */
1688 if (method == RESET_TYPE_DISABLE) {
1689 /* Reinitialise the device anyway so the driver unload sequence
1690 * can talk to the external SRAM */
1691 (void) falcon_init_nic(efx);
1692 rc = -EIO;
1693 goto fail4;
1694 }
1695
1696 rc = efx_reset_up(efx, &ecmd);
1697 if (rc)
1698 goto fail5;
1699
1700 mutex_unlock(&efx->mac_lock);
1701 EFX_LOG(efx, "reset complete\n");
1702
1703 efx->state = STATE_RUNNING;
1704 efx_start_all(efx);
1705
1706 unlock_rtnl:
1707 rtnl_unlock();
1708 return 0;
1709
1710 fail5:
1711 fail4:
1712 fail3:
1713 fail2:
1714 fail1:
1715 EFX_ERR(efx, "has been disabled\n");
1716 efx->state = STATE_DISABLED;
1717
1718 mutex_unlock(&efx->mac_lock);
1719 rtnl_unlock();
1720 efx_unregister_netdev(efx);
1721 efx_fini_port(efx);
1722 return rc;
1723}
1724
1725/* The worker thread exists so that code that cannot sleep can
1726 * schedule a reset for later.
1727 */
1728static void efx_reset_work(struct work_struct *data)
1729{
1730 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
1731
1732 efx_reset(nic);
1733}
1734
1735void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1736{
1737 enum reset_type method;
1738
1739 if (efx->reset_pending != RESET_TYPE_NONE) {
1740 EFX_INFO(efx, "quenching already scheduled reset\n");
1741 return;
1742 }
1743
1744 switch (type) {
1745 case RESET_TYPE_INVISIBLE:
1746 case RESET_TYPE_ALL:
1747 case RESET_TYPE_WORLD:
1748 case RESET_TYPE_DISABLE:
1749 method = type;
1750 break;
1751 case RESET_TYPE_RX_RECOVERY:
1752 case RESET_TYPE_RX_DESC_FETCH:
1753 case RESET_TYPE_TX_DESC_FETCH:
1754 case RESET_TYPE_TX_SKIP:
1755 method = RESET_TYPE_INVISIBLE;
1756 break;
1757 default:
1758 method = RESET_TYPE_ALL;
1759 break;
1760 }
1761
1762 if (method != type)
1763 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1764 else
1765 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1766
1767 efx->reset_pending = method;
1768
1769 queue_work(efx->workqueue, &efx->reset_work);
1770}
1771
1772/**************************************************************************
1773 *
1774 * List of NICs we support
1775 *
1776 **************************************************************************/
1777
1778/* PCI device ID table */
1779static struct pci_device_id efx_pci_table[] __devinitdata = {
1780 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1781 .driver_data = (unsigned long) &falcon_a_nic_type},
1782 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1783 .driver_data = (unsigned long) &falcon_b_nic_type},
1784 {0} /* end of list */
1785};
1786
1787/**************************************************************************
1788 *
1789 * Dummy PHY/MAC/Board operations
1790 *
1791 * Can be used where the PHY, MAC or board does not implement an
1792 * operation. Needed so that all function pointers are valid and do
1793 * not have to be tested before use.
1794 *
1795 **************************************************************************/
1796int efx_port_dummy_op_int(struct efx_nic *efx)
1797{
1798 return 0;
1799}
1800void efx_port_dummy_op_void(struct efx_nic *efx) {}
1801void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
1802
1803static struct efx_phy_operations efx_dummy_phy_operations = {
1804 .init = efx_port_dummy_op_int,
1805 .reconfigure = efx_port_dummy_op_void,
1806 .check_hw = efx_port_dummy_op_int,
1807 .fini = efx_port_dummy_op_void,
1808 .clear_interrupt = efx_port_dummy_op_void,
1809 .reset_xaui = efx_port_dummy_op_void,
1810};
1811
1812/* Dummy board operations */
1813static int efx_nic_dummy_op_int(struct efx_nic *nic)
1814{
1815 return 0;
1816}
1817
1818static struct efx_board efx_dummy_board_info = {
1819 .init = efx_nic_dummy_op_int,
1820 .init_leds = efx_port_dummy_op_int,
1821 .set_fault_led = efx_port_dummy_op_blink,
1822};
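/* These dummy operations follow the null-object pattern: every
 * efx_nic starts out with valid PHY and board function pointers, so
 * callers may invoke them unconditionally. Illustrative use, mirroring
 * what efx_init_struct() below does before a real PHY driver takes
 * over:
 *
 *	efx->phy_op = &efx_dummy_phy_operations;
 *	efx->phy_op->init(efx);
 */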
1823
1824/**************************************************************************
1825 *
1826 * Data housekeeping
1827 *
1828 **************************************************************************/
1829
1830/* This zeroes out and then fills in the invariants in a struct
1831 * efx_nic (including all sub-structures).
1832 */
1833static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1834 struct pci_dev *pci_dev, struct net_device *net_dev)
1835{
1836 struct efx_channel *channel;
1837 struct efx_tx_queue *tx_queue;
1838 struct efx_rx_queue *rx_queue;
1839 int i, rc;
1840
1841 /* Initialise common structures */
1842 memset(efx, 0, sizeof(*efx));
1843 spin_lock_init(&efx->biu_lock);
1844 spin_lock_init(&efx->phy_lock);
1845 INIT_WORK(&efx->reset_work, efx_reset_work);
1846 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1847 efx->pci_dev = pci_dev;
1848 efx->state = STATE_INIT;
1849 efx->reset_pending = RESET_TYPE_NONE;
1850 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1851 efx->board_info = efx_dummy_board_info;
1852
1853 efx->net_dev = net_dev;
1854 efx->rx_checksum_enabled = 1;
1855 spin_lock_init(&efx->netif_stop_lock);
1856 spin_lock_init(&efx->stats_lock);
1857 mutex_init(&efx->mac_lock);
1858 efx->phy_op = &efx_dummy_phy_operations;
1859 efx->mii.dev = net_dev;
1860 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1861 atomic_set(&efx->netif_stop_count, 1);
1862
1863 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1864 channel = &efx->channel[i];
1865 channel->efx = efx;
1866 channel->channel = i;
1867 channel->evqnum = i;
1868 channel->work_pending = 0;
1869 }
1870 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
1871 tx_queue = &efx->tx_queue[i];
1872 tx_queue->efx = efx;
1873 tx_queue->queue = i;
1874 tx_queue->buffer = NULL;
1875 tx_queue->channel = &efx->channel[0]; /* for safety */
1876 }
1877 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1878 rx_queue = &efx->rx_queue[i];
1879 rx_queue->efx = efx;
1880 rx_queue->queue = i;
1881 rx_queue->channel = &efx->channel[0]; /* for safety */
1882 rx_queue->buffer = NULL;
1883 spin_lock_init(&rx_queue->add_lock);
1884 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1885 }
1886
1887 efx->type = type;
1888
1889 /* Sanity-check NIC type */
1890 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1891 (efx->type->txd_ring_mask + 1));
1892 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1893 (efx->type->rxd_ring_mask + 1));
1894 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1895 (efx->type->evq_size - 1));
1896 /* As close as we can get to guaranteeing that we don't overflow */
1897 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1898 (efx->type->txd_ring_mask + 1 +
1899 efx->type->rxd_ring_mask + 1));
1900 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1901
1902 /* Higher numbered interrupt modes are less capable! */
1903 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1904 interrupt_mode);
1905
1906 efx->workqueue = create_singlethread_workqueue("sfc_work");
1907 if (!efx->workqueue) {
1908 rc = -ENOMEM;
1909 goto fail1;
1910 }
1911
1912 return 0;
1913
1914 fail1:
1915 return rc;
1916}
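/* The ring sanity checks above use the standard bit tricks: a ring
 * size is a power of two iff (size & (size - 1)) == 0, and a ring
 * mask has the form 2^n - 1 iff (mask & (mask + 1)) == 0. With
 * illustrative values (not taken from the hardware definitions):
 *
 *	4096 & 4095 == 0	valid evq_size
 *	4095 & 4096 == 0	valid txd_ring_mask
 *	4000 & 3999 != 0	rejected
 */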
1917
1918static void efx_fini_struct(struct efx_nic *efx)
1919{
1920 if (efx->workqueue) {
1921 destroy_workqueue(efx->workqueue);
1922 efx->workqueue = NULL;
1923 }
1924}
1925
1926/**************************************************************************
1927 *
1928 * PCI interface
1929 *
1930 **************************************************************************/
1931
1932/* Main body of final NIC shutdown code
1933 * This is called only at module unload (or hotplug removal).
1934 */
1935static void efx_pci_remove_main(struct efx_nic *efx)
1936{
1937 EFX_ASSERT_RESET_SERIALISED(efx);
1938
1939 /* Skip everything if we never obtained a valid membase */
1940 if (!efx->membase)
1941 return;
1942
1943 efx_fini_channels(efx);
1944 efx_fini_port(efx);
1945
1946 /* Shutdown the board, then the NIC and board state */
1947 falcon_fini_interrupt(efx);
1948
1949 efx_fini_napi(efx);
1950 efx_remove_all(efx);
1951}
1952
1953/* Final NIC shutdown
1954 * This is called only at module unload (or hotplug removal).
1955 */
1956static void efx_pci_remove(struct pci_dev *pci_dev)
1957{
1958 struct efx_nic *efx;
1959
1960 efx = pci_get_drvdata(pci_dev);
1961 if (!efx)
1962 return;
1963
1964 /* Mark the NIC as fini, then stop the interface */
1965 rtnl_lock();
1966 efx->state = STATE_FINI;
1967 dev_close(efx->net_dev);
1968
1969 /* Allow any queued efx_resets() to complete */
1970 rtnl_unlock();
1971
1972 if (efx->membase == NULL)
1973 goto out;
1974
1975 efx_unregister_netdev(efx);
1976
1977 /* Wait for any scheduled resets to complete. No more will be
1978 * scheduled from this point because efx_stop_all() has been
1979 * called, we are no longer registered with driverlink, and
1980 * the net_devices have been removed. */
1981 flush_workqueue(efx->workqueue);
1982
1983 efx_pci_remove_main(efx);
1984
1985out:
1986 efx_fini_io(efx);
1987 EFX_LOG(efx, "shutdown successful\n");
1988
1989 pci_set_drvdata(pci_dev, NULL);
1990 efx_fini_struct(efx);
1991 free_netdev(efx->net_dev);
1992}
1993
1994/* Main body of NIC initialisation
1995 * This is called at module load (or hotplug insertion, theoretically).
1996 */
1997static int efx_pci_probe_main(struct efx_nic *efx)
1998{
1999 int rc;
2000
2001 /* Do start-of-day initialisation */
2002 rc = efx_probe_all(efx);
2003 if (rc)
2004 goto fail1;
2005
2006 rc = efx_init_napi(efx);
2007 if (rc)
2008 goto fail2;
2009
2010 /* Initialise the board */
2011 rc = efx->board_info.init(efx);
2012 if (rc) {
2013 EFX_ERR(efx, "failed to initialise board\n");
2014 goto fail3;
2015 }
2016
2017 rc = falcon_init_nic(efx);
2018 if (rc) {
2019 EFX_ERR(efx, "failed to initialise NIC\n");
2020 goto fail4;
2021 }
2022
2023 rc = efx_init_port(efx);
2024 if (rc) {
2025 EFX_ERR(efx, "failed to initialise port\n");
2026 goto fail5;
2027 }
2028
2029 rc = efx_init_channels(efx);
2030 if (rc)
2031 goto fail6;
2032
2033 rc = falcon_init_interrupt(efx);
2034 if (rc)
2035 goto fail7;
2036
2037 return 0;
2038
2039 fail7:
2040 efx_fini_channels(efx);
2041 fail6:
2042 efx_fini_port(efx);
2043 fail5:
2044 fail4:
2045 fail3:
2046 efx_fini_napi(efx);
2047 fail2:
2048 efx_remove_all(efx);
2049 fail1:
2050 return rc;
2051}
2052
2053/* NIC initialisation
2054 *
2055 * This is called at module load (or hotplug insertion,
2056 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2057 * sets up and registers the network devices with the kernel and hooks
2058 * the interrupt service routine. It does not prepare the device for
2059 * transmission; this is left to the first time one of the network
2060 * interfaces is brought up (i.e. efx_net_open).
2061 */
2062static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2063 const struct pci_device_id *entry)
2064{
2065 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2066 struct net_device *net_dev;
2067 struct efx_nic *efx;
2068 int i, rc;
2069
2070 /* Allocate and initialise a struct net_device and struct efx_nic */
2071 net_dev = alloc_etherdev(sizeof(*efx));
2072 if (!net_dev)
2073 return -ENOMEM;
2074 net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
2075 if (lro)
2076 net_dev->features |= NETIF_F_LRO;
2077 efx = net_dev->priv;
2078 pci_set_drvdata(pci_dev, efx);
2079 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2080 if (rc)
2081 goto fail1;
2082
2083 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2084
2085 /* Set up basic I/O (BAR mappings etc) */
2086 rc = efx_init_io(efx);
2087 if (rc)
2088 goto fail2;
2089
2090 /* No serialisation is required with the reset path because
2091 * we're in STATE_INIT. */
2092 for (i = 0; i < 5; i++) {
2093 rc = efx_pci_probe_main(efx);
2094 if (rc == 0)
2095 break;
2096
2097 /* Serialise against efx_reset(). No more resets will be
2098 * scheduled since efx_stop_all() has been called, and we
2099 * have not and never have been registered with either
2100 * the rtnetlink or driverlink layers. */
2101 cancel_work_sync(&efx->reset_work);
2102
2103 /* Retry if a recoverable reset event has been scheduled */
2104 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2105 (efx->reset_pending != RESET_TYPE_ALL))
2106 goto fail3;
2107
2108 efx->reset_pending = RESET_TYPE_NONE;
2109 }
2110
2111 if (rc) {
2112 EFX_ERR(efx, "Could not reset NIC\n");
2113 goto fail4;
2114 }
2115
2116 /* Switch to the running state before we expose the device to
2117 * the OS. This is to ensure that the initial gathering of
2118 * MAC stats succeeds. */
2119 rtnl_lock();
2120 efx->state = STATE_RUNNING;
2121 rtnl_unlock();
2122
2123 rc = efx_register_netdev(efx);
2124 if (rc)
2125 goto fail5;
2126
2127 EFX_LOG(efx, "initialisation successful\n");
2128
2129 return 0;
2130
2131 fail5:
2132 efx_pci_remove_main(efx);
2133 fail4:
2134 fail3:
2135 efx_fini_io(efx);
2136 fail2:
2137 efx_fini_struct(efx);
2138 fail1:
2139 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2140 free_netdev(net_dev);
2141 return rc;
2142}
2143
2144static struct pci_driver efx_pci_driver = {
2145 .name = EFX_DRIVER_NAME,
2146 .id_table = efx_pci_table,
2147 .probe = efx_pci_probe,
2148 .remove = efx_pci_remove,
2149};
2150
2151/**************************************************************************
2152 *
2153 * Kernel module interface
2154 *
2155 *************************************************************************/
2156
2157module_param(interrupt_mode, uint, 0444);
2158MODULE_PARM_DESC(interrupt_mode,
2159 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2160
2161static int __init efx_init_module(void)
2162{
2163 int rc;
2164
2165 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2166
2167 rc = register_netdevice_notifier(&efx_netdev_notifier);
2168 if (rc)
2169 goto err_notifier;
2170
2171 refill_workqueue = create_workqueue("sfc_refill");
2172 if (!refill_workqueue) {
2173 rc = -ENOMEM;
2174 goto err_refill;
2175 }
2176
2177 rc = pci_register_driver(&efx_pci_driver);
2178 if (rc < 0)
2179 goto err_pci;
2180
2181 return 0;
2182
2183 err_pci:
2184 destroy_workqueue(refill_workqueue);
2185 err_refill:
2186 unregister_netdevice_notifier(&efx_netdev_notifier);
2187 err_notifier:
2188 return rc;
2189}
2190
2191static void __exit efx_exit_module(void)
2192{
2193 printk(KERN_INFO "Solarflare NET driver unloading\n");
2194
2195 pci_unregister_driver(&efx_pci_driver);
2196 destroy_workqueue(refill_workqueue);
2197 unregister_netdevice_notifier(&efx_netdev_notifier);
2198
2199}
2200
2201module_init(efx_init_module);
2202module_exit(efx_exit_module);
2203
2204MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2205 "Solarflare Communications");
2206MODULE_DESCRIPTION("Solarflare Communications network driver");
2207MODULE_LICENSE("GPL");
2208MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
new file mode 100644
index 000000000000..3b2f69f4a9ab
--- /dev/null
+++ b/drivers/net/sfc/efx.h
@@ -0,0 +1,67 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_EFX_H
12#define EFX_EFX_H
13
14#include "net_driver.h"
15
16/* PCI IDs */
17#define EFX_VENDID_SFC 0x1924
18#define FALCON_A_P_DEVID 0x0703
19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710
21
22/* TX */
23extern int efx_xmit(struct efx_nic *efx,
24 struct efx_tx_queue *tx_queue, struct sk_buff *skb);
25extern void efx_stop_queue(struct efx_nic *efx);
26extern void efx_wake_queue(struct efx_nic *efx);
27
28/* RX */
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
31 unsigned int len, int checksummed, int discard);
32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
33
34/* Channels */
35extern void efx_process_channel_now(struct efx_channel *channel);
36extern int efx_flush_queues(struct efx_nic *efx);
37
38/* Ports */
39extern void efx_reconfigure_port(struct efx_nic *efx);
40
41/* Global */
42extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
43extern void efx_suspend(struct efx_nic *efx);
44extern void efx_resume(struct efx_nic *efx);
45extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
46 int rx_usecs);
47extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
48extern void efx_hex_dump(const u8 *, unsigned int, const char *);
49
50/* Dummy PHY ops for PHY drivers */
51extern int efx_port_dummy_op_int(struct efx_nic *efx);
52extern void efx_port_dummy_op_void(struct efx_nic *efx);
53extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
54
55
56extern unsigned int efx_monitor_interval;
57
58static inline void efx_schedule_channel(struct efx_channel *channel)
59{
60 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
61 channel->channel, raw_smp_processor_id());
62 channel->work_pending = 1;
63
64 netif_rx_schedule(channel->napi_dev, &channel->napi_str);
65}
66
67#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
new file mode 100644
index 000000000000..43663a4619da
--- /dev/null
+++ b/drivers/net/sfc/enum.h
@@ -0,0 +1,50 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_ENUM_H
11#define EFX_ENUM_H
12
13/*****************************************************************************/
14
15/**
16 * enum reset_type - reset types
17 *
18 * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
19 * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
20 * other values specify reasons, for which efx_schedule_reset() will
21 * choose a method.
22 *
23 * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
24 * @RESET_TYPE_ALL: reset everything but PCI core blocks
25 * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
26 * @RESET_TYPE_DISABLE: disable NIC
27 * @RESET_TYPE_MONITOR: reset due to hardware monitor
28 * @RESET_TYPE_INT_ERROR: reset due to internal error
29 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
30 * @RESET_TYPE_RX_DESC_FETCH: PCIe error during RX descriptor fetch
31 * @RESET_TYPE_TX_DESC_FETCH: PCIe error during TX descriptor fetch
32 * @RESET_TYPE_TX_SKIP: hardware completed empty TX descriptors
33 */
34enum reset_type {
35 RESET_TYPE_NONE = -1,
36 RESET_TYPE_INVISIBLE = 0,
37 RESET_TYPE_ALL = 1,
38 RESET_TYPE_WORLD = 2,
39 RESET_TYPE_DISABLE = 3,
40 RESET_TYPE_MAX_METHOD,
41 RESET_TYPE_MONITOR,
42 RESET_TYPE_INT_ERROR,
43 RESET_TYPE_RX_RECOVERY,
44 RESET_TYPE_RX_DESC_FETCH,
45 RESET_TYPE_TX_DESC_FETCH,
46 RESET_TYPE_TX_SKIP,
47 RESET_TYPE_MAX,
48};
49
50#endif /* EFX_ENUM_H */
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
new file mode 100644
index 000000000000..ad541badbd98
--- /dev/null
+++ b/drivers/net/sfc/ethtool.c
@@ -0,0 +1,460 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h>
14#include "net_driver.h"
15#include "efx.h"
16#include "ethtool.h"
17#include "falcon.h"
18#include "gmii.h"
19#include "mac.h"
20
21static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
22
23struct ethtool_string {
24 char name[ETH_GSTRING_LEN];
25};
26
27struct efx_ethtool_stat {
28 const char *name;
29 enum {
30 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
31 EFX_ETHTOOL_STAT_SOURCE_nic,
32 EFX_ETHTOOL_STAT_SOURCE_channel
33 } source;
34 unsigned offset;
35 u64(*get_stat) (void *field); /* Reader function */
36};
37
38/* Initialiser for a struct #efx_ethtool_stat with type-checking */
39#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
40 get_stat_function) { \
41 .name = #stat_name, \
42 .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
43 .offset = ((((field_type *) 0) == \
44 &((struct efx_##source_name *)0)->field) ? \
45 offsetof(struct efx_##source_name, field) : \
46 offsetof(struct efx_##source_name, field)), \
47 .get_stat = get_stat_function, \
48}
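/* The conditional expression in .offset above is a compile-time type
 * check: comparing ((field_type *) 0) against
 * &((struct efx_source *) 0)->field makes the compiler warn when the
 * field's type does not match the reader function, while both arms of
 * the ?: still evaluate to the same offsetof(). For example,
 *
 *	EFX_ETHTOOL_STAT(rx_good, mac_stats, rx_good,
 *			 unsigned long, efx_get_ulong_stat)
 *
 * warns at build time if efx_mac_stats.rx_good is not unsigned long.
 */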
49
50static u64 efx_get_uint_stat(void *field)
51{
52 return *(unsigned int *)field;
53}
54
55static u64 efx_get_ulong_stat(void *field)
56{
57 return *(unsigned long *)field;
58}
59
60static u64 efx_get_u64_stat(void *field)
61{
62 return *(u64 *) field;
63}
64
65static u64 efx_get_atomic_stat(void *field)
66{
67 return atomic_read((atomic_t *) field);
68}
69
70#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
71 EFX_ETHTOOL_STAT(field, mac_stats, field, \
72 unsigned long, efx_get_ulong_stat)
73
74#define EFX_ETHTOOL_U64_MAC_STAT(field) \
75 EFX_ETHTOOL_STAT(field, mac_stats, field, \
76 u64, efx_get_u64_stat)
77
78#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
79 EFX_ETHTOOL_STAT(name, nic, n_##name, \
80 unsigned int, efx_get_uint_stat)
81
82#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
83 EFX_ETHTOOL_STAT(field, nic, field, \
84 atomic_t, efx_get_atomic_stat)
85
86#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
88 unsigned int, efx_get_uint_stat)
89
90static struct efx_ethtool_stat efx_ethtool_stats[] = {
91 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
92 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
93 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
94 EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
95 EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
96 EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
97 EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
98 EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
99 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
100 EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
101 EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
102 EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
103 EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
104 EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
105 EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
106 EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
107 EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
108 EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
109 EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
110 EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
111 EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
112 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
113 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
114 EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
115 EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
119 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
122 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
123 EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
124 EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
125 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
126 EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
127 EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
128 EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
129 EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
130 EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
131 EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
132 EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
133 EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
134 EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
135 EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
136 EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
137 EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
138 EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
139 EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
140 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
141 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
142 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
143 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
144 EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
145 EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
146 EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
147 EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
148 EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
149 EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
150 EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
151 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
152 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157};
158
159/* Number of ethtool statistics */
160#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
161
162/**************************************************************************
163 *
164 * Ethtool operations
165 *
166 **************************************************************************
167 */
168
169/* Identify device by flashing LEDs */
170static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
171{
172 struct efx_nic *efx = net_dev->priv;
173
174 efx->board_info.blink(efx, 1);
175 schedule_timeout_interruptible(seconds * HZ);
176 efx->board_info.blink(efx, 0);
177 return 0;
178}
179
180/* This must be called with rtnl_lock held. */
181int efx_ethtool_get_settings(struct net_device *net_dev,
182 struct ethtool_cmd *ecmd)
183{
184 struct efx_nic *efx = net_dev->priv;
185 int rc;
186
187 mutex_lock(&efx->mac_lock);
188 rc = falcon_xmac_get_settings(efx, ecmd);
189 mutex_unlock(&efx->mac_lock);
190
191 return rc;
192}
193
194/* This must be called with rtnl_lock held. */
195int efx_ethtool_set_settings(struct net_device *net_dev,
196 struct ethtool_cmd *ecmd)
197{
198 struct efx_nic *efx = net_dev->priv;
199 int rc;
200
201 mutex_lock(&efx->mac_lock);
202 rc = falcon_xmac_set_settings(efx, ecmd);
203 mutex_unlock(&efx->mac_lock);
204 if (!rc)
205 efx_reconfigure_port(efx);
206
207 return rc;
208}
209
210static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
211 struct ethtool_drvinfo *info)
212{
213 struct efx_nic *efx = net_dev->priv;
214
215 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
216 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
217 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
218}
219
220static int efx_ethtool_get_stats_count(struct net_device *net_dev)
221{
222 return EFX_ETHTOOL_NUM_STATS;
223}
224
225static void efx_ethtool_get_strings(struct net_device *net_dev,
226 u32 string_set, u8 *strings)
227{
228 struct ethtool_string *ethtool_strings =
229 (struct ethtool_string *)strings;
230 int i;
231
232 if (string_set == ETH_SS_STATS)
233 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
234 strncpy(ethtool_strings[i].name,
235 efx_ethtool_stats[i].name,
236 sizeof(ethtool_strings[i].name));
237}
238
239static void efx_ethtool_get_stats(struct net_device *net_dev,
240 struct ethtool_stats *stats,
241 u64 *data)
242{
243 struct efx_nic *efx = net_dev->priv;
244 struct efx_mac_stats *mac_stats = &efx->mac_stats;
245 struct efx_ethtool_stat *stat;
246 struct efx_channel *channel;
247 int i;
248
249 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
250
251 /* Update MAC and NIC statistics */
252 net_dev->get_stats(net_dev);
253
254 /* Fill detailed statistics buffer */
255 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
256 stat = &efx_ethtool_stats[i];
257 switch (stat->source) {
258 case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
259 data[i] = stat->get_stat((void *)mac_stats +
260 stat->offset);
261 break;
262 case EFX_ETHTOOL_STAT_SOURCE_nic:
263 data[i] = stat->get_stat((void *)efx + stat->offset);
264 break;
265 case EFX_ETHTOOL_STAT_SOURCE_channel:
266 data[i] = 0;
267 efx_for_each_channel(channel, efx)
268 data[i] += stat->get_stat((void *)channel +
269 stat->offset);
270 break;
271 }
272 }
273}
274
275static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
276{
277 struct efx_nic *efx = net_dev->priv;
278 int rc;
279
280 rc = ethtool_op_set_tx_csum(net_dev, enable);
281 if (rc)
282 return rc;
283
284 efx_flush_queues(efx);
285
286 return 0;
287}
288
289static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
290{
291 struct efx_nic *efx = net_dev->priv;
292
293 /* No way to stop the hardware doing the checks; we just
294 * ignore the result.
295 */
296 efx->rx_checksum_enabled = (enable ? 1 : 0);
297
298 return 0;
299}
300
301static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
302{
303 struct efx_nic *efx = net_dev->priv;
304
305 return efx->rx_checksum_enabled;
306}
307
308/* Restart autonegotiation */
309static int efx_ethtool_nway_reset(struct net_device *net_dev)
310{
311 struct efx_nic *efx = net_dev->priv;
312
313 return mii_nway_restart(&efx->mii);
314}
315
316static u32 efx_ethtool_get_link(struct net_device *net_dev)
317{
318 struct efx_nic *efx = net_dev->priv;
319
320 return efx->link_up;
321}
322
323static int efx_ethtool_get_coalesce(struct net_device *net_dev,
324 struct ethtool_coalesce *coalesce)
325{
326 struct efx_nic *efx = net_dev->priv;
327 struct efx_tx_queue *tx_queue;
328 struct efx_rx_queue *rx_queue;
329 struct efx_channel *channel;
330
331 memset(coalesce, 0, sizeof(*coalesce));
332
333 /* Find lowest IRQ moderation across all used TX queues */
334 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
335 efx_for_each_tx_queue(tx_queue, efx) {
336 channel = tx_queue->channel;
337 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
338 if (channel->used_flags != EFX_USED_BY_RX_TX)
339 coalesce->tx_coalesce_usecs_irq =
340 channel->irq_moderation;
341 else
342 coalesce->tx_coalesce_usecs_irq = 0;
343 }
344 }
345
346 /* Find lowest IRQ moderation across all used RX queues */
347 coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
348 efx_for_each_rx_queue(rx_queue, efx) {
349 channel = rx_queue->channel;
350 if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
351 coalesce->rx_coalesce_usecs_irq =
352 channel->irq_moderation;
353 }
354
355 return 0;
356}
357
358/* Set coalescing parameters.
359 * The difficult cases are channels shared between TX and RX.
360 */
361static int efx_ethtool_set_coalesce(struct net_device *net_dev,
362 struct ethtool_coalesce *coalesce)
363{
364 struct efx_nic *efx = net_dev->priv;
365 struct efx_channel *channel;
366 struct efx_tx_queue *tx_queue;
367 unsigned tx_usecs, rx_usecs;
368
369 if (coalesce->use_adaptive_rx_coalesce ||
370 coalesce->use_adaptive_tx_coalesce)
371 return -EOPNOTSUPP;
372
373 if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
374 EFX_ERR(efx, "invalid coalescing setting. "
375 "Only rx/tx_coalesce_usecs_irq are supported\n");
376 return -EOPNOTSUPP;
377 }
378
379 rx_usecs = coalesce->rx_coalesce_usecs_irq;
380 tx_usecs = coalesce->tx_coalesce_usecs_irq;
381
382 /* If the channel is shared only allow RX parameters to be set */
383 efx_for_each_tx_queue(tx_queue, efx) {
384 if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
385 tx_usecs) {
386 EFX_ERR(efx, "Channel is shared. "
387 "Only RX coalescing may be set\n");
388 return -EOPNOTSUPP;
389 }
390 }
391
392 efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
393
394 /* Reset channel to pick up new moderation value. Note that
395 * this may change the value of the irq_moderation field
396 * (e.g. to allow for hardware timer granularity).
397 */
398 efx_for_each_channel(channel, efx)
399 falcon_set_int_moderation(channel);
400
401 return 0;
402}
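/* Only the per-IRQ values are accepted from userspace; an illustrative
 * ethtool invocation (interface name hypothetical) would be:
 *
 *	ethtool -C eth0 rx-usecs-irq 60 tx-usecs-irq 60
 *
 * whereas plain rx-usecs/tx-usecs or the adaptive options fail with
 * EOPNOTSUPP, as enforced above.
 */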
403
404static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
405 struct ethtool_pauseparam *pause)
406{
407 struct efx_nic *efx = net_dev->priv;
408 enum efx_fc_type flow_control = efx->flow_control;
409 int rc;
410
411 flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO);
412 flow_control |= pause->rx_pause ? EFX_FC_RX : 0;
413 flow_control |= pause->tx_pause ? EFX_FC_TX : 0;
414 flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
415
416 /* Try to push the pause parameters */
417 mutex_lock(&efx->mac_lock);
418 rc = falcon_xmac_set_pause(efx, flow_control);
419 mutex_unlock(&efx->mac_lock);
420
421 if (!rc)
422 efx_reconfigure_port(efx);
423
424 return rc;
425}
426
427static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
428 struct ethtool_pauseparam *pause)
429{
430 struct efx_nic *efx = net_dev->priv;
431
432 pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
433 pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
434 pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
435}
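The rx/tx/autoneg handling above is plain bitmask packing. A minimal user-space sketch of the same arithmetic (the EFX_FC_* values here are illustrative assumptions standing in for enum efx_fc_type, which is defined in enum.h):

#include <stdio.h>

/* Illustrative values only - the real definitions live in enum.h */
#define EFX_FC_RX   1
#define EFX_FC_TX   2
#define EFX_FC_AUTO 4

int main(void)
{
	unsigned int flow_control = 0;
	int rx_pause = 1, tx_pause = 0, autoneg = 1;

	/* Packing, as in efx_ethtool_set_pauseparam() */
	flow_control |= rx_pause ? EFX_FC_RX : 0;
	flow_control |= tx_pause ? EFX_FC_TX : 0;
	flow_control |= autoneg ? EFX_FC_AUTO : 0;

	/* Unpacking, as in efx_ethtool_get_pauseparam() */
	printf("flow_control=%#x rx=%d tx=%d autoneg=%d\n", flow_control,
	       (flow_control & EFX_FC_RX) ? 1 : 0,
	       (flow_control & EFX_FC_TX) ? 1 : 0,
	       (flow_control & EFX_FC_AUTO) ? 1 : 0);
	return 0;
}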
436
437
438struct ethtool_ops efx_ethtool_ops = {
439 .get_settings = efx_ethtool_get_settings,
440 .set_settings = efx_ethtool_set_settings,
441 .get_drvinfo = efx_ethtool_get_drvinfo,
442 .nway_reset = efx_ethtool_nway_reset,
443 .get_link = efx_ethtool_get_link,
444 .get_coalesce = efx_ethtool_get_coalesce,
445 .set_coalesce = efx_ethtool_set_coalesce,
446 .get_pauseparam = efx_ethtool_get_pauseparam,
447 .set_pauseparam = efx_ethtool_set_pauseparam,
448 .get_rx_csum = efx_ethtool_get_rx_csum,
449 .set_rx_csum = efx_ethtool_set_rx_csum,
450 .get_tx_csum = ethtool_op_get_tx_csum,
451 .set_tx_csum = efx_ethtool_set_tx_csum,
452 .get_sg = ethtool_op_get_sg,
453 .set_sg = ethtool_op_set_sg,
454 .get_flags = ethtool_op_get_flags,
455 .set_flags = ethtool_op_set_flags,
456 .get_strings = efx_ethtool_get_strings,
457 .phys_id = efx_ethtool_phys_id,
458 .get_stats_count = efx_ethtool_get_stats_count,
459 .get_ethtool_stats = efx_ethtool_get_stats,
460};
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
new file mode 100644
index 000000000000..3628e43df14d
--- /dev/null
+++ b/drivers/net/sfc/ethtool.h
@@ -0,0 +1,27 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_ETHTOOL_H
12#define EFX_ETHTOOL_H
13
14#include "net_driver.h"
15
16/*
17 * Ethtool support
18 */
19
20extern int efx_ethtool_get_settings(struct net_device *net_dev,
21 struct ethtool_cmd *ecmd);
22extern int efx_ethtool_set_settings(struct net_device *net_dev,
23 struct ethtool_cmd *ecmd);
24
25extern struct ethtool_ops efx_ethtool_ops;
26
27#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
new file mode 100644
index 000000000000..46db549ce580
--- /dev/null
+++ b/drivers/net/sfc/falcon.c
@@ -0,0 +1,2722 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/pci.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
16#include "net_driver.h"
17#include "bitfield.h"
18#include "efx.h"
19#include "mac.h"
20#include "gmii.h"
21#include "spi.h"
22#include "falcon.h"
23#include "falcon_hwdefs.h"
24#include "falcon_io.h"
25#include "mdio_10g.h"
26#include "phy.h"
27#include "boards.h"
28#include "workarounds.h"
29
30/* Falcon hardware control.
31 * Falcon is the internal codename for the SFC4000 controller that is
32 * present in SFE400X evaluation boards
33 */
34
35/**
36 * struct falcon_nic_data - Falcon NIC state
37 * @next_buffer_table: First available buffer table id
38 * @pci_dev2: The secondary PCI device if present
39 */
40struct falcon_nic_data {
41 unsigned next_buffer_table;
42 struct pci_dev *pci_dev2;
43};
44
45/**************************************************************************
46 *
47 * Configurable values
48 *
49 **************************************************************************
50 */
51
52static int disable_dma_stats;
53
54/* This is set to 16 for a good reason. In summary, if larger than
55 * 16, the descriptor cache holds more than a default socket
56 * buffer's worth of packets (for UDP we can only have at most one
57 * socket buffer's worth outstanding). This combined with the fact
58 * that we only get 1 TX event per descriptor cache means the NIC
59 * goes idle.
60 */
61#define TX_DC_ENTRIES 16
62#define TX_DC_ENTRIES_ORDER 0
63#define TX_DC_BASE 0x130000
64
65#define RX_DC_ENTRIES 64
66#define RX_DC_ENTRIES_ORDER 2
67#define RX_DC_BASE 0x100000
68
69/* RX FIFO XOFF watermark
70 *
 71 * When the amount of the RX FIFO used increases past this
 72 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A).
 73 * This also has an effect on RX/TX arbitration.
74 */
75static int rx_xoff_thresh_bytes = -1;
76module_param(rx_xoff_thresh_bytes, int, 0644);
77MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
78
79/* RX FIFO XON watermark
80 *
81 * When the amount of the RX FIFO used decreases below this
 82 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A).
 83 * This also has an effect on RX/TX arbitration.
84 */
85static int rx_xon_thresh_bytes = -1;
86module_param(rx_xon_thresh_bytes, int, 0644);
87MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
88
89/* TX descriptor ring size - min 512 max 4k */
90#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
91#define FALCON_TXD_RING_SIZE 1024
92#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
93
94/* RX descriptor ring size - min 512 max 4k */
95#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
96#define FALCON_RXD_RING_SIZE 1024
97#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
98
99/* Event queue size - max 32k */
100#define FALCON_EVQ_ORDER EVQ_SIZE_4K
101#define FALCON_EVQ_SIZE 4096
102#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
103
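All of the sizes above are powers of two, so the corresponding _MASK macros let a free-running counter be reduced to a ring index with a single AND; wraparound of the 32-bit counter needs no special handling. A standalone illustration (not driver code):

#include <stdio.h>

#define RING_SIZE 1024			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	unsigned int count = 0xfffffffeu;	/* counter about to wrap */
	int i;

	for (i = 0; i < 4; i++, count++)
		printf("count=%#010x -> index=%u\n", count, count & RING_MASK);
	return 0;
}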
104/* Max number of internal errors. After this resets will not be performed */
105#define FALCON_MAX_INT_ERRORS 4
106
107/* Maximum period that we wait for flush events. If the flush event
108 * doesn't arrive in this period of time then we check if the queue
109 * was disabled anyway. */
110#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
111
112/**************************************************************************
113 *
114 * Falcon constants
115 *
116 **************************************************************************
117 */
118
119/* DMA address mask (up to 46-bit, avoiding compiler warnings)
120 *
121 * Note that it is possible to have a platform with 64-bit longs and
122 * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
123 * platform DMA mask.
124 */
125#if BITS_PER_LONG == 64
126#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
127#else
128#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
129#endif
130
131/* TX DMA length mask (13-bit) */
132#define FALCON_TX_DMA_MASK (4096 - 1)
133
134/* Size and alignment of special buffers (4KB) */
135#define FALCON_BUF_SIZE 4096
136
137/* Dummy SRAM size code */
138#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
139
 140/* It would be nice if these (or equivalents) were in linux/pci_regs.h, but they're not. */
141#define PCI_EXP_DEVCAP_PWR_VAL_LBN 18
142#define PCI_EXP_DEVCAP_PWR_SCL_LBN 26
143#define PCI_EXP_DEVCTL_PAYLOAD_LBN 5
144#define PCI_EXP_LNKSTA_LNK_WID 0x3f0
145#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
146
147#define FALCON_IS_DUAL_FUNC(efx) \
148 (FALCON_REV(efx) < FALCON_REV_B0)
149
150/**************************************************************************
151 *
152 * Falcon hardware access
153 *
154 **************************************************************************/
155
156/* Read the current event from the event queue */
157static inline efx_qword_t *falcon_event(struct efx_channel *channel,
158 unsigned int index)
159{
160 return (((efx_qword_t *) (channel->eventq.addr)) + index);
161}
162
163/* See if an event is present
164 *
165 * We check both the high and low dword of the event for all ones. We
166 * wrote all ones when we cleared the event, and no valid event can
167 * have all ones in either its high or low dwords. This approach is
168 * robust against reordering.
169 *
170 * Note that using a single 64-bit comparison is incorrect; even
171 * though the CPU read will be atomic, the DMA write may not be.
172 */
173static inline int falcon_event_present(efx_qword_t *event)
174{
175 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
176 EFX_DWORD_IS_ALL_ONES(event->dword[1])));
177}
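The same detection logic as a self-contained sketch in plain C, with two explicit 32-bit halves standing in for the efx_qword_t accessors (illustrative only):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* An event slot is "empty" while either 32-bit half still holds the
 * all-ones pattern written when the slot was cleared. Checking the
 * halves separately is robust against the DMA write landing one half
 * at a time. */
static int event_present(const uint32_t dword[2])
{
	return !((dword[0] == 0xffffffffu) | (dword[1] == 0xffffffffu));
}

int main(void)
{
	uint32_t slot[2];

	memset(slot, 0xff, sizeof(slot));	/* freshly cleared slot */
	printf("cleared: present=%d\n", event_present(slot));

	slot[0] = 0x12345678u;			/* only half has been written */
	printf("half-written: present=%d\n", event_present(slot));

	slot[1] = 0x9abcdef0u;			/* write completed */
	printf("complete: present=%d\n", event_present(slot));
	return 0;
}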
178
179/**************************************************************************
180 *
 181 * I2C bus - this is a bit-bashing interface using GPIO pins.
 182 * Note that it uses the output enables to tristate the outputs;
 183 * SDA is the data pin and SCL is the clock.
184 *
185 **************************************************************************
186 */
187static void falcon_setsdascl(struct efx_i2c_interface *i2c)
188{
189 efx_oword_t reg;
190
191 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
192 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1));
193 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1));
194 falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
195}
196
197static int falcon_getsda(struct efx_i2c_interface *i2c)
198{
199 efx_oword_t reg;
200
201 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
202 return EFX_OWORD_FIELD(reg, GPIO3_IN);
203}
204
205static int falcon_getscl(struct efx_i2c_interface *i2c)
206{
207 efx_oword_t reg;
208
209 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
210 return EFX_DWORD_FIELD(reg, GPIO0_IN);
211}
212
213static struct efx_i2c_bit_operations falcon_i2c_bit_operations = {
214 .setsda = falcon_setsdascl,
215 .setscl = falcon_setsdascl,
216 .getsda = falcon_getsda,
217 .getscl = falcon_getscl,
218 .udelay = 100,
219 .mdelay = 10,
220};
221
222/**************************************************************************
223 *
224 * Falcon special buffer handling
225 * Special buffers are used for event queues and the TX and RX
226 * descriptor rings.
227 *
228 *************************************************************************/
229
230/*
231 * Initialise a Falcon special buffer
232 *
233 * This will define a buffer (previously allocated via
234 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
235 * it to be used for event queues, descriptor rings etc.
236 */
237static int
238falcon_init_special_buffer(struct efx_nic *efx,
239 struct efx_special_buffer *buffer)
240{
241 efx_qword_t buf_desc;
242 int index;
243 dma_addr_t dma_addr;
244 int i;
245
246 EFX_BUG_ON_PARANOID(!buffer->addr);
247
248 /* Write buffer descriptors to NIC */
249 for (i = 0; i < buffer->entries; i++) {
250 index = buffer->index + i;
251 dma_addr = buffer->dma_addr + (i * 4096);
252 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
253 index, (unsigned long long)dma_addr);
254 EFX_POPULATE_QWORD_4(buf_desc,
255 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
256 BUF_ADR_REGION, 0,
257 BUF_ADR_FBUF, (dma_addr >> 12),
258 BUF_OWNER_ID_FBUF, 0);
259 falcon_write_sram(efx, &buf_desc, index);
260 }
261
262 return 0;
263}
264
265/* Unmaps a buffer from Falcon and clears the buffer table entries */
266static void
267falcon_fini_special_buffer(struct efx_nic *efx,
268 struct efx_special_buffer *buffer)
269{
270 efx_oword_t buf_tbl_upd;
271 unsigned int start = buffer->index;
272 unsigned int end = (buffer->index + buffer->entries - 1);
273
274 if (!buffer->entries)
275 return;
276
277 EFX_LOG(efx, "unmapping special buffers %d-%d\n",
278 buffer->index, buffer->index + buffer->entries - 1);
279
280 EFX_POPULATE_OWORD_4(buf_tbl_upd,
281 BUF_UPD_CMD, 0,
282 BUF_CLR_CMD, 1,
283 BUF_CLR_END_ID, end,
284 BUF_CLR_START_ID, start);
285 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
286}
287
288/*
289 * Allocate a new Falcon special buffer
290 *
291 * This allocates memory for a new buffer, clears it and allocates a
292 * new buffer ID range. It does not write into Falcon's buffer table.
293 *
294 * This call will allocate 4KB buffers, since Falcon can't use 8KB
295 * buffers for event queues and descriptor rings.
296 */
297static int falcon_alloc_special_buffer(struct efx_nic *efx,
298 struct efx_special_buffer *buffer,
299 unsigned int len)
300{
301 struct falcon_nic_data *nic_data = efx->nic_data;
302
303 len = ALIGN(len, FALCON_BUF_SIZE);
304
305 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
306 &buffer->dma_addr);
307 if (!buffer->addr)
308 return -ENOMEM;
309 buffer->len = len;
310 buffer->entries = len / FALCON_BUF_SIZE;
311 BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
312
313 /* All zeros is a potentially valid event so memset to 0xff */
314 memset(buffer->addr, 0xff, len);
315
316 /* Select new buffer ID */
317 buffer->index = nic_data->next_buffer_table;
318 nic_data->next_buffer_table += buffer->entries;
319
320 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
321 "(virt %p phys %lx)\n", buffer->index,
322 buffer->index + buffer->entries - 1,
323 (unsigned long long)buffer->dma_addr, len,
324 buffer->addr, virt_to_phys(buffer->addr));
325
326 return 0;
327}
328
329static void falcon_free_special_buffer(struct efx_nic *efx,
330 struct efx_special_buffer *buffer)
331{
332 if (!buffer->addr)
333 return;
334
335 EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
336 "(virt %p phys %lx)\n", buffer->index,
337 buffer->index + buffer->entries - 1,
338 (unsigned long long)buffer->dma_addr, buffer->len,
339 buffer->addr, virt_to_phys(buffer->addr));
340
341 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
342 buffer->dma_addr);
343 buffer->addr = NULL;
344 buffer->entries = 0;
345}
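Taken together these four functions form the special-buffer lifecycle: falcon_alloc_special_buffer() obtains DMA memory and a buffer-ID range, falcon_init_special_buffer() writes the buffer table entries, falcon_fini_special_buffer() clears them again, and falcon_free_special_buffer() releases the memory. The probe/init/fini/remove entry points for the TX, RX and event queues below follow exactly this pairing.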
346
347/**************************************************************************
348 *
349 * Falcon generic buffer handling
350 * These buffers are used for interrupt status and MAC stats
351 *
352 **************************************************************************/
353
354static int falcon_alloc_buffer(struct efx_nic *efx,
355 struct efx_buffer *buffer, unsigned int len)
356{
357 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
358 &buffer->dma_addr);
359 if (!buffer->addr)
360 return -ENOMEM;
361 buffer->len = len;
362 memset(buffer->addr, 0, len);
363 return 0;
364}
365
366static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
367{
368 if (buffer->addr) {
369 pci_free_consistent(efx->pci_dev, buffer->len,
370 buffer->addr, buffer->dma_addr);
371 buffer->addr = NULL;
372 }
373}
374
375/**************************************************************************
376 *
377 * Falcon TX path
378 *
379 **************************************************************************/
380
381/* Returns a pointer to the specified transmit descriptor in the TX
382 * descriptor queue belonging to the specified channel.
383 */
384static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
385 unsigned int index)
386{
387 return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
388}
389
390/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
391static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
392{
393 unsigned write_ptr;
394 efx_dword_t reg;
395
396 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
397 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
398 falcon_writel_page(tx_queue->efx, &reg,
399 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
400}
401
402
403/* For each entry inserted into the software descriptor ring, create a
404 * descriptor in the hardware TX descriptor ring (in host memory), and
405 * write a doorbell.
406 */
407void falcon_push_buffers(struct efx_tx_queue *tx_queue)
408{
409
410 struct efx_tx_buffer *buffer;
411 efx_qword_t *txd;
412 unsigned write_ptr;
413
414 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
415
416 do {
417 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
418 buffer = &tx_queue->buffer[write_ptr];
419 txd = falcon_tx_desc(tx_queue, write_ptr);
420 ++tx_queue->write_count;
421
422 /* Create TX descriptor ring entry */
423 EFX_POPULATE_QWORD_5(*txd,
424 TX_KER_PORT, 0,
425 TX_KER_CONT, buffer->continuation,
426 TX_KER_BYTE_CNT, buffer->len,
427 TX_KER_BUF_REGION, 0,
428 TX_KER_BUF_ADR, buffer->dma_addr);
429 } while (tx_queue->write_count != tx_queue->insert_count);
430
431 wmb(); /* Ensure descriptors are written before they are fetched */
432 falcon_notify_tx_desc(tx_queue);
433}
434
435/* Allocate hardware resources for a TX queue */
436int falcon_probe_tx(struct efx_tx_queue *tx_queue)
437{
438 struct efx_nic *efx = tx_queue->efx;
439 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
440 FALCON_TXD_RING_SIZE *
441 sizeof(efx_qword_t));
442}
443
444int falcon_init_tx(struct efx_tx_queue *tx_queue)
445{
446 efx_oword_t tx_desc_ptr;
447 struct efx_nic *efx = tx_queue->efx;
448 int rc;
449
450 /* Pin TX descriptor ring */
451 rc = falcon_init_special_buffer(efx, &tx_queue->txd);
452 if (rc)
453 return rc;
454
455 /* Push TX descriptor ring to card */
456 EFX_POPULATE_OWORD_10(tx_desc_ptr,
457 TX_DESCQ_EN, 1,
458 TX_ISCSI_DDIG_EN, 0,
459 TX_ISCSI_HDIG_EN, 0,
460 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
461 TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
462 TX_DESCQ_OWNER_ID, 0,
463 TX_DESCQ_LABEL, tx_queue->queue,
464 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
465 TX_DESCQ_TYPE, 0,
466 TX_NON_IP_DROP_DIS_B0, 1);
467
468 if (FALCON_REV(efx) >= FALCON_REV_B0) {
469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
472 }
473
474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
475 tx_queue->queue);
476
477 if (FALCON_REV(efx) < FALCON_REV_B0) {
478 efx_oword_t reg;
479
480 BUG_ON(tx_queue->queue >= 128); /* HW limit */
481
482 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
483 if (efx->net_dev->features & NETIF_F_IP_CSUM)
484 clear_bit_le(tx_queue->queue, (void *)&reg);
485 else
486 set_bit_le(tx_queue->queue, (void *)&reg);
487 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
488 }
489
490 return 0;
491}
492
493static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
494{
495 struct efx_nic *efx = tx_queue->efx;
496 struct efx_channel *channel = &efx->channel[0];
497 efx_oword_t tx_flush_descq;
498 unsigned int read_ptr, i;
499
500 /* Post a flush command */
501 EFX_POPULATE_OWORD_2(tx_flush_descq,
502 TX_FLUSH_DESCQ_CMD, 1,
503 TX_FLUSH_DESCQ, tx_queue->queue);
504 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
505 msleep(FALCON_FLUSH_TIMEOUT);
506
507 if (EFX_WORKAROUND_7803(efx))
508 return 0;
509
510 /* Look for a flush completed event */
511 read_ptr = channel->eventq_read_ptr;
512 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
513 efx_qword_t *event = falcon_event(channel, read_ptr);
514 int ev_code, ev_sub_code, ev_queue;
515 if (!falcon_event_present(event))
516 break;
517
518 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
519 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
520 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
521 if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
522 (ev_queue == tx_queue->queue)) {
 523 EFX_LOG(efx, "tx queue %d flush command successful\n",
524 tx_queue->queue);
525 return 0;
526 }
527
528 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
529 }
530
531 if (EFX_WORKAROUND_11557(efx)) {
532 efx_oword_t reg;
533 int enabled;
534
535 falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue);
537 enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
538 if (!enabled) {
539 EFX_LOG(efx, "tx queue %d disabled without a "
540 "flush event seen\n", tx_queue->queue);
541 return 0;
542 }
543 }
544
545 EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
546 return -ETIMEDOUT;
547}
548
549void falcon_fini_tx(struct efx_tx_queue *tx_queue)
550{
551 struct efx_nic *efx = tx_queue->efx;
552 efx_oword_t tx_desc_ptr;
553
554 /* Stop the hardware using the queue */
555 if (falcon_flush_tx_queue(tx_queue))
556 EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
557
558 /* Remove TX descriptor ring from card */
559 EFX_ZERO_OWORD(tx_desc_ptr);
560 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
561 tx_queue->queue);
562
563 /* Unpin TX descriptor ring */
564 falcon_fini_special_buffer(efx, &tx_queue->txd);
565}
566
567/* Free buffers backing TX queue */
568void falcon_remove_tx(struct efx_tx_queue *tx_queue)
569{
570 falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
571}
572
573/**************************************************************************
574 *
575 * Falcon RX path
576 *
577 **************************************************************************/
578
579/* Returns a pointer to the specified descriptor in the RX descriptor queue */
580static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
581 unsigned int index)
582{
583 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
584}
585
586/* This creates an entry in the RX descriptor queue */
587static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
588 unsigned index)
589{
590 struct efx_rx_buffer *rx_buf;
591 efx_qword_t *rxd;
592
593 rxd = falcon_rx_desc(rx_queue, index);
594 rx_buf = efx_rx_buffer(rx_queue, index);
595 EFX_POPULATE_QWORD_3(*rxd,
596 RX_KER_BUF_SIZE,
597 rx_buf->len -
598 rx_queue->efx->type->rx_buffer_padding,
599 RX_KER_BUF_REGION, 0,
600 RX_KER_BUF_ADR, rx_buf->dma_addr);
601}
602
603/* This writes to the RX_DESC_WPTR register for the specified receive
604 * descriptor ring.
605 */
606void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
607{
608 efx_dword_t reg;
609 unsigned write_ptr;
610
611 while (rx_queue->notified_count != rx_queue->added_count) {
612 falcon_build_rx_desc(rx_queue,
613 rx_queue->notified_count &
614 FALCON_RXD_RING_MASK);
615 ++rx_queue->notified_count;
616 }
617
618 wmb();
619 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
620 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
621 falcon_writel_page(rx_queue->efx, &reg,
622 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
623}
624
625int falcon_probe_rx(struct efx_rx_queue *rx_queue)
626{
627 struct efx_nic *efx = rx_queue->efx;
628 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
629 FALCON_RXD_RING_SIZE *
630 sizeof(efx_qword_t));
631}
632
633int falcon_init_rx(struct efx_rx_queue *rx_queue)
634{
635 efx_oword_t rx_desc_ptr;
636 struct efx_nic *efx = rx_queue->efx;
637 int rc;
638 int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
639 int iscsi_digest_en = is_b0;
640
641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
642 rx_queue->queue, rx_queue->rxd.index,
643 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
644
645 /* Pin RX descriptor ring */
646 rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
647 if (rc)
648 return rc;
649
650 /* Push RX descriptor ring to card */
651 EFX_POPULATE_OWORD_10(rx_desc_ptr,
652 RX_ISCSI_DDIG_EN, iscsi_digest_en,
653 RX_ISCSI_HDIG_EN, iscsi_digest_en,
654 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
655 RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
656 RX_DESCQ_OWNER_ID, 0,
657 RX_DESCQ_LABEL, rx_queue->queue,
658 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
659 RX_DESCQ_TYPE, 0 /* kernel queue */ ,
 660 /* For >=B0 scatter is used instead, so disable jumbo mode */
661 RX_DESCQ_JUMBO, !is_b0,
662 RX_DESCQ_EN, 1);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue);
665 return 0;
666}
667
668static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
669{
670 struct efx_nic *efx = rx_queue->efx;
671 struct efx_channel *channel = &efx->channel[0];
672 unsigned int read_ptr, i;
673 efx_oword_t rx_flush_descq;
674
675 /* Post a flush command */
676 EFX_POPULATE_OWORD_2(rx_flush_descq,
677 RX_FLUSH_DESCQ_CMD, 1,
678 RX_FLUSH_DESCQ, rx_queue->queue);
679 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
680 msleep(FALCON_FLUSH_TIMEOUT);
681
682 if (EFX_WORKAROUND_7803(efx))
683 return 0;
684
685 /* Look for a flush completed event */
686 read_ptr = channel->eventq_read_ptr;
687 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
688 efx_qword_t *event = falcon_event(channel, read_ptr);
689 int ev_code, ev_sub_code, ev_queue, ev_failed;
690 if (!falcon_event_present(event))
691 break;
692
693 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
694 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
695 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
696 ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
697
698 if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
699 (ev_queue == rx_queue->queue)) {
700 if (ev_failed) {
701 EFX_INFO(efx, "rx queue %d flush command "
702 "failed\n", rx_queue->queue);
703 return -EAGAIN;
704 } else {
705 EFX_LOG(efx, "rx queue %d flush command "
706 "succesful\n", rx_queue->queue);
707 return 0;
708 }
709 }
710
711 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
712 }
713
714 if (EFX_WORKAROUND_11557(efx)) {
715 efx_oword_t reg;
716 int enabled;
717
718 falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
719 rx_queue->queue);
720 enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
721 if (!enabled) {
722 EFX_LOG(efx, "rx queue %d disabled without a "
723 "flush event seen\n", rx_queue->queue);
724 return 0;
725 }
726 }
727
728 EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
729 return -ETIMEDOUT;
730}
731
732void falcon_fini_rx(struct efx_rx_queue *rx_queue)
733{
734 efx_oword_t rx_desc_ptr;
735 struct efx_nic *efx = rx_queue->efx;
736 int i, rc;
737
 738 /* Try to flush the RX queue. The flush may need to be repeated. */
739 for (i = 0; i < 5; i++) {
740 rc = falcon_flush_rx_queue(rx_queue);
741 if (rc == -EAGAIN)
742 continue;
743 break;
744 }
745 if (rc)
746 EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
747
748 /* Remove RX descriptor ring from card */
749 EFX_ZERO_OWORD(rx_desc_ptr);
750 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
751 rx_queue->queue);
752
753 /* Unpin RX descriptor ring */
754 falcon_fini_special_buffer(efx, &rx_queue->rxd);
755}
756
757/* Free buffers backing RX queue */
758void falcon_remove_rx(struct efx_rx_queue *rx_queue)
759{
760 falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
761}
762
763/**************************************************************************
764 *
765 * Falcon event queue processing
766 * Event queues are processed by per-channel tasklets.
767 *
768 **************************************************************************/
769
770/* Update a channel's event queue's read pointer (RPTR) register
771 *
772 * This writes the EVQ_RPTR_REG register for the specified channel's
773 * event queue.
774 *
775 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
776 * whereas channel->eventq_read_ptr contains the index of the "next to
777 * read" event.
778 */
779void falcon_eventq_read_ack(struct efx_channel *channel)
780{
781 efx_dword_t reg;
782 struct efx_nic *efx = channel->efx;
783
784 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
785 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
786 channel->evqnum);
787}
788
789/* Use HW to insert a SW defined event */
790void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
791{
792 efx_oword_t drv_ev_reg;
793
794 EFX_POPULATE_OWORD_2(drv_ev_reg,
795 DRV_EV_QID, channel->evqnum,
796 DRV_EV_DATA,
797 EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
798 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
799}
800
801/* Handle a transmit completion event
802 *
803 * Falcon batches TX completion events; the message we receive is of
804 * the form "complete all TX events up to this index".
805 */
806static inline void falcon_handle_tx_event(struct efx_channel *channel,
807 efx_qword_t *event)
808{
809 unsigned int tx_ev_desc_ptr;
810 unsigned int tx_ev_q_label;
811 struct efx_tx_queue *tx_queue;
812 struct efx_nic *efx = channel->efx;
813
814 if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
815 /* Transmit completion */
816 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
817 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
818 tx_queue = &efx->tx_queue[tx_ev_q_label];
819 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
820 } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
821 /* Rewrite the FIFO write pointer */
822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
823 tx_queue = &efx->tx_queue[tx_ev_q_label];
824
825 if (NET_DEV_REGISTERED(efx))
826 netif_tx_lock(efx->net_dev);
827 falcon_notify_tx_desc(tx_queue);
828 if (NET_DEV_REGISTERED(efx))
829 netif_tx_unlock(efx->net_dev);
830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
831 EFX_WORKAROUND_10727(efx)) {
832 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
833 } else {
834 EFX_ERR(efx, "channel %d unexpected TX event "
835 EFX_QWORD_FMT"\n", channel->channel,
836 EFX_QWORD_VAL(*event));
837 }
838}
839
840/* Check received packet's destination MAC address. */
841static int check_dest_mac(struct efx_rx_queue *rx_queue,
842 const efx_qword_t *event)
843{
844 struct efx_rx_buffer *rx_buf;
845 struct efx_nic *efx = rx_queue->efx;
846 int rx_ev_desc_ptr;
847 struct ethhdr *eh;
848
849 if (efx->promiscuous)
850 return 1;
851
852 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
853 rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
854 eh = (struct ethhdr *)rx_buf->data;
855 if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
856 return 0;
857 return 1;
858}
859
860/* Detect errors included in the rx_evt_pkt_ok bit. */
861static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
862 const efx_qword_t *event,
863 unsigned *rx_ev_pkt_ok,
864 int *discard, int byte_count)
865{
866 struct efx_nic *efx = rx_queue->efx;
867 unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
868 unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
869 unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
870 unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
871 unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
872 int snap, non_ip;
873
874 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
875 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
876 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
877 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
878 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
879 RX_EV_BUF_OWNER_ID_ERR);
880 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
881 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
882 RX_EV_IP_HDR_CHKSUM_ERR);
883 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
884 RX_EV_TCP_UDP_CHKSUM_ERR);
885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
887 rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
890
891 /* Every error apart from tobe_disc and pause_frm */
892 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
893 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
894 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
895
896 snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
897 (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
898 non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
899
900 /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
901 * length field of an LLC frame, which sets TOBE_DISC. We could set
902 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
903 * protect the RX block).
904 *
905 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
906 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
907 * LLC can't encapsulate IP, so by definition
908 * these packets are NON_IP.
909 *
910 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
911 * to check this.
912 */
913 if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
914 /* If all the other flags are zero then we can state the
915 * entire packet is ok, which will flag to the kernel not
916 * to recalculate checksums.
917 */
918 if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
919 *rx_ev_pkt_ok = 1;
920
921 rx_ev_tobe_disc = 0;
922
923 /* TOBE_DISC is set for unicast mismatch. But given that
924 * we can't trust TOBE_DISC here, we must validate the dest
925 * MAC address ourselves.
926 */
927 if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
928 rx_ev_tobe_disc = 1;
929 }
930
931 /* Count errors that are not in MAC stats. */
932 if (rx_ev_frm_trunc)
933 ++rx_queue->channel->n_rx_frm_trunc;
934 else if (rx_ev_tobe_disc)
935 ++rx_queue->channel->n_rx_tobe_disc;
936 else if (rx_ev_ip_hdr_chksum_err)
937 ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
938 else if (rx_ev_tcp_udp_chksum_err)
939 ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
940 if (rx_ev_ip_frag_err)
941 ++rx_queue->channel->n_rx_ip_frag_err;
942
943 /* The frame must be discarded if any of these are true. */
944 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
945 rx_ev_tobe_disc | rx_ev_pause_frm);
946
947 /* TOBE_DISC is expected on unicast mismatches; don't print out an
948 * error message. FRM_TRUNC indicates RXDP dropped the packet due
949 * to a FIFO overflow.
950 */
951#ifdef EFX_ENABLE_DEBUG
952 if (rx_ev_other_err) {
953 EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
954 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
955 rx_queue->queue, EFX_QWORD_VAL(*event),
956 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
957 rx_ev_ip_hdr_chksum_err ?
958 " [IP_HDR_CHKSUM_ERR]" : "",
959 rx_ev_tcp_udp_chksum_err ?
960 " [TCP_UDP_CHKSUM_ERR]" : "",
961 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
962 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
963 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
964 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
965 rx_ev_pause_frm ? " [PAUSE]" : "",
966 snap ? " [SNAP/LLC]" : "");
967 }
968#endif
969
970 if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
971 efx->phy_type == PHY_TYPE_10XPRESS))
972 tenxpress_crc_err(efx);
973}
974
975/* Handle receive events that are not in-order. */
976static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
977 unsigned index)
978{
979 struct efx_nic *efx = rx_queue->efx;
980 unsigned expected, dropped;
981
982 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
983 dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
984 FALCON_RXD_RING_MASK);
985 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
986 dropped, index, expected);
987
988 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
989 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
990}
991
992/* Handle a packet received event
993 *
994 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 995 * wrong destination address.
 996 * The "is multicast" and "matches multicast filter" flags can also be used
 997 * to discard non-matching multicast packets.
998 */
999static inline int falcon_handle_rx_event(struct efx_channel *channel,
1000 const efx_qword_t *event)
1001{
1002 unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
1003 unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
1004 unsigned expected_ptr;
1005 int discard = 0, checksummed;
1006 struct efx_rx_queue *rx_queue;
1007 struct efx_nic *efx = channel->efx;
1008
1009 /* Basic packet information */
1010 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
1011 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
1012 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
1013 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
1014 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
1015
1016 rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
1017 rx_queue = &efx->rx_queue[rx_ev_q_label];
1018
1019 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
1020 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
1021 if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
1022 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1023 return rx_ev_q_label;
1024 }
1025
1026 if (likely(rx_ev_pkt_ok)) {
1027 /* If packet is marked as OK and packet type is TCP/IPv4 or
1028 * UDP/IPv4, then we can rely on the hardware checksum.
1029 */
1030 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
1031 } else {
1032 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
1033 &discard, rx_ev_byte_cnt);
1034 checksummed = 0;
1035 }
1036
1037 /* Detect multicast packets that didn't match the filter */
1038 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
1039 if (rx_ev_mcast_pkt) {
1040 unsigned int rx_ev_mcast_hash_match =
1041 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
1042
1043 if (unlikely(!rx_ev_mcast_hash_match))
1044 discard = 1;
1045 }
1046
1047 /* Handle received packet */
1048 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
1049 checksummed, discard);
1050
1051 return rx_ev_q_label;
1052}
1053
1054/* Global events are basically PHY events */
1055static void falcon_handle_global_event(struct efx_channel *channel,
1056 efx_qword_t *event)
1057{
1058 struct efx_nic *efx = channel->efx;
1059 int is_phy_event = 0, handled = 0;
1060
1061 /* Check for interrupt on either port. Some boards have a
1062 * single PHY wired to the interrupt line for port 1. */
1063 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
1064 EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1066 is_phy_event = 1;
1067
1068 if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
1069 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
1070 is_phy_event = 1;
1071
1072 if (is_phy_event) {
1073 efx->phy_op->clear_interrupt(efx);
1074 queue_work(efx->workqueue, &efx->reconfigure_work);
1075 handled = 1;
1076 }
1077
1078 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
1079 EFX_ERR(efx, "channel %d seen global RX_RESET "
1080 "event. Resetting.\n", channel->channel);
1081
1082 atomic_inc(&efx->rx_reset);
1083 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1084 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1085 handled = 1;
1086 }
1087
1088 if (!handled)
1089 EFX_ERR(efx, "channel %d unknown global event "
1090 EFX_QWORD_FMT "\n", channel->channel,
1091 EFX_QWORD_VAL(*event));
1092}
1093
1094static void falcon_handle_driver_event(struct efx_channel *channel,
1095 efx_qword_t *event)
1096{
1097 struct efx_nic *efx = channel->efx;
1098 unsigned int ev_sub_code;
1099 unsigned int ev_sub_data;
1100
1101 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
1102 ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
1103
1104 switch (ev_sub_code) {
1105 case TX_DESCQ_FLS_DONE_EV_DECODE:
1106 EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
1107 channel->channel, ev_sub_data);
1108 break;
1109 case RX_DESCQ_FLS_DONE_EV_DECODE:
1110 EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
1111 channel->channel, ev_sub_data);
1112 break;
1113 case EVQ_INIT_DONE_EV_DECODE:
1114 EFX_LOG(efx, "channel %d EVQ %d initialised\n",
1115 channel->channel, ev_sub_data);
1116 break;
1117 case SRM_UPD_DONE_EV_DECODE:
1118 EFX_TRACE(efx, "channel %d SRAM update done\n",
1119 channel->channel);
1120 break;
1121 case WAKE_UP_EV_DECODE:
1122 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
1123 channel->channel, ev_sub_data);
1124 break;
1125 case TIMER_EV_DECODE:
1126 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
1127 channel->channel, ev_sub_data);
1128 break;
1129 case RX_RECOVERY_EV_DECODE:
1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
1131 "Resetting.\n", channel->channel);
1132 efx_schedule_reset(efx,
1133 EFX_WORKAROUND_6555(efx) ?
1134 RESET_TYPE_RX_RECOVERY :
1135 RESET_TYPE_DISABLE);
1136 break;
1137 case RX_DSC_ERROR_EV_DECODE:
1138 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
1139 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1140 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1141 break;
1142 case TX_DSC_ERROR_EV_DECODE:
1143 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
1144 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1145 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1146 break;
1147 default:
1148 EFX_TRACE(efx, "channel %d unknown driver event code %d "
1149 "data %04x\n", channel->channel, ev_sub_code,
1150 ev_sub_data);
1151 break;
1152 }
1153}
1154
1155int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1156{
1157 unsigned int read_ptr;
1158 efx_qword_t event, *p_event;
1159 int ev_code;
1160 int rxq;
1161 int rxdmaqs = 0;
1162
1163 read_ptr = channel->eventq_read_ptr;
1164
1165 do {
1166 p_event = falcon_event(channel, read_ptr);
1167 event = *p_event;
1168
1169 if (!falcon_event_present(&event))
1170 /* End of events */
1171 break;
1172
1173 EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
1174 channel->channel, EFX_QWORD_VAL(event));
1175
1176 /* Clear this event by marking it all ones */
1177 EFX_SET_QWORD(*p_event);
1178
1179 ev_code = EFX_QWORD_FIELD(event, EV_CODE);
1180
1181 switch (ev_code) {
1182 case RX_IP_EV_DECODE:
1183 rxq = falcon_handle_rx_event(channel, &event);
1184 rxdmaqs |= (1 << rxq);
1185 (*rx_quota)--;
1186 break;
1187 case TX_IP_EV_DECODE:
1188 falcon_handle_tx_event(channel, &event);
1189 break;
1190 case DRV_GEN_EV_DECODE:
1191 channel->eventq_magic
1192 = EFX_QWORD_FIELD(event, EVQ_MAGIC);
1193 EFX_LOG(channel->efx, "channel %d received generated "
1194 "event "EFX_QWORD_FMT"\n", channel->channel,
1195 EFX_QWORD_VAL(event));
1196 break;
1197 case GLOBAL_EV_DECODE:
1198 falcon_handle_global_event(channel, &event);
1199 break;
1200 case DRIVER_EV_DECODE:
1201 falcon_handle_driver_event(channel, &event);
1202 break;
1203 default:
1204 EFX_ERR(channel->efx, "channel %d unknown event type %d"
1205 " (data " EFX_QWORD_FMT ")\n", channel->channel,
1206 ev_code, EFX_QWORD_VAL(event));
1207 }
1208
1209 /* Increment read pointer */
1210 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1211
1212 } while (*rx_quota);
1213
1214 channel->eventq_read_ptr = read_ptr;
1215 return rxdmaqs;
1216}
1217
1218void falcon_set_int_moderation(struct efx_channel *channel)
1219{
1220 efx_dword_t timer_cmd;
1221 struct efx_nic *efx = channel->efx;
1222
1223 /* Set timer register */
1224 if (channel->irq_moderation) {
 1225 /* Round to the resolution supported by the hardware. The
 1226 * value programmed is zero-based, so the actual interrupt
 1227 * moderation achieved is ((x + 1) * res).
1228 */
1229 unsigned int res = 5;
1230 channel->irq_moderation -= (channel->irq_moderation % res);
1231 if (channel->irq_moderation < res)
1232 channel->irq_moderation = res;
1233 EFX_POPULATE_DWORD_2(timer_cmd,
1234 TIMER_MODE, TIMER_MODE_INT_HLDOFF,
1235 TIMER_VAL,
1236 (channel->irq_moderation / res) - 1);
1237 } else {
1238 EFX_POPULATE_DWORD_2(timer_cmd,
1239 TIMER_MODE, TIMER_MODE_DIS,
1240 TIMER_VAL, 0);
1241 }
1242 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
1243 channel->evqnum);
1244
1245}
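A worked example of the rounding above as a standalone sketch (the 5us resolution mirrors the value hard-coded in the function; the requested value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int res = 5;		/* hardware timer resolution (us) */
	unsigned int requested = 23;	/* requested moderation (us) */
	unsigned int rounded, timer_val;

	rounded = requested - (requested % res);	/* 23 -> 20 */
	if (rounded < res)
		rounded = res;
	timer_val = (rounded / res) - 1;		/* programmed value: 3 */

	printf("requested %uus: TIMER_VAL=%u, achieved %uus\n",
	       requested, timer_val, (timer_val + 1) * res);
	return 0;
}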
1246
1247/* Allocate buffer table entries for event queue */
1248int falcon_probe_eventq(struct efx_channel *channel)
1249{
1250 struct efx_nic *efx = channel->efx;
1251 unsigned int evq_size;
1252
1253 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
1254 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
1255}
1256
1257int falcon_init_eventq(struct efx_channel *channel)
1258{
1259 efx_oword_t evq_ptr;
1260 struct efx_nic *efx = channel->efx;
1261 int rc;
1262
1263 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
1264 channel->channel, channel->eventq.index,
1265 channel->eventq.index + channel->eventq.entries - 1);
1266
1267 /* Pin event queue buffer */
1268 rc = falcon_init_special_buffer(efx, &channel->eventq);
1269 if (rc)
1270 return rc;
1271
1272 /* Fill event queue with all ones (i.e. empty events) */
1273 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1274
1275 /* Push event queue to card */
1276 EFX_POPULATE_OWORD_3(evq_ptr,
1277 EVQ_EN, 1,
1278 EVQ_SIZE, FALCON_EVQ_ORDER,
1279 EVQ_BUF_BASE_ID, channel->eventq.index);
1280 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1281 channel->evqnum);
1282
1283 falcon_set_int_moderation(channel);
1284
1285 return 0;
1286}
1287
1288void falcon_fini_eventq(struct efx_channel *channel)
1289{
1290 efx_oword_t eventq_ptr;
1291 struct efx_nic *efx = channel->efx;
1292
1293 /* Remove event queue from card */
1294 EFX_ZERO_OWORD(eventq_ptr);
1295 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1296 channel->evqnum);
1297
1298 /* Unpin event queue */
1299 falcon_fini_special_buffer(efx, &channel->eventq);
1300}
1301
1302/* Free buffers backing event queue */
1303void falcon_remove_eventq(struct efx_channel *channel)
1304{
1305 falcon_free_special_buffer(channel->efx, &channel->eventq);
1306}
1307
1308
1309/* Generates a test event on the event queue. A subsequent call to
1310 * process_eventq() should pick up the event and place the value of
1311 * "magic" into channel->eventq_magic;
1312 */
1313void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1314{
1315 efx_qword_t test_event;
1316
1317 EFX_POPULATE_QWORD_2(test_event,
1318 EV_CODE, DRV_GEN_EV_DECODE,
1319 EVQ_MAGIC, magic);
1320 falcon_generate_event(channel, &test_event);
1321}
1322
1323
1324/**************************************************************************
1325 *
1326 * Falcon hardware interrupts
1327 * The hardware interrupt handler does very little work; all the event
1328 * queue processing is carried out by per-channel tasklets.
1329 *
1330 **************************************************************************/
1331
1332/* Enable/disable/generate Falcon interrupts */
1333static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1334 int force)
1335{
1336 efx_oword_t int_en_reg_ker;
1337
1338 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1339 KER_INT_KER, force,
1340 DRV_INT_EN_KER, enabled);
1341 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
1342}
1343
1344void falcon_enable_interrupts(struct efx_nic *efx)
1345{
1346 efx_oword_t int_adr_reg_ker;
1347 struct efx_channel *channel;
1348
1349 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1350 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1351
1352 /* Program address */
1353 EFX_POPULATE_OWORD_2(int_adr_reg_ker,
1354 NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
1355 INT_ADR_KER, efx->irq_status.dma_addr);
1356 falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
1357
1358 /* Enable interrupts */
1359 falcon_interrupts(efx, 1, 0);
1360
1361 /* Force processing of all the channels to get the EVQ RPTRs up to
1362 date */
1363 efx_for_each_channel_with_interrupt(channel, efx)
1364 efx_schedule_channel(channel);
1365}
1366
1367void falcon_disable_interrupts(struct efx_nic *efx)
1368{
1369 /* Disable interrupts */
1370 falcon_interrupts(efx, 0, 0);
1371}
1372
1373/* Generate a Falcon test interrupt
 1374 * Interrupts must already have been enabled, otherwise nasty things
1375 * may happen.
1376 */
1377void falcon_generate_interrupt(struct efx_nic *efx)
1378{
1379 falcon_interrupts(efx, 1, 1);
1380}
1381
1382/* Acknowledge a legacy interrupt from Falcon
1383 *
1384 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
1385 *
1386 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 1387 * BIU. Interrupt acknowledge is read-sensitive, so we must write instead
 1388 * (then read to ensure the BIU collector is flushed).
1389 *
1390 * NB most hardware supports MSI interrupts
1391 */
1392static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1393{
1394 efx_dword_t reg;
1395
1396 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
1397 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
1398 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
1399}
1400
1401/* Process a fatal interrupt
1402 * Disable bus mastering ASAP and schedule a reset
1403 */
1404static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1405{
1406 struct falcon_nic_data *nic_data = efx->nic_data;
1407 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1408 efx_oword_t fatal_intr;
1409 int error, mem_perr;
1410 static int n_int_errors;
1411
1412 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
1413 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
1414
1415 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1416 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1417 EFX_OWORD_VAL(fatal_intr),
1418 error ? "disabling bus mastering" : "no recognised error");
1419 if (error == 0)
1420 goto out;
1421
 1422 /* If this is a memory parity error, dump which blocks are offending */
1423 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
1424 if (mem_perr) {
1425 efx_oword_t reg;
1426 falcon_read(efx, &reg, MEM_STAT_REG_KER);
1427 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1428 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1429 }
1430
1431 /* Disable DMA bus mastering on both devices */
1432 pci_disable_device(efx->pci_dev);
1433 if (FALCON_IS_DUAL_FUNC(efx))
1434 pci_disable_device(nic_data->pci_dev2);
1435
1436 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
1437 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1438 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1439 } else {
 1440 EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
1441 "NIC will be disabled\n");
1442 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1443 }
1444out:
1445 return IRQ_HANDLED;
1446}
1447
1448/* Handle a legacy interrupt from Falcon
 1449 * Acknowledges the interrupt and schedules event queue processing.
1450 */
1451static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1452{
1453 struct efx_nic *efx = (struct efx_nic *)dev_id;
1454 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1455 struct efx_channel *channel;
1456 efx_dword_t reg;
1457 u32 queues;
1458 int syserr;
1459
1460 /* Read the ISR which also ACKs the interrupts */
1461 falcon_readl(efx, &reg, INT_ISR0_B0);
1462 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1463
1464 /* Check to see if we have a serious error condition */
1465 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1466 if (unlikely(syserr))
1467 return falcon_fatal_interrupt(efx);
1468
1469 if (queues == 0)
1470 return IRQ_NONE;
1471
1472 efx->last_irq_cpu = raw_smp_processor_id();
1473 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1474 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1475
1476 /* Schedule processing of any interrupting queues */
1477 channel = &efx->channel[0];
1478 while (queues) {
1479 if (queues & 0x01)
1480 efx_schedule_channel(channel);
1481 channel++;
1482 queues >>= 1;
1483 }
1484
1485 return IRQ_HANDLED;
1486}
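The queue walk above is a standard set-bit iteration over the ISR value. A standalone sketch (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int queues = 0x15;	/* bits 0, 2 and 4: channels pending */
	unsigned int channel = 0;

	while (queues) {
		if (queues & 0x01)
			printf("schedule channel %u\n", channel);
		channel++;
		queues >>= 1;
	}
	return 0;
}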
1487
1488
1489static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1490{
1491 struct efx_nic *efx = (struct efx_nic *)dev_id;
1492 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1493 struct efx_channel *channel;
1494 int syserr;
1495 int queues;
1496
1497 /* Check to see if this is our interrupt. If it isn't, we
1498 * exit without having touched the hardware.
1499 */
1500 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
1501 EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
1502 raw_smp_processor_id());
1503 return IRQ_NONE;
1504 }
1505 efx->last_irq_cpu = raw_smp_processor_id();
1506 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1507 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1508
1509 /* Check to see if we have a serious error condition */
1510 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1511 if (unlikely(syserr))
1512 return falcon_fatal_interrupt(efx);
1513
1514 /* Determine interrupting queues, clear interrupt status
1515 * register and acknowledge the device interrupt.
1516 */
1517 BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
1518 queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
1519 EFX_ZERO_OWORD(*int_ker);
1520 wmb(); /* Ensure the vector is cleared before interrupt ack */
1521 falcon_irq_ack_a1(efx);
1522
1523 /* Schedule processing of any interrupting queues */
1524 channel = &efx->channel[0];
1525 while (queues) {
1526 if (queues & 0x01)
1527 efx_schedule_channel(channel);
1528 channel++;
1529 queues >>= 1;
1530 }
1531
1532 return IRQ_HANDLED;
1533}
1534
1535/* Handle an MSI interrupt from Falcon
1536 *
1537 * Handle an MSI hardware interrupt. This routine schedules event
1538 * queue processing. No interrupt acknowledgement cycle is necessary.
1539 * Also, we never need to check that the interrupt is for us, since
1540 * MSI interrupts cannot be shared.
1541 */
1542static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1543{
1544 struct efx_channel *channel = (struct efx_channel *)dev_id;
1545 struct efx_nic *efx = channel->efx;
1546 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1547 int syserr;
1548
1549 efx->last_irq_cpu = raw_smp_processor_id();
1550 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1551 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1552
1553 /* Check to see if we have a serious error condition */
1554 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1555 if (unlikely(syserr))
1556 return falcon_fatal_interrupt(efx);
1557
1558 /* Schedule processing of the channel */
1559 efx_schedule_channel(channel);
1560
1561 return IRQ_HANDLED;
1562}
1563
1564
 1565/* Set up the RSS indirection table.
 1566 * This maps from the hash value of the packet to an RX queue.
1567 */
1568static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1569{
1570 int i = 0;
1571 unsigned long offset;
1572 efx_dword_t dword;
1573
1574 if (FALCON_REV(efx) < FALCON_REV_B0)
1575 return;
1576
1577 for (offset = RX_RSS_INDIR_TBL_B0;
1578 offset < RX_RSS_INDIR_TBL_B0 + 0x800;
1579 offset += 0x10) {
1580 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
1581 i % efx->rss_queues);
1582 falcon_writel(efx, &dword, offset);
1583 i++;
1584 }
1585}
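With 0x800 bytes of table written at 0x10-byte strides, the loop above fills 128 indirection entries, spreading them round-robin across the RX queues. A user-space sketch of the resulting mapping (queue count assumed):

#include <stdio.h>

int main(void)
{
	unsigned int rss_queues = 4;	/* assumed number of RX queues */
	unsigned int i;

	for (i = 0; i < 8; i++)		/* first 8 of the 128 entries */
		printf("indirection entry %u -> RX queue %u\n",
		       i, i % rss_queues);
	return 0;
}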
1586
1587/* Hook interrupt handler(s)
1588 * Try MSI and then legacy interrupts.
1589 */
1590int falcon_init_interrupt(struct efx_nic *efx)
1591{
1592 struct efx_channel *channel;
1593 int rc;
1594
1595 if (!EFX_INT_MODE_USE_MSI(efx)) {
1596 irq_handler_t handler;
1597 if (FALCON_REV(efx) >= FALCON_REV_B0)
1598 handler = falcon_legacy_interrupt_b0;
1599 else
1600 handler = falcon_legacy_interrupt_a1;
1601
1602 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1603 efx->name, efx);
1604 if (rc) {
1605 EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
1606 efx->pci_dev->irq);
1607 goto fail1;
1608 }
1609 return 0;
1610 }
1611
1612 /* Hook MSI or MSI-X interrupt */
1613 efx_for_each_channel_with_interrupt(channel, efx) {
1614 rc = request_irq(channel->irq, falcon_msi_interrupt,
1615 IRQF_PROBE_SHARED, /* Not shared */
1616 efx->name, channel);
1617 if (rc) {
1618 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
1619 goto fail2;
1620 }
1621 }
1622
1623 return 0;
1624
1625 fail2:
1626 efx_for_each_channel_with_interrupt(channel, efx)
1627 free_irq(channel->irq, channel);
1628 fail1:
1629 return rc;
1630}
1631
1632void falcon_fini_interrupt(struct efx_nic *efx)
1633{
1634 struct efx_channel *channel;
1635 efx_oword_t reg;
1636
1637 /* Disable MSI/MSI-X interrupts */
1638 efx_for_each_channel_with_interrupt(channel, efx)
1639 if (channel->irq)
1640 free_irq(channel->irq, channel);
1641
1642 /* ACK legacy interrupt */
1643 if (FALCON_REV(efx) >= FALCON_REV_B0)
1644 falcon_read(efx, &reg, INT_ISR0_B0);
1645 else
1646 falcon_irq_ack_a1(efx);
1647
1648 /* Disable legacy interrupt */
1649 if (efx->legacy_irq)
1650 free_irq(efx->legacy_irq, efx);
1651}
1652
1653/**************************************************************************
1654 *
1655 * EEPROM/flash
1656 *
1657 **************************************************************************
1658 */
1659
1660#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
1661
1662/* Wait for SPI command completion */
1663static int falcon_spi_wait(struct efx_nic *efx)
1664{
1665 efx_oword_t reg;
1666 int cmd_en, timer_active;
1667 int count;
1668
1669 count = 0;
1670 do {
1671 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
1672 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
1673 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
1674 if (!cmd_en && !timer_active)
1675 return 0;
1676 udelay(10);
 1677 } while (++count < 10000); /* wait up to 100ms */
1678 EFX_ERR(efx, "timed out waiting for SPI\n");
1679 return -ETIMEDOUT;
1680}
1681
1682static int
1683falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command,
1684 unsigned int address, unsigned int addr_len,
1685 void *data, unsigned int len)
1686{
1687 efx_oword_t reg;
1688 int rc;
1689
1690 BUG_ON(len > FALCON_SPI_MAX_LEN);
1691
1692 /* Check SPI not currently being accessed */
1693 rc = falcon_spi_wait(efx);
1694 if (rc)
1695 return rc;
1696
1697 /* Program address register */
1698 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
1699 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
1700
1701 /* Issue read command */
1702 EFX_POPULATE_OWORD_7(reg,
1703 EE_SPI_HCMD_CMD_EN, 1,
1704 EE_SPI_HCMD_SF_SEL, device_id,
1705 EE_SPI_HCMD_DABCNT, len,
1706 EE_SPI_HCMD_READ, EE_SPI_READ,
1707 EE_SPI_HCMD_DUBCNT, 0,
1708 EE_SPI_HCMD_ADBCNT, addr_len,
1709 EE_SPI_HCMD_ENC, command);
1710 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
1711
1712 /* Wait for read to complete */
1713 rc = falcon_spi_wait(efx);
1714 if (rc)
1715 return rc;
1716
1717 /* Read data */
1718 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
1719 memcpy(data, &reg, len);
1720 return 0;
1721}
1722
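Since falcon_spi_read() moves at most FALCON_SPI_MAX_LEN (sizeof(efx_oword_t), i.e. 16) bytes per command, larger transfers must be split into chunks, as falcon_probe_nvconfig() does further down. A standalone sketch of that chunking pattern (read_fn stands in for falcon_spi_read(); nothing here is driver code):

	#include <stddef.h>

	#define SPI_MAX_LEN 16	/* mirrors FALCON_SPI_MAX_LEN */

	static int spi_read_big(int (*read_fn)(unsigned addr, void *buf,
					       size_t len),
				unsigned base, void *data, size_t len)
	{
		size_t offset, chunk;
		int rc;

		for (offset = 0; offset < len; offset += chunk) {
			chunk = len - offset;
			if (chunk > SPI_MAX_LEN)
				chunk = SPI_MAX_LEN;
			rc = read_fn(base + offset,
				     (char *)data + offset, chunk);
			if (rc)	/* propagate -ETIMEDOUT etc. */
				return rc;
		}
		return 0;
	}
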
1723/**************************************************************************
1724 *
1725 * MAC wrapper
1726 *
1727 **************************************************************************
1728 */
1729void falcon_drain_tx_fifo(struct efx_nic *efx)
1730{
1731 efx_oword_t temp;
1732 int count;
1733
1734 if (FALCON_REV(efx) < FALCON_REV_B0)
1735 return;
1736
1737 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1738 /* There is no point in draining more than once */
1739 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
1740 return;
1741
1742 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1743 * the drain sequence with the statistics fetch */
1744 spin_lock(&efx->stats_lock);
1745
1746 EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
1747 falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
1748
1749 /* Reset the MAC and EM block. */
1750 falcon_read(efx, &temp, GLB_CTL_REG_KER);
1751 EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
1752 EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
1753 EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
1754 falcon_write(efx, &temp, GLB_CTL_REG_KER);
1755
1756 count = 0;
1757 while (1) {
1758 falcon_read(efx, &temp, GLB_CTL_REG_KER);
1759 if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
1760 !EFX_OWORD_FIELD(temp, RST_XGRX) &&
1761 !EFX_OWORD_FIELD(temp, RST_EM)) {
1762 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1763 count);
1764 break;
1765 }
1766 if (count > 20) {
1767 EFX_ERR(efx, "MAC reset failed\n");
1768 break;
1769 }
1770 count++;
1771 udelay(10);
1772 }
1773
1774 spin_unlock(&efx->stats_lock);
1775
1776 /* If we've reset the EM block and the link is up, then
1777 * we'll have to kick the XAUI link so the PHY can recover */
1778 if (efx->link_up && EFX_WORKAROUND_5147(efx))
1779 falcon_reset_xaui(efx);
1780}
1781
1782void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1783{
1784 efx_oword_t temp;
1785
1786 if (FALCON_REV(efx) < FALCON_REV_B0)
1787 return;
1788
1789 /* Isolate the MAC -> RX */
1790 falcon_read(efx, &temp, RX_CFG_REG_KER);
1791 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
1792 falcon_write(efx, &temp, RX_CFG_REG_KER);
1793
1794 if (!efx->link_up)
1795 falcon_drain_tx_fifo(efx);
1796}
1797
1798void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1799{
1800 efx_oword_t reg;
1801 int link_speed;
1802 unsigned int tx_fc;
1803
1804 if (efx->link_options & GM_LPA_10000)
1805 link_speed = 0x3;
1806 else if (efx->link_options & GM_LPA_1000)
1807 link_speed = 0x2;
1808 else if (efx->link_options & GM_LPA_100)
1809 link_speed = 0x1;
1810 else
1811 link_speed = 0x0;
1812 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1813 * as advertised, so force it on to disable backpressure. This
1814 * ensures packets are never held indefinitely and the TX queue
1815 * can be flushed at any point while the link is down. */
1816 EFX_POPULATE_OWORD_5(reg,
1817 MAC_XOFF_VAL, 0xffff /* max pause time */,
1818 MAC_BCAD_ACPT, 1,
1819 MAC_UC_PROM, efx->promiscuous,
1820 MAC_LINK_STATUS, 1, /* always set */
1821 MAC_SPEED, link_speed);
1822 /* On B0, MAC backpressure can be disabled and packets get
1823 * discarded. */
1824 if (FALCON_REV(efx) >= FALCON_REV_B0) {
1825 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
1826 !efx->link_up);
1827 }
1828
1829 falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
1830
1831 /* Restore the multicast hash registers. */
1832 falcon_set_multicast_hash(efx);
1833
1834 /* Transmission of pause frames when RX crosses the threshold is
1835 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1836 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
1837 tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
1838 falcon_read(efx, &reg, RX_CFG_REG_KER);
1839 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1840
1841 /* Unisolate the MAC -> RX */
1842 if (FALCON_REV(efx) >= FALCON_REV_B0)
1843 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
1844 falcon_write(efx, &reg, RX_CFG_REG_KER);
1845}
1846
1847int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1848{
1849 efx_oword_t reg;
1850 u32 *dma_done;
1851 int i;
1852
1853 if (disable_dma_stats)
1854 return 0;
1855
1856 /* Statistics fetch will fail if the MAC is in TX drain */
1857 if (FALCON_REV(efx) >= FALCON_REV_B0) {
1858 efx_oword_t temp;
1859 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1860 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
1861 return 0;
1862 }
1863
1864 dma_done = (efx->stats_buffer.addr + done_offset);
1865 *dma_done = FALCON_STATS_NOT_DONE;
1866 wmb(); /* ensure done flag is clear */
1867
1868 /* Initiate DMA transfer of stats */
1869 EFX_POPULATE_OWORD_2(reg,
1870 MAC_STAT_DMA_CMD, 1,
1871 MAC_STAT_DMA_ADR,
1872 efx->stats_buffer.dma_addr);
1873 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
1874
1875 /* Wait for transfer to complete */
1876 for (i = 0; i < 400; i++) {
1877 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
1878 return 0;
1879 udelay(10);
1880 }
1881
1882 EFX_ERR(efx, "timed out waiting for statistics\n");
1883 return -ETIMEDOUT;
1884}
1885
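The fetch protocol above is a one-word handshake: clear a "done" marker in the coherent stats buffer, issue the DMA command, then poll the marker until the hardware rewrites it. A userspace model of the same handshake (the callbacks and the DONE/NOT_DONE values are illustrative; the 400 x 10us bound matches the loop above):

	#include <stdint.h>

	#define STATS_NOT_DONE	0x00000000
	#define STATS_DONE	0xffffffff	/* illustrative values */

	static int fetch_stats(volatile uint32_t *dma_done,
			       void (*kick_hw)(void),
			       void (*delay_10us)(void))
	{
		int i;

		*dma_done = STATS_NOT_DONE;
		__sync_synchronize();	/* stands in for wmb() */
		kick_hw();		/* i.e. write MAC0_STAT_DMA_REG_KER */

		for (i = 0; i < 400; i++) {	/* up to ~4ms */
			if (*dma_done == STATS_DONE)
				return 0;
			delay_10us();
		}
		return -1;		/* the driver returns -ETIMEDOUT */
	}
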
1886/**************************************************************************
1887 *
1888 * PHY access via GMII
1889 *
1890 **************************************************************************
1891 */
1892
1893/* Use the top bit of the MII PHY id to indicate the PHY type
1894 * (1G/10G), with the remaining bits as the actual PHY id.
1895 *
1896 * This allows us to avoid leaking information from the mii_if_info
1897 * structure into other data structures.
1898 */
1899#define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR)
1900#define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
1901#define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1)
1902#define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1)
1903#define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1))
1904
1905
1906/* Packing the clause 45 port and device fields into a single value */
1907#define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
1908#define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH
1909#define MD_DEV_ADR_COMP_LBN 0
1910#define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH
1911
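Concretely, the clause 45 (port, MMD) pair packs into the low 10 bits (5 bits each, port above device, since MD_PRT_ADR_LBN - MD_DEV_ADR_LBN = 5) and bit 10 flags a 10G PHY. A runnable model of the layout (the helper stands in for mdio_clause45_pack(); the port/MMD values are made up):

	#include <stdio.h>

	#define PRT_DEV_WIDTH	10	/* mirrors FALCON_PHY_ID_ID_WIDTH */
	#define PRT_COMP_LBN	5	/* MD_PRT_ADR_LBN - MD_DEV_ADR_LBN */
	#define IS_10G		(1 << PRT_DEV_WIDTH)

	static unsigned pack_phy_id(unsigned port, unsigned mmd, int is_10g)
	{
		return (is_10g ? IS_10G : 0) | (port << PRT_COMP_LBN) | mmd;
	}

	int main(void)
	{
		/* port 1, MMD 4, 10G -> 0x424 */
		printf("phy_id = %#x\n", pack_phy_id(1, 4, 1));
		return 0;
	}
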
1912
1913/* Wait for GMII access to complete */
1914static int falcon_gmii_wait(struct efx_nic *efx)
1915{
1916 efx_dword_t md_stat;
1917 int count;
1918
1919 for (count = 0; count < 1000; count++) { /* wait up to 10ms */
1920 falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
1921 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
1922 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
1923 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
1924 EFX_ERR(efx, "error from GMII access "
1925 EFX_DWORD_FMT"\n",
1926 EFX_DWORD_VAL(md_stat));
1927 return -EIO;
1928 }
1929 return 0;
1930 }
1931 udelay(10);
1932 }
1933 EFX_ERR(efx, "timed out waiting for GMII\n");
1934 return -ETIMEDOUT;
1935}
1936
1937/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
1938static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1939 int addr, int value)
1940{
1941 struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
1942 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1943 efx_oword_t reg;
1944
1945 /* The 'generic' prt/dev packing in mdio_10g.h is conveniently
1946 * chosen so that the only current user, Falcon, can take the
1947 * packed value and use it directly.
1948 * Fail the build if this assumption is broken.
1949 */
1950 BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
1951 BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
1952 BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
1953 BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);
1954
1955 if (phy_id2 == PHY_ADDR_INVALID)
1956 return;
1957
1958 /* See falcon_mdio_read for an explanation. */
1959 if (!(phy_id & FALCON_PHY_ID_10G)) {
1960 int mmd = ffs(efx->phy_op->mmds) - 1;
1961 EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
1962 phy_id2 = mdio_clause45_pack(phy_id2, mmd)
1963 & FALCON_PHY_ID_ID_MASK;
1964 }
1965
1966 EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
1967 addr, value);
1968
1969 spin_lock_bh(&efx->phy_lock);
1970
1971 /* Check MII not currently being accessed */
1972 if (falcon_gmii_wait(efx) != 0)
1973 goto out;
1974
1975 /* Write the address/ID register */
1976 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
1977 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
1978
1979 EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
1980 falcon_write(efx, &reg, MD_ID_REG_KER);
1981
1982 /* Write data */
1983 EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
1984 falcon_write(efx, &reg, MD_TXD_REG_KER);
1985
1986 EFX_POPULATE_OWORD_2(reg,
1987 MD_WRC, 1,
1988 MD_GC, 0);
1989 falcon_write(efx, &reg, MD_CS_REG_KER);
1990
1991 /* Wait for data to be written */
1992 if (falcon_gmii_wait(efx) != 0) {
1993 /* Abort the write operation */
1994 EFX_POPULATE_OWORD_2(reg,
1995 MD_WRC, 0,
1996 MD_GC, 1);
1997 falcon_write(efx, &reg, MD_CS_REG_KER);
1998 udelay(10);
1999 }
2000
2001 out:
2002 spin_unlock_bh(&efx->phy_lock);
2003}
2004
2005/* Reads a GMII register from a PHY connected to Falcon. If no value
2006 * could be read, -1 will be returned. */
2007static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2008{
2009 struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
2010 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2011 efx_oword_t reg;
2012 int value = -1;
2013
2014 if (phy_addr == PHY_ADDR_INVALID)
2015 return -1;
2016
2017 /* Our PHY code knows whether it needs to talk clause 22 (1G) or
2018 * clause 45 (10G), but the generic Linux code does not make any
2019 * distinction or keep any state for this.
2020 * We spot the case where someone tried to talk clause 22 to a
2021 * clause 45 PHY, and redirect the request to the lowest numbered
2022 * MMD as a clause 45 request. This is enough for simple queries
2023 * like ID and link state to succeed. TODO: we may need more later.
2024 */
2025 if (!(phy_id & FALCON_PHY_ID_10G)) {
2026 int mmd = ffs(efx->phy_op->mmds) - 1;
2027 EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
2028 phy_addr = mdio_clause45_pack(phy_addr, mmd)
2029 & FALCON_PHY_ID_ID_MASK;
2030 }
2031
2032 spin_lock_bh(&efx->phy_lock);
2033
2034 /* Check MII not currently being accessed */
2035 if (falcon_gmii_wait(efx) != 0)
2036 goto out;
2037
2038 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
2039 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
2040
2041 EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
2042 falcon_write(efx, &reg, MD_ID_REG_KER);
2043
2044 /* Request data to be read */
2045 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
2046 falcon_write(efx, &reg, MD_CS_REG_KER);
2047
2048 /* Wait for data to become available */
2049 value = falcon_gmii_wait(efx);
2050 if (value == 0) {
2051 falcon_read(efx, &reg, MD_RXD_REG_KER);
2052 value = EFX_OWORD_FIELD(reg, MD_RXD);
2053 EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
2054 phy_id, addr, value);
2055 } else {
2056 /* Abort the read operation */
2057 EFX_POPULATE_OWORD_2(reg,
2058 MD_RIC, 0,
2059 MD_GC, 1);
2060 falcon_write(efx, &reg, MD_CS_REG_KER);
2061
2062 EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
2063 "error %d\n", phy_id, addr, value);
2064 }
2065
2066 out:
2067 spin_unlock_bh(&efx->phy_lock);
2068
2069 return value;
2070}
2071
2072static void falcon_init_mdio(struct mii_if_info *gmii)
2073{
2074 gmii->mdio_read = falcon_mdio_read;
2075 gmii->mdio_write = falcon_mdio_write;
2076 gmii->phy_id_mask = FALCON_PHY_ID_MASK;
2077 gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
2078}
2079
2080static int falcon_probe_phy(struct efx_nic *efx)
2081{
2082 switch (efx->phy_type) {
2083 case PHY_TYPE_10XPRESS:
2084 efx->phy_op = &falcon_tenxpress_phy_ops;
2085 break;
2086 case PHY_TYPE_XFP:
2087 efx->phy_op = &falcon_xfp_phy_ops;
2088 break;
2089 default:
2090 EFX_ERR(efx, "Unknown PHY type %d\n",
2091 efx->phy_type);
2092 return -1;
2093 }
2094 return 0;
2095}
2096
2097/* This call is responsible for hooking in the MAC and PHY operations */
2098int falcon_probe_port(struct efx_nic *efx)
2099{
2100 int rc;
2101
2102 /* Hook in PHY operations table */
2103 rc = falcon_probe_phy(efx);
2104 if (rc)
2105 return rc;
2106
2107 /* Set up GMII structure for PHY */
2108 efx->mii.supports_gmii = 1;
2109 falcon_init_mdio(&efx->mii);
2110
2111 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2112 if (FALCON_REV(efx) >= FALCON_REV_B0)
2113 efx->flow_control = EFX_FC_RX | EFX_FC_TX;
2114 else
2115 efx->flow_control = EFX_FC_RX;
2116
2117 /* Allocate buffer for stats */
2118 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2119 FALCON_MAC_STATS_SIZE);
2120 if (rc)
2121 return rc;
2122 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
2123 (unsigned long long)efx->stats_buffer.dma_addr,
2124 efx->stats_buffer.addr,
2125 virt_to_phys(efx->stats_buffer.addr));
2126
2127 return 0;
2128}
2129
2130void falcon_remove_port(struct efx_nic *efx)
2131{
2132 falcon_free_buffer(efx, &efx->stats_buffer);
2133}
2134
2135/**************************************************************************
2136 *
2137 * Multicast filtering
2138 *
2139 **************************************************************************
2140 */
2141
2142void falcon_set_multicast_hash(struct efx_nic *efx)
2143{
2144 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2145
2146 /* Broadcast packets go through the multicast hash filter.
2147 * ether_crc_le() of the broadcast address is 0xbe2612ff
2148 * so we always add bit 0xff to the mask.
2149 */
2150 set_bit_le(0xff, mc_hash->byte);
2151
2152 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
2153 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
2154}
2155
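Elsewhere in the driver the hash bit for a multicast address is taken as the low 8 bits of ether_crc_le() over the 6-byte MAC, which is why the broadcast address (CRC 0xbe2612ff, per the comment above) always lands on bit 0xff. A standalone check of that arithmetic (crc32_le() below is a plain bitwise Ethernet CRC intended to match the kernel's ether_crc_le(); treat the expected values as following from the comment above):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	static uint32_t crc32_le(const uint8_t *p, size_t len)
	{
		uint32_t crc = 0xffffffff;	/* no final inversion */
		size_t i;
		int bit;

		for (i = 0; i < len; i++) {
			crc ^= p[i];
			for (bit = 0; bit < 8; bit++)
				crc = (crc >> 1) ^
				      (0xedb88320 & -(crc & 1));
		}
		return crc;
	}

	int main(void)
	{
		static const uint8_t bcast[6] =
			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		uint32_t crc = crc32_le(bcast, 6);

		/* expect crc 0xbe2612ff, hash bit 0xff */
		printf("crc=%#x -> bit %#x\n", crc, crc & 0xff);
		return 0;
	}
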
2156/**************************************************************************
2157 *
2158 * Device reset
2159 *
2160 **************************************************************************
2161 */
2162
2163/* Resets NIC to known state. This routine must be called in process
2164 * context and is allowed to sleep. */
2165int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2166{
2167 struct falcon_nic_data *nic_data = efx->nic_data;
2168 efx_oword_t glb_ctl_reg_ker;
2169 int rc;
2170
2171 EFX_LOG(efx, "performing hardware reset (%d)\n", method);
2172
2173 /* Initiate device reset */
2174 if (method == RESET_TYPE_WORLD) {
2175 rc = pci_save_state(efx->pci_dev);
2176 if (rc) {
2177 EFX_ERR(efx, "failed to backup PCI state of primary "
2178 "function prior to hardware reset\n");
2179 goto fail1;
2180 }
2181 if (FALCON_IS_DUAL_FUNC(efx)) {
2182 rc = pci_save_state(nic_data->pci_dev2);
2183 if (rc) {
2184 EFX_ERR(efx, "failed to backup PCI state of "
2185 "secondary function prior to "
2186 "hardware reset\n");
2187 goto fail2;
2188 }
2189 }
2190
2191 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2192 EXT_PHY_RST_DUR, 0x7,
2193 SWRST, 1);
2194 } else {
2195 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2196 EXCLUDE_FROM_RESET : 0);
2197
2198 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2199 EXT_PHY_RST_CTL, reset_phy,
2200 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
2201 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
2202 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
2203 EE_RST_CTL, EXCLUDE_FROM_RESET,
2204 EXT_PHY_RST_DUR, 0x7 /* 10ms */,
2205 SWRST, 1);
2206 }
2207 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2208
2209 EFX_LOG(efx, "waiting for hardware reset\n");
2210 schedule_timeout_uninterruptible(HZ / 20);
2211
2212 /* Restore PCI configuration if needed */
2213 if (method == RESET_TYPE_WORLD) {
2214 if (FALCON_IS_DUAL_FUNC(efx)) {
2215 rc = pci_restore_state(nic_data->pci_dev2);
2216 if (rc) {
2217 EFX_ERR(efx, "failed to restore PCI config for "
2218 "the secondary function\n");
2219 goto fail3;
2220 }
2221 }
2222 rc = pci_restore_state(efx->pci_dev);
2223 if (rc) {
2224 EFX_ERR(efx, "failed to restore PCI config for the "
2225 "primary function\n");
2226 goto fail4;
2227 }
2228 EFX_LOG(efx, "successfully restored PCI config\n");
2229 }
2230
2231 /* Verify that the reset completed */
2232 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2233 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
2234 rc = -ETIMEDOUT;
2235 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2236 goto fail5;
2237 }
2238 EFX_LOG(efx, "hardware reset complete\n");
2239
2240 return 0;
2241
2242 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2243fail2:
2244fail3:
2245 pci_restore_state(efx->pci_dev);
2246fail1:
2247fail4:
2248fail5:
2249 return rc;
2250}
2251
2252/* Zeroes out the SRAM contents. This routine must be called in
2253 * process context and is allowed to sleep.
2254 */
2255static int falcon_reset_sram(struct efx_nic *efx)
2256{
2257 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2258 int count;
2259
2260 /* Set the SRAM wake/sleep GPIO appropriately. */
2261 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2262 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
2263 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
2264 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2265
2266 /* Initiate SRAM reset */
2267 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2268 SRAM_OOB_BT_INIT_EN, 1,
2269 SRM_NUM_BANKS_AND_BANK_SIZE, 0);
2270 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2271
2272 /* Wait for SRAM reset to complete */
2273 count = 0;
2274 do {
2275 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2276
2277 /* SRAM reset is slow; expect around 16ms */
2278 schedule_timeout_uninterruptible(HZ / 50);
2279
2280 /* Check for reset complete */
2281 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2282 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
2283 EFX_LOG(efx, "SRAM reset complete\n");
2284
2285 return 0;
2286 }
2287 } while (++count < 20); /* wait up to 0.4s */
2288
2289 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2290 return -ETIMEDOUT;
2291}
2292
2293/* Extract non-volatile configuration */
2294static int falcon_probe_nvconfig(struct efx_nic *efx)
2295{
2296 struct falcon_nvconfig *nvconfig;
2297 efx_oword_t nic_stat;
2298 int device_id;
2299 unsigned addr_len;
2300 size_t offset, len;
2301 int magic_num, struct_ver, board_rev;
2302 int rc;
2303
2304 /* Find the boot device. */
2305 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2306 if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
2307 device_id = EE_SPI_FLASH;
2308 addr_len = 3;
2309 } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
2310 device_id = EE_SPI_EEPROM;
2311 addr_len = 2;
2312 } else {
2313 return -ENODEV;
2314 }
2315
2316 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2317 if (!nvconfig) return -ENOMEM;
2318 /* Read the whole configuration structure into memory. */
2319 for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
2320 len = min(sizeof(*nvconfig) - offset,
2321 (size_t) FALCON_SPI_MAX_LEN);
2322 rc = falcon_spi_read(efx, device_id, SPI_READ,
2323 NVCONFIG_BASE + offset, addr_len,
2324 (char *)nvconfig + offset, len);
2325 if (rc)
2326 goto out;
2327 }
2328
2329 /* Read the MAC addresses */
2330 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2331
2332 /* Read the board configuration. */
2333 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2334 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2335
2336 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
2337 EFX_ERR(efx, "non-volatile memory bad magic=%x ver=%x, "
2338 "using defaults\n", magic_num, struct_ver);
2339 efx->phy_type = PHY_TYPE_NONE;
2340 efx->mii.phy_id = PHY_ADDR_INVALID;
2341 board_rev = 0;
2342 } else {
2343 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2344
2345 efx->phy_type = v2->port0_phy_type;
2346 efx->mii.phy_id = v2->port0_phy_addr;
2347 board_rev = le16_to_cpu(v2->board_revision);
2348 }
2349
2350 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
2351
2352 efx_set_board_info(efx, board_rev);
2353
2354 out:
2355 kfree(nvconfig);
2356 return rc;
2357}
2358
2359/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2360 * count, port speed). Set workaround and feature flags accordingly.
2361 */
2362static int falcon_probe_nic_variant(struct efx_nic *efx)
2363{
2364 efx_oword_t altera_build;
2365
2366 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
2367 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
2368 EFX_ERR(efx, "Falcon FPGA not supported\n");
2369 return -ENODEV;
2370 }
2371
2372 switch (FALCON_REV(efx)) {
2373 case FALCON_REV_A0:
2374 case 0xff:
2375 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2376 return -ENODEV;
2377
2378 case FALCON_REV_A1:{
2379 efx_oword_t nic_stat;
2380
2381 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2382
2383 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
2384 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2385 return -ENODEV;
2386 }
2387 if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
2388 EFX_ERR(efx, "1G mode not supported\n");
2389 return -ENODEV;
2390 }
2391 break;
2392 }
2393
2394 case FALCON_REV_B0:
2395 break;
2396
2397 default:
2398 EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
2399 return -ENODEV;
2400 }
2401
2402 return 0;
2403}
2404
2405int falcon_probe_nic(struct efx_nic *efx)
2406{
2407 struct falcon_nic_data *nic_data;
2408 int rc;
2409
2410 /* Initialise I2C interface state */
2411 efx->i2c.efx = efx;
2412 efx->i2c.op = &falcon_i2c_bit_operations;
2413 efx->i2c.sda = 1;
2414 efx->i2c.scl = 1;
2415
2416 /* Allocate storage for hardware specific data */
2417 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2418 if (!nic_data) return -ENOMEM;
2419 efx->nic_data = nic_data;
2420 /* Determine number of ports etc. */
2421 rc = falcon_probe_nic_variant(efx);
2422 if (rc)
2423 goto fail1;
2424
2425 /* Probe secondary function if expected */
2426 if (FALCON_IS_DUAL_FUNC(efx)) {
2427 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2428
2429 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2430 dev))) {
2431 if (dev->bus == efx->pci_dev->bus &&
2432 dev->devfn == efx->pci_dev->devfn + 1) {
2433 nic_data->pci_dev2 = dev;
2434 break;
2435 }
2436 }
2437 if (!nic_data->pci_dev2) {
2438 EFX_ERR(efx, "failed to find secondary function\n");
2439 rc = -ENODEV;
2440 goto fail2;
2441 }
2442 }
2443
2444 /* Now we can reset the NIC */
2445 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2446 if (rc) {
2447 EFX_ERR(efx, "failed to reset NIC\n");
2448 goto fail3;
2449 }
2450
2451 /* Allocate memory for INT_KER */
2452 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2453 if (rc)
2454 goto fail4;
2455 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2456
2457 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
2458 (unsigned long long)efx->irq_status.dma_addr,
2459 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
2460
2461 /* Read in the non-volatile configuration */
2462 rc = falcon_probe_nvconfig(efx);
2463 if (rc)
2464 goto fail5;
2465
2466 return 0;
2467
2468 fail5:
2469 falcon_free_buffer(efx, &efx->irq_status);
2470 fail4:
2471 /* fall-thru */
2472 fail3:
2473 if (nic_data->pci_dev2) {
2474 pci_dev_put(nic_data->pci_dev2);
2475 nic_data->pci_dev2 = NULL;
2476 }
2477 fail2:
2478 /* fall-thru */
2479 fail1:
2480 kfree(efx->nic_data);
2481 return rc;
2482}
2483
2484/* This call performs hardware-specific global initialisation, such as
2485 * defining the descriptor cache sizes and number of RSS channels.
2486 * It does not set up any buffers, descriptor rings or event queues.
2487 */
2488int falcon_init_nic(struct efx_nic *efx)
2489{
2490 struct falcon_nic_data *data;
2491 efx_oword_t temp;
2492 unsigned thresh;
2493 int rc;
2494
2495 data = (struct falcon_nic_data *)efx->nic_data;
2496
2497 /* Set up the address region register. This is only needed
2498 * for the B0 FPGA, but since we are just pushing in the
2499 * reset defaults this may as well be unconditional. */
2500 EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
2501 ADR_REGION1, (1 << 16),
2502 ADR_REGION2, (2 << 16),
2503 ADR_REGION3, (3 << 16));
2504 falcon_write(efx, &temp, ADR_REGION_REG_KER);
2505
2506 /* Use on-chip SRAM */
2507 falcon_read(efx, &temp, NIC_STAT_REG);
2508 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
2509 falcon_write(efx, &temp, NIC_STAT_REG);
2510
2511 /* Set buffer table mode */
2512 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
2513 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2514
2515 rc = falcon_reset_sram(efx);
2516 if (rc)
2517 return rc;
2518
2519 /* Set positions of descriptor caches in SRAM. */
2520 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2521 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
2522 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2523 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
2524
2525 /* Set TX descriptor cache size. */
2526 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2527 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2528 falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
2529
2530 /* Set RX descriptor cache size. Set low watermark to size-8, as
2531 * this allows the most efficient prefetching.
2532 */
2533 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2534 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2535 falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
2536 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2537 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
2538
2539 /* Clear the parity enables on the TX data fifos as
2540 * they produce false parity errors because of timing issues
2541 */
2542 if (EFX_WORKAROUND_5129(efx)) {
2543 falcon_read(efx, &temp, SPARE_REG_KER);
2544 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
2545 falcon_write(efx, &temp, SPARE_REG_KER);
2546 }
2547
2548 /* Enable all the genuinely fatal interrupts. (They are still
2549 * masked by the overall interrupt mask, controlled by
2550 * falcon_interrupts()).
2551 *
2552 * Note: the mask is inverted before being written, so all other
2553 * fatal interrupts are enabled as well */
2554 EFX_POPULATE_OWORD_3(temp,
2555 ILL_ADR_INT_KER_EN, 1,
2556 RBUF_OWN_INT_KER_EN, 1,
2557 TBUF_OWN_INT_KER_EN, 1);
2558 EFX_INVERT_OWORD(temp);
2559 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2560
2561 /* Set number of RSS queues for receive path. */
2562 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2563 if (FALCON_REV(efx) >= FALCON_REV_B0)
2564 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2565 else
2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
2567 if (EFX_WORKAROUND_7244(efx)) {
2568 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
2569 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
2570 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
2571 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
2572 }
2573 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2574
2575 falcon_setup_rss_indir_table(efx);
2576
2577 /* Set up RX. "Wait for descriptor" mode is broken and must
2578 * be disabled. RXDP recovery shouldn't be needed, but is.
2579 */
2580 falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
2581 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
2582 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
2583 if (EFX_WORKAROUND_5583(efx))
2584 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
2585 falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
2586
2587 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
2588 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
2589 */
2590 falcon_read(efx, &temp, TX_CFG2_REG_KER);
2591 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
2592 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
2593 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
2594 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
2595 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
2596 /* Enable SW_EV so the char driver can inherit it - assumed harmless here */
2597 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
2598 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
2599 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
2600 /* Squash TX of packets of 16 bytes or less */
2601 if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
2602 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
2603 falcon_write(efx, &temp, TX_CFG2_REG_KER);
2604
2605 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
2606 * descriptors (which is bad).
2607 */
2608 falcon_read(efx, &temp, TX_CFG_REG_KER);
2609 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
2610 falcon_write(efx, &temp, TX_CFG_REG_KER);
2611
2612 /* RX config */
2613 falcon_read(efx, &temp, RX_CFG_REG_KER);
2614 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
2615 if (EFX_WORKAROUND_7575(efx))
2616 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
2617 (3 * 4096) / 32);
2618 if (FALCON_REV(efx) >= FALCON_REV_B0)
2619 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
2620
2621 /* RX FIFO flow control thresholds */
2622 thresh = ((rx_xon_thresh_bytes >= 0) ?
2623 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
2624 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
2625 thresh = ((rx_xoff_thresh_bytes >= 0) ?
2626 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
2627 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
2628 /* RX control FIFO thresholds [32 entries] */
2629 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
2630 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
2631 falcon_write(efx, &temp, RX_CFG_REG_KER);
2632
2633 /* Set destination of both TX and RX Flush events */
2634 if (FALCON_REV(efx) >= FALCON_REV_B0) {
2635 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
2636 falcon_write(efx, &temp, DP_CTRL_REG);
2637 }
2638
2639 return 0;
2640}
2641
2642void falcon_remove_nic(struct efx_nic *efx)
2643{
2644 struct falcon_nic_data *nic_data = efx->nic_data;
2645
2646 falcon_free_buffer(efx, &efx->irq_status);
2647
2648 (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
2649
2650 /* Release the second function after the reset */
2651 if (nic_data->pci_dev2) {
2652 pci_dev_put(nic_data->pci_dev2);
2653 nic_data->pci_dev2 = NULL;
2654 }
2655
2656 /* Tear down the private nic state */
2657 kfree(efx->nic_data);
2658 efx->nic_data = NULL;
2659}
2660
2661void falcon_update_nic_stats(struct efx_nic *efx)
2662{
2663 efx_oword_t cnt;
2664
2665 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
2666 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
2667}
2668
2669/**************************************************************************
2670 *
2671 * Revision-dependent attributes used by efx.c
2672 *
2673 **************************************************************************
2674 */
2675
2676struct efx_nic_type falcon_a_nic_type = {
2677 .mem_bar = 2,
2678 .mem_map_size = 0x20000,
2679 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
2680 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
2681 .buf_tbl_base = BUF_TBL_KER_A1,
2682 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
2683 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
2684 .txd_ring_mask = FALCON_TXD_RING_MASK,
2685 .rxd_ring_mask = FALCON_RXD_RING_MASK,
2686 .evq_size = FALCON_EVQ_SIZE,
2687 .max_dma_mask = FALCON_DMA_MASK,
2688 .tx_dma_mask = FALCON_TX_DMA_MASK,
2689 .bug5391_mask = 0xf,
2690 .rx_xoff_thresh = 2048,
2691 .rx_xon_thresh = 512,
2692 .rx_buffer_padding = 0x24,
2693 .max_interrupt_mode = EFX_INT_MODE_MSI,
2694 .phys_addr_channels = 4,
2695};
2696
2697struct efx_nic_type falcon_b_nic_type = {
2698 .mem_bar = 2,
2699 /* Map everything up to and including the RSS indirection
2700 * table. Don't map the MSI-X table or MSI-X PBA, since Linux
2701 * requires that they not be mapped. */
2702 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
2703 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
2704 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
2705 .buf_tbl_base = BUF_TBL_KER_B0,
2706 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
2707 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
2708 .txd_ring_mask = FALCON_TXD_RING_MASK,
2709 .rxd_ring_mask = FALCON_RXD_RING_MASK,
2710 .evq_size = FALCON_EVQ_SIZE,
2711 .max_dma_mask = FALCON_DMA_MASK,
2712 .tx_dma_mask = FALCON_TX_DMA_MASK,
2713 .bug5391_mask = 0,
2714 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
2715 .rx_xon_thresh = 27648, /* ~3*max MTU */
2716 .rx_buffer_padding = 0,
2717 .max_interrupt_mode = EFX_INT_MODE_MSIX,
2718 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
2719 * interrupt handler only supports 32
2720 * channels */
2721};
2722
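These two attribute tables are what the generic efx.c code consults for revision-dependent geometry; the probe path binds one of them to the NIC. A hedged sketch of such a selection (the real binding in efx.c is keyed off the PCI device table, so take this as illustrative only):

	/* illustrative: pick the attribute table by silicon revision */
	efx->type = (FALCON_REV(efx) >= FALCON_REV_B0) ?
			&falcon_b_nic_type : &falcon_a_nic_type;
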
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
new file mode 100644
index 000000000000..6117403b0c03
--- /dev/null
+++ b/drivers/net/sfc/falcon.h
@@ -0,0 +1,130 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_H
12#define EFX_FALCON_H
13
14#include "net_driver.h"
15
16/*
17 * Falcon hardware control
18 */
19
20enum falcon_revision {
21 FALCON_REV_A0 = 0,
22 FALCON_REV_A1 = 1,
23 FALCON_REV_B0 = 2,
24};
25
26#define FALCON_REV(efx) ((efx)->pci_dev->revision)
27
28extern struct efx_nic_type falcon_a_nic_type;
29extern struct efx_nic_type falcon_b_nic_type;
30
31/**************************************************************************
32 *
33 * Externs
34 *
35 **************************************************************************
36 */
37
38/* TX data path */
39extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
40extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
41extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
42extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
43extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
44
45/* RX data path */
46extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
47extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
48extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
49extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
50extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
51
52/* Event data path */
53extern int falcon_probe_eventq(struct efx_channel *channel);
54extern int falcon_init_eventq(struct efx_channel *channel);
55extern void falcon_fini_eventq(struct efx_channel *channel);
56extern void falcon_remove_eventq(struct efx_channel *channel);
57extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
58extern void falcon_eventq_read_ack(struct efx_channel *channel);
59
60/* Ports */
61extern int falcon_probe_port(struct efx_nic *efx);
62extern void falcon_remove_port(struct efx_nic *efx);
63
64/* MAC/PHY */
65extern int falcon_xaui_link_ok(struct efx_nic *efx);
66extern int falcon_dma_stats(struct efx_nic *efx,
67 unsigned int done_offset);
68extern void falcon_drain_tx_fifo(struct efx_nic *efx);
69extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
70extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
71
72/* Interrupts and test events */
73extern int falcon_init_interrupt(struct efx_nic *efx);
74extern void falcon_enable_interrupts(struct efx_nic *efx);
75extern void falcon_generate_test_event(struct efx_channel *channel,
76 unsigned int magic);
77extern void falcon_generate_interrupt(struct efx_nic *efx);
78extern void falcon_set_int_moderation(struct efx_channel *channel);
79extern void falcon_disable_interrupts(struct efx_nic *efx);
80extern void falcon_fini_interrupt(struct efx_nic *efx);
81
82/* Global Resources */
83extern int falcon_probe_nic(struct efx_nic *efx);
84extern int falcon_probe_resources(struct efx_nic *efx);
85extern int falcon_init_nic(struct efx_nic *efx);
86extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
87extern void falcon_remove_resources(struct efx_nic *efx);
88extern void falcon_remove_nic(struct efx_nic *efx);
89extern void falcon_update_nic_stats(struct efx_nic *efx);
90extern void falcon_set_multicast_hash(struct efx_nic *efx);
91extern int falcon_reset_xaui(struct efx_nic *efx);
92
93/**************************************************************************
94 *
95 * Falcon MAC stats
96 *
97 **************************************************************************
98 */
99
100#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
101#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
102
103/* Retrieve statistic from statistics block */
104#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
105 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
106 (efx)->mac_stats.efx_stat += le16_to_cpu( \
107 *((__force __le16 *) \
108 (efx->stats_buffer.addr + \
109 FALCON_STAT_OFFSET(falcon_stat)))); \
110 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
111 (efx)->mac_stats.efx_stat += le32_to_cpu( \
112 *((__force __le32 *) \
113 (efx->stats_buffer.addr + \
114 FALCON_STAT_OFFSET(falcon_stat)))); \
115 else \
116 (efx)->mac_stats.efx_stat += le64_to_cpu( \
117 *((__force __le64 *) \
118 (efx->stats_buffer.addr + \
119 FALCON_STAT_OFFSET(falcon_stat)))); \
120 } while (0)
121
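As a usage sketch: after a successful falcon_dma_stats() fetch, the MAC code folds each hardware counter into the driver's running totals with this macro, which picks a 16/32/64-bit load based on the stat's WIDTH. The stat/field pairing and the done_offset argument below are illustrative, not a definitive call site:

	/* fold DMAed counters into efx->mac_stats (illustrative names) */
	if (falcon_dma_stats(efx, done_offset) == 0) {
		FALCON_STAT(efx, XgRxOctets, rx_bytes);
		FALCON_STAT(efx, XgRxPkts, rx_packets);
	}
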
122#define FALCON_MAC_STATS_SIZE 0x100
123
124#define MAC_DATA_LBN 0
125#define MAC_DATA_WIDTH 32
126
127extern void falcon_generate_event(struct efx_channel *channel,
128 efx_qword_t *event);
129
130#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
new file mode 100644
index 000000000000..0485a63eaff6
--- /dev/null
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -0,0 +1,1135 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards.
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
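Throughout this header a register field is described by two numbers: LBN, the field's least-significant bit position within the (up to 128-bit) register, and WIDTH, its size in bits; the EFX_*_FIELD and EFX_POPULATE_* macros in bitfield.h reduce these to shifts and masks. A 64-bit userspace model of the convention (field values taken from GLB_CTL_REG_KER's EXT_PHY_RST_DUR below, LBN 1 and width 3; a 64-bit word is enough to show the idea):

	#include <stdio.h>
	#include <stdint.h>

	#define FIELD_MASK(width)	((1ULL << (width)) - 1)
	#define FIELD_GET(reg, lbn, width) \
		(((reg) >> (lbn)) & FIELD_MASK(width))
	#define FIELD_SET(reg, lbn, width, val) \
		(((reg) & ~(FIELD_MASK(width) << (lbn))) | \
		 ((uint64_t)(val) << (lbn)))

	int main(void)
	{
		uint64_t reg = 0;

		reg = FIELD_SET(reg, 1, 3, 0x7);	/* EXT_PHY_RST_DUR */
		printf("reg=%#llx field=%#llx\n",
		       (unsigned long long)reg,
		       (unsigned long long)FIELD_GET(reg, 1, 3));
		return 0;
	}
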
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only) */
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* PCIE CORE ACCESS REG */
96#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
97#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
98#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
99#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
100
101/* NIC status register */
102#define NIC_STAT_REG 0x0200
103#define ONCHIP_SRAM_LBN 16
104#define ONCHIP_SRAM_WIDTH 1
105#define SF_PRST_LBN 9
106#define SF_PRST_WIDTH 1
107#define EE_PRST_LBN 8
108#define EE_PRST_WIDTH 1
109/* See pic_mode_t for decoding of this field */
110/* These bit definitions are extrapolated from the list of numerical
111 * values for STRAP_PINS.
112 */
113#define STRAP_10G_LBN 2
114#define STRAP_10G_WIDTH 1
115#define STRAP_PCIE_LBN 0
116#define STRAP_PCIE_WIDTH 1
117
118/* GPIO control register */
119#define GPIO_CTL_REG_KER 0x0210
120#define GPIO_OUTPUTS_LBN (16)
121#define GPIO_OUTPUTS_WIDTH (4)
122#define GPIO_INPUTS_LBN (8)
123#define GPIO_DIRECTION_LBN (24)
124#define GPIO_DIRECTION_WIDTH (4)
125#define GPIO_DIRECTION_OUT (1)
126#define GPIO_SRAM_SLEEP (1 << 1)
127
128#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
129#define GPIO3_OEN_WIDTH 1
130#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
131#define GPIO2_OEN_WIDTH 1
132#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
133#define GPIO1_OEN_WIDTH 1
134#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
135#define GPIO0_OEN_WIDTH 1
136
137#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
138#define GPIO3_OUT_WIDTH 1
139#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
140#define GPIO2_OUT_WIDTH 1
141#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
142#define GPIO1_OUT_WIDTH 1
143#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
144#define GPIO0_OUT_WIDTH 1
145
146#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
147#define GPIO3_IN_WIDTH 1
148#define GPIO2_IN_WIDTH 1
149#define GPIO1_IN_WIDTH 1
150#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
151#define GPIO0_IN_WIDTH 1
152
153/* Global control register */
154#define GLB_CTL_REG_KER 0x0220
155#define EXT_PHY_RST_CTL_LBN 63
156#define EXT_PHY_RST_CTL_WIDTH 1
157#define PCIE_SD_RST_CTL_LBN 61
158#define PCIE_SD_RST_CTL_WIDTH 1
159
160#define PCIE_NSTCK_RST_CTL_LBN 58
161#define PCIE_NSTCK_RST_CTL_WIDTH 1
162#define PCIE_CORE_RST_CTL_LBN 57
163#define PCIE_CORE_RST_CTL_WIDTH 1
164#define EE_RST_CTL_LBN 49
165#define EE_RST_CTL_WIDTH 1
166#define RST_XGRX_LBN 24
167#define RST_XGRX_WIDTH 1
168#define RST_XGTX_LBN 23
169#define RST_XGTX_WIDTH 1
170#define RST_EM_LBN 22
171#define RST_EM_WIDTH 1
172#define EXT_PHY_RST_DUR_LBN 1
173#define EXT_PHY_RST_DUR_WIDTH 3
174#define SWRST_LBN 0
175#define SWRST_WIDTH 1
176#define INCLUDE_IN_RESET 0
177#define EXCLUDE_FROM_RESET 1
178
179/* Fatal interrupt register */
180#define FATAL_INTR_REG_KER 0x0230
181#define RBUF_OWN_INT_KER_EN_LBN 39
182#define RBUF_OWN_INT_KER_EN_WIDTH 1
183#define TBUF_OWN_INT_KER_EN_LBN 38
184#define TBUF_OWN_INT_KER_EN_WIDTH 1
185#define ILL_ADR_INT_KER_EN_LBN 33
186#define ILL_ADR_INT_KER_EN_WIDTH 1
187#define MEM_PERR_INT_KER_LBN 8
188#define MEM_PERR_INT_KER_WIDTH 1
189#define INT_KER_ERROR_LBN 0
190#define INT_KER_ERROR_WIDTH 12
191
192#define DP_CTRL_REG 0x250
193#define FLS_EVQ_ID_LBN 0
194#define FLS_EVQ_ID_WIDTH 11
195
196#define MEM_STAT_REG_KER 0x260
197
198/* Debug probe register */
199#define DEBUG_BLK_SEL_MISC 7
200#define DEBUG_BLK_SEL_SERDES 6
201#define DEBUG_BLK_SEL_EM 5
202#define DEBUG_BLK_SEL_SR 4
203#define DEBUG_BLK_SEL_EV 3
204#define DEBUG_BLK_SEL_RX 2
205#define DEBUG_BLK_SEL_TX 1
206#define DEBUG_BLK_SEL_BIU 0
207
208/* FPGA build version */
209#define ALTERA_BUILD_REG_KER 0x0300
210#define VER_ALL_LBN 0
211#define VER_ALL_WIDTH 32
212
213/* Spare EEPROM bits register (flash 0x390) */
214#define SPARE_REG_KER 0x310
215#define MEM_PERR_EN_TX_DATA_LBN 72
216#define MEM_PERR_EN_TX_DATA_WIDTH 2
217
218/* Timer table for kernel access */
219#define TIMER_CMD_REG_KER 0x420
220#define TIMER_MODE_LBN 12
221#define TIMER_MODE_WIDTH 2
222#define TIMER_MODE_DIS 0
223#define TIMER_MODE_INT_HLDOFF 2
224#define TIMER_VAL_LBN 0
225#define TIMER_VAL_WIDTH 12
226
227/* Driver generated event register */
228#define DRV_EV_REG_KER 0x440
229#define DRV_EV_QID_LBN 64
230#define DRV_EV_QID_WIDTH 12
231#define DRV_EV_DATA_LBN 0
232#define DRV_EV_DATA_WIDTH 64
233
234/* Buffer table configuration register */
235#define BUF_TBL_CFG_REG_KER 0x600
236#define BUF_TBL_MODE_LBN 3
237#define BUF_TBL_MODE_WIDTH 1
238#define BUF_TBL_MODE_HALF 0
239#define BUF_TBL_MODE_FULL 1
240
241/* SRAM receive descriptor cache configuration register */
242#define SRM_RX_DC_CFG_REG_KER 0x610
243#define SRM_RX_DC_BASE_ADR_LBN 0
244#define SRM_RX_DC_BASE_ADR_WIDTH 21
245
246/* SRAM transmit descriptor cache configuration register */
247#define SRM_TX_DC_CFG_REG_KER 0x620
248#define SRM_TX_DC_BASE_ADR_LBN 0
249#define SRM_TX_DC_BASE_ADR_WIDTH 21
250
251/* SRAM configuration register */
252#define SRM_CFG_REG_KER 0x630
253#define SRAM_OOB_BT_INIT_EN_LBN 3
254#define SRAM_OOB_BT_INIT_EN_WIDTH 1
255#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
256#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
257#define SRM_NB_BSZ_1BANKS_2M 0
258#define SRM_NB_BSZ_1BANKS_4M 1
259#define SRM_NB_BSZ_1BANKS_8M 2
260#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
261#define SRM_NB_BSZ_2BANKS_4M 4
262#define SRM_NB_BSZ_2BANKS_8M 5
263#define SRM_NB_BSZ_2BANKS_16M 6
264#define SRM_NB_BSZ_RESERVED 7
265
266/* Special buffer table update register */
267#define BUF_TBL_UPD_REG_KER 0x0650
268#define BUF_UPD_CMD_LBN 63
269#define BUF_UPD_CMD_WIDTH 1
270#define BUF_CLR_CMD_LBN 62
271#define BUF_CLR_CMD_WIDTH 1
272#define BUF_CLR_END_ID_LBN 32
273#define BUF_CLR_END_ID_WIDTH 20
274#define BUF_CLR_START_ID_LBN 0
275#define BUF_CLR_START_ID_WIDTH 20
276
277/* Receive configuration register */
278#define RX_CFG_REG_KER 0x800
279
280/* B0 */
281#define RX_INGR_EN_B0_LBN 47
282#define RX_INGR_EN_B0_WIDTH 1
283#define RX_DESC_PUSH_EN_B0_LBN 43
284#define RX_DESC_PUSH_EN_B0_WIDTH 1
285#define RX_XON_TX_TH_B0_LBN 33
286#define RX_XON_TX_TH_B0_WIDTH 5
287#define RX_XOFF_TX_TH_B0_LBN 28
288#define RX_XOFF_TX_TH_B0_WIDTH 5
289#define RX_USR_BUF_SIZE_B0_LBN 19
290#define RX_USR_BUF_SIZE_B0_WIDTH 9
291#define RX_XON_MAC_TH_B0_LBN 10
292#define RX_XON_MAC_TH_B0_WIDTH 9
293#define RX_XOFF_MAC_TH_B0_LBN 1
294#define RX_XOFF_MAC_TH_B0_WIDTH 9
295#define RX_XOFF_MAC_EN_B0_LBN 0
296#define RX_XOFF_MAC_EN_B0_WIDTH 1
297
298/* A1 */
299#define RX_DESC_PUSH_EN_A1_LBN 35
300#define RX_DESC_PUSH_EN_A1_WIDTH 1
301#define RX_XON_TX_TH_A1_LBN 25
302#define RX_XON_TX_TH_A1_WIDTH 5
303#define RX_XOFF_TX_TH_A1_LBN 20
304#define RX_XOFF_TX_TH_A1_WIDTH 5
305#define RX_USR_BUF_SIZE_A1_LBN 11
306#define RX_USR_BUF_SIZE_A1_WIDTH 9
307#define RX_XON_MAC_TH_A1_LBN 6
308#define RX_XON_MAC_TH_A1_WIDTH 5
309#define RX_XOFF_MAC_TH_A1_LBN 1
310#define RX_XOFF_MAC_TH_A1_WIDTH 5
311#define RX_XOFF_MAC_EN_A1_LBN 0
312#define RX_XOFF_MAC_EN_A1_WIDTH 1
313
314/* Receive filter control register */
315#define RX_FILTER_CTL_REG 0x810
316#define UDP_FULL_SRCH_LIMIT_LBN 32
317#define UDP_FULL_SRCH_LIMIT_WIDTH 8
318#define NUM_KER_LBN 24
319#define NUM_KER_WIDTH 2
320#define UDP_WILD_SRCH_LIMIT_LBN 16
321#define UDP_WILD_SRCH_LIMIT_WIDTH 8
322#define TCP_WILD_SRCH_LIMIT_LBN 8
323#define TCP_WILD_SRCH_LIMIT_WIDTH 8
324#define TCP_FULL_SRCH_LIMIT_LBN 0
325#define TCP_FULL_SRCH_LIMIT_WIDTH 8
326
327/* RX queue flush register */
328#define RX_FLUSH_DESCQ_REG_KER 0x0820
329#define RX_FLUSH_DESCQ_CMD_LBN 24
330#define RX_FLUSH_DESCQ_CMD_WIDTH 1
331#define RX_FLUSH_DESCQ_LBN 0
332#define RX_FLUSH_DESCQ_WIDTH 12
333
334/* Receive descriptor update register */
335#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
336#define RX_DESC_WPTR_DWORD_LBN 0
337#define RX_DESC_WPTR_DWORD_WIDTH 12
338
339/* Receive descriptor cache configuration register */
340#define RX_DC_CFG_REG_KER 0x840
341#define RX_DC_SIZE_LBN 0
342#define RX_DC_SIZE_WIDTH 2
343
344#define RX_DC_PF_WM_REG_KER 0x850
345#define RX_DC_PF_LWM_LBN 0
346#define RX_DC_PF_LWM_WIDTH 6
347
348/* RX no descriptor drop counter */
349#define RX_NODESC_DROP_REG_KER 0x880
350#define RX_NODESC_DROP_CNT_LBN 0
351#define RX_NODESC_DROP_CNT_WIDTH 16
352
353/* RX black magic register */
354#define RX_SELF_RST_REG_KER 0x890
355#define RX_ISCSI_DIS_LBN 17
356#define RX_ISCSI_DIS_WIDTH 1
357#define RX_NODESC_WAIT_DIS_LBN 9
358#define RX_NODESC_WAIT_DIS_WIDTH 1
359#define RX_RECOVERY_EN_LBN 8
360#define RX_RECOVERY_EN_WIDTH 1
361
362/* TX queue flush register */
363#define TX_FLUSH_DESCQ_REG_KER 0x0a00
364#define TX_FLUSH_DESCQ_CMD_LBN 12
365#define TX_FLUSH_DESCQ_CMD_WIDTH 1
366#define TX_FLUSH_DESCQ_LBN 0
367#define TX_FLUSH_DESCQ_WIDTH 12
368
369/* Transmit descriptor update register */
370#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
371#define TX_DESC_WPTR_DWORD_LBN 0
372#define TX_DESC_WPTR_DWORD_WIDTH 12
373
374/* Transmit descriptor cache configuration register */
375#define TX_DC_CFG_REG_KER 0xa20
376#define TX_DC_SIZE_LBN 0
377#define TX_DC_SIZE_WIDTH 2
378
379/* Transmit checksum configuration register (A0/A1 only) */
380#define TX_CHKSM_CFG_REG_KER_A1 0xa30
381
382/* Transmit configuration register */
383#define TX_CFG_REG_KER 0xa50
384#define TX_NO_EOP_DISC_EN_LBN 5
385#define TX_NO_EOP_DISC_EN_WIDTH 1
386
387/* Transmit configuration register 2 */
388#define TX_CFG2_REG_KER 0xa80
389#define TX_CSR_PUSH_EN_LBN 89
390#define TX_CSR_PUSH_EN_WIDTH 1
391#define TX_RX_SPACER_LBN 64
392#define TX_RX_SPACER_WIDTH 8
393#define TX_SW_EV_EN_LBN 59
394#define TX_SW_EV_EN_WIDTH 1
395#define TX_RX_SPACER_EN_LBN 57
396#define TX_RX_SPACER_EN_WIDTH 1
397#define TX_PREF_THRESHOLD_LBN 19
398#define TX_PREF_THRESHOLD_WIDTH 2
399#define TX_ONE_PKT_PER_Q_LBN 18
400#define TX_ONE_PKT_PER_Q_WIDTH 1
401#define TX_DIS_NON_IP_EV_LBN 17
402#define TX_DIS_NON_IP_EV_WIDTH 1
403#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
404#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
405
406/* PHY management transmit data register */
407#define MD_TXD_REG_KER 0xc00
408#define MD_TXD_LBN 0
409#define MD_TXD_WIDTH 16
410
411/* PHY management receive data register */
412#define MD_RXD_REG_KER 0xc10
413#define MD_RXD_LBN 0
414#define MD_RXD_WIDTH 16
415
416/* PHY management configuration & status register */
417#define MD_CS_REG_KER 0xc20
418#define MD_GC_LBN 4
419#define MD_GC_WIDTH 1
420#define MD_RIC_LBN 2
421#define MD_RIC_WIDTH 1
422#define MD_RDC_LBN 1
423#define MD_RDC_WIDTH 1
424#define MD_WRC_LBN 0
425#define MD_WRC_WIDTH 1
426
427/* PHY management PHY address register */
428#define MD_PHY_ADR_REG_KER 0xc30
429#define MD_PHY_ADR_LBN 0
430#define MD_PHY_ADR_WIDTH 16
431
432/* PHY management ID register */
433#define MD_ID_REG_KER 0xc40
434#define MD_PRT_ADR_LBN 11
435#define MD_PRT_ADR_WIDTH 5
436#define MD_DEV_ADR_LBN 6
437#define MD_DEV_ADR_WIDTH 5
438/* Used for writing both at once */
439#define MD_PRT_DEV_ADR_LBN 6
440#define MD_PRT_DEV_ADR_WIDTH 10
441
442/* PHY management status & mask register (DWORD read only) */
443#define MD_STAT_REG_KER 0xc50
444#define MD_BSERR_LBN 2
445#define MD_BSERR_WIDTH 1
446#define MD_LNFL_LBN 1
447#define MD_LNFL_WIDTH 1
448#define MD_BSY_LBN 0
449#define MD_BSY_WIDTH 1
450
451/* Port 0 and 1 MAC stats registers */
452#define MAC0_STAT_DMA_REG_KER 0xc60
453#define MAC_STAT_DMA_CMD_LBN 48
454#define MAC_STAT_DMA_CMD_WIDTH 1
455#define MAC_STAT_DMA_ADR_LBN 0
456#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
457
458/* Port 0 and 1 MAC control registers */
459#define MAC0_CTRL_REG_KER 0xc80
460#define MAC_XOFF_VAL_LBN 16
461#define MAC_XOFF_VAL_WIDTH 16
462#define TXFIFO_DRAIN_EN_B0_LBN 7
463#define TXFIFO_DRAIN_EN_B0_WIDTH 1
464#define MAC_BCAD_ACPT_LBN 4
465#define MAC_BCAD_ACPT_WIDTH 1
466#define MAC_UC_PROM_LBN 3
467#define MAC_UC_PROM_WIDTH 1
468#define MAC_LINK_STATUS_LBN 2
469#define MAC_LINK_STATUS_WIDTH 1
470#define MAC_SPEED_LBN 0
471#define MAC_SPEED_WIDTH 2
472
473/* 10G XAUI XGXS default values */
474#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
475#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
476#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
477
478/* Multicast address hash table */
479#define MAC_MCAST_HASH_REG0_KER 0xca0
480#define MAC_MCAST_HASH_REG1_KER 0xcb0
481
482/* GMAC registers */
483#define FALCON_GMAC_REGBANK 0xe00
484#define FALCON_GMAC_REGBANK_SIZE 0x200
485#define FALCON_GMAC_REG_SIZE 0x10
486
487/* XMAC registers */
488#define FALCON_XMAC_REGBANK 0x1200
489#define FALCON_XMAC_REGBANK_SIZE 0x200
490#define FALCON_XMAC_REG_SIZE 0x10
491
492/* XGMAC address register low */
493#define XM_ADR_LO_REG_MAC 0x00
494#define XM_ADR_3_LBN 24
495#define XM_ADR_3_WIDTH 8
496#define XM_ADR_2_LBN 16
497#define XM_ADR_2_WIDTH 8
498#define XM_ADR_1_LBN 8
499#define XM_ADR_1_WIDTH 8
500#define XM_ADR_0_LBN 0
501#define XM_ADR_0_WIDTH 8
502
503/* XGMAC address register high */
504#define XM_ADR_HI_REG_MAC 0x01
505#define XM_ADR_5_LBN 8
506#define XM_ADR_5_WIDTH 8
507#define XM_ADR_4_LBN 0
508#define XM_ADR_4_WIDTH 8
509
510/* XGMAC global configuration */
511#define XM_GLB_CFG_REG_MAC 0x02
512#define XM_RX_STAT_EN_LBN 11
513#define XM_RX_STAT_EN_WIDTH 1
514#define XM_TX_STAT_EN_LBN 10
515#define XM_TX_STAT_EN_WIDTH 1
516#define XM_RX_JUMBO_MODE_LBN 6
517#define XM_RX_JUMBO_MODE_WIDTH 1
518#define XM_INTCLR_MODE_LBN 3
519#define XM_INTCLR_MODE_WIDTH 1
520#define XM_CORE_RST_LBN 0
521#define XM_CORE_RST_WIDTH 1
522
523/* XGMAC transmit configuration */
524#define XM_TX_CFG_REG_MAC 0x03
525#define XM_IPG_LBN 16
526#define XM_IPG_WIDTH 4
527#define XM_FCNTL_LBN 10
528#define XM_FCNTL_WIDTH 1
529#define XM_TXCRC_LBN 8
530#define XM_TXCRC_WIDTH 1
531#define XM_AUTO_PAD_LBN 5
532#define XM_AUTO_PAD_WIDTH 1
533#define XM_TX_PRMBL_LBN 2
534#define XM_TX_PRMBL_WIDTH 1
535#define XM_TXEN_LBN 1
536#define XM_TXEN_WIDTH 1
537
538/* XGMAC receive configuration */
539#define XM_RX_CFG_REG_MAC 0x04
540#define XM_PASS_CRC_ERR_LBN 25
541#define XM_PASS_CRC_ERR_WIDTH 1
542#define XM_ACPT_ALL_MCAST_LBN 11
543#define XM_ACPT_ALL_MCAST_WIDTH 1
544#define XM_ACPT_ALL_UCAST_LBN 9
545#define XM_ACPT_ALL_UCAST_WIDTH 1
546#define XM_AUTO_DEPAD_LBN 8
547#define XM_AUTO_DEPAD_WIDTH 1
548#define XM_RXEN_LBN 1
549#define XM_RXEN_WIDTH 1
550
551/* XGMAC management interrupt mask register */
552#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
553#define XM_MSK_PRMBLE_ERR_LBN 2
554#define XM_MSK_PRMBLE_ERR_WIDTH 1
555#define XM_MSK_RMTFLT_LBN 1
556#define XM_MSK_RMTFLT_WIDTH 1
557#define XM_MSK_LCLFLT_LBN 0
558#define XM_MSK_LCLFLT_WIDTH 1
559
560/* XGMAC flow control register */
561#define XM_FC_REG_MAC 0x7
562#define XM_PAUSE_TIME_LBN 16
563#define XM_PAUSE_TIME_WIDTH 16
564#define XM_DIS_FCNTL_LBN 0
565#define XM_DIS_FCNTL_WIDTH 1
566
567/* XGMAC pause time count register */
568#define XM_PAUSE_TIME_REG_MAC 0x9
569
570/* XGMAC transmit parameter register */
571#define XM_TX_PARAM_REG_MAC 0x0d
572#define XM_TX_JUMBO_MODE_LBN 31
573#define XM_TX_JUMBO_MODE_WIDTH 1
574#define XM_MAX_TX_FRM_SIZE_LBN 16
575#define XM_MAX_TX_FRM_SIZE_WIDTH 14
576
577/* XGMAC receive parameter register */
578#define XM_RX_PARAM_REG_MAC 0x0e
579#define XM_MAX_RX_FRM_SIZE_LBN 0
580#define XM_MAX_RX_FRM_SIZE_WIDTH 14
581
582/* XGMAC management interrupt status register */
583#define XM_MGT_INT_REG_MAC_B0 0x0f
584#define XM_PRMBLE_ERR 2
585#define XM_PRMBLE_WIDTH 1
586#define XM_RMTFLT_LBN 1
587#define XM_RMTFLT_WIDTH 1
588#define XM_LCLFLT_LBN 0
589#define XM_LCLFLT_WIDTH 1
590
591/* XGXS/XAUI powerdown/reset register */
592#define XX_PWR_RST_REG_MAC 0x10
593
594#define XX_PWRDND_EN_LBN 15
595#define XX_PWRDND_EN_WIDTH 1
596#define XX_PWRDNC_EN_LBN 14
597#define XX_PWRDNC_EN_WIDTH 1
598#define XX_PWRDNB_EN_LBN 13
599#define XX_PWRDNB_EN_WIDTH 1
600#define XX_PWRDNA_EN_LBN 12
601#define XX_PWRDNA_EN_WIDTH 1
602#define XX_RSTPLLCD_EN_LBN 9
603#define XX_RSTPLLCD_EN_WIDTH 1
604#define XX_RSTPLLAB_EN_LBN 8
605#define XX_RSTPLLAB_EN_WIDTH 1
606#define XX_RESETD_EN_LBN 7
607#define XX_RESETD_EN_WIDTH 1
608#define XX_RESETC_EN_LBN 6
609#define XX_RESETC_EN_WIDTH 1
610#define XX_RESETB_EN_LBN 5
611#define XX_RESETB_EN_WIDTH 1
612#define XX_RESETA_EN_LBN 4
613#define XX_RESETA_EN_WIDTH 1
614#define XX_RSTXGXSRX_EN_LBN 2
615#define XX_RSTXGXSRX_EN_WIDTH 1
616#define XX_RSTXGXSTX_EN_LBN 1
617#define XX_RSTXGXSTX_EN_WIDTH 1
618#define XX_RST_XX_EN_LBN 0
619#define XX_RST_XX_EN_WIDTH 1
620
621/* XGXS/XAUI powerdown/reset control register */
622#define XX_SD_CTL_REG_MAC 0x11
623#define XX_HIDRVD_LBN 15
624#define XX_HIDRVD_WIDTH 1
625#define XX_LODRVD_LBN 14
626#define XX_LODRVD_WIDTH 1
627#define XX_HIDRVC_LBN 13
628#define XX_HIDRVC_WIDTH 1
629#define XX_LODRVC_LBN 12
630#define XX_LODRVC_WIDTH 1
631#define XX_HIDRVB_LBN 11
632#define XX_HIDRVB_WIDTH 1
633#define XX_LODRVB_LBN 10
634#define XX_LODRVB_WIDTH 1
635#define XX_HIDRVA_LBN 9
636#define XX_HIDRVA_WIDTH 1
637#define XX_LODRVA_LBN 8
638#define XX_LODRVA_WIDTH 1
639
640#define XX_TXDRV_CTL_REG_MAC 0x12
641#define XX_DEQD_LBN 28
642#define XX_DEQD_WIDTH 4
643#define XX_DEQC_LBN 24
644#define XX_DEQC_WIDTH 4
645#define XX_DEQB_LBN 20
646#define XX_DEQB_WIDTH 4
647#define XX_DEQA_LBN 16
648#define XX_DEQA_WIDTH 4
649#define XX_DTXD_LBN 12
650#define XX_DTXD_WIDTH 4
651#define XX_DTXC_LBN 8
652#define XX_DTXC_WIDTH 4
653#define XX_DTXB_LBN 4
654#define XX_DTXB_WIDTH 4
655#define XX_DTXA_LBN 0
656#define XX_DTXA_WIDTH 4
657
658/* XAUI XGXS core status register */
659#define XX_FORCE_SIG_DECODE_FORCED 0xff
660#define XX_CORE_STAT_REG_MAC 0x16
661#define XX_ALIGN_DONE_LBN 20
662#define XX_ALIGN_DONE_WIDTH 1
663#define XX_SYNC_STAT_LBN 16
664#define XX_SYNC_STAT_WIDTH 4
665#define XX_SYNC_STAT_DECODE_SYNCED 0xf
666#define XX_COMMA_DET_LBN 12
667#define XX_COMMA_DET_WIDTH 4
668#define XX_COMMA_DET_DECODE_DETECTED 0xf
669#define XX_COMMA_DET_RESET 0xf
670#define XX_CHARERR_LBN 4
671#define XX_CHARERR_WIDTH 4
672#define XX_CHARERR_RESET 0xf
673#define XX_DISPERR_LBN 0
674#define XX_DISPERR_WIDTH 4
675#define XX_DISPERR_RESET 0xf
676
677/* Receive filter table */
678#define RX_FILTER_TBL0 0xF00000
679
680/* Receive descriptor pointer table */
681#define RX_DESC_PTR_TBL_KER_A1 0x11800
682#define RX_DESC_PTR_TBL_KER_B0 0xF40000
683#define RX_DESC_PTR_TBL_KER_P0 0x900
684#define RX_ISCSI_DDIG_EN_LBN 88
685#define RX_ISCSI_DDIG_EN_WIDTH 1
686#define RX_ISCSI_HDIG_EN_LBN 87
687#define RX_ISCSI_HDIG_EN_WIDTH 1
688#define RX_DESCQ_BUF_BASE_ID_LBN 36
689#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
690#define RX_DESCQ_EVQ_ID_LBN 24
691#define RX_DESCQ_EVQ_ID_WIDTH 12
692#define RX_DESCQ_OWNER_ID_LBN 10
693#define RX_DESCQ_OWNER_ID_WIDTH 14
694#define RX_DESCQ_LABEL_LBN 5
695#define RX_DESCQ_LABEL_WIDTH 5
696#define RX_DESCQ_SIZE_LBN 3
697#define RX_DESCQ_SIZE_WIDTH 2
698#define RX_DESCQ_SIZE_4K 3
699#define RX_DESCQ_SIZE_2K 2
700#define RX_DESCQ_SIZE_1K 1
701#define RX_DESCQ_SIZE_512 0
702#define RX_DESCQ_TYPE_LBN 2
703#define RX_DESCQ_TYPE_WIDTH 1
704#define RX_DESCQ_JUMBO_LBN 1
705#define RX_DESCQ_JUMBO_WIDTH 1
706#define RX_DESCQ_EN_LBN 0
707#define RX_DESCQ_EN_WIDTH 1
708
709/* Transmit descriptor pointer table */
710#define TX_DESC_PTR_TBL_KER_A1 0x11900
711#define TX_DESC_PTR_TBL_KER_B0 0xF50000
712#define TX_DESC_PTR_TBL_KER_P0 0xa40
713#define TX_NON_IP_DROP_DIS_B0_LBN 91
714#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
715#define TX_IP_CHKSM_DIS_B0_LBN 90
716#define TX_IP_CHKSM_DIS_B0_WIDTH 1
717#define TX_TCP_CHKSM_DIS_B0_LBN 89
718#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
719#define TX_DESCQ_EN_LBN 88
720#define TX_DESCQ_EN_WIDTH 1
721#define TX_ISCSI_DDIG_EN_LBN 87
722#define TX_ISCSI_DDIG_EN_WIDTH 1
723#define TX_ISCSI_HDIG_EN_LBN 86
724#define TX_ISCSI_HDIG_EN_WIDTH 1
725#define TX_DESCQ_BUF_BASE_ID_LBN 36
726#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
727#define TX_DESCQ_EVQ_ID_LBN 24
728#define TX_DESCQ_EVQ_ID_WIDTH 12
729#define TX_DESCQ_OWNER_ID_LBN 10
730#define TX_DESCQ_OWNER_ID_WIDTH 14
731#define TX_DESCQ_LABEL_LBN 5
732#define TX_DESCQ_LABEL_WIDTH 5
733#define TX_DESCQ_SIZE_LBN 3
734#define TX_DESCQ_SIZE_WIDTH 2
735#define TX_DESCQ_SIZE_4K 3
736#define TX_DESCQ_SIZE_2K 2
737#define TX_DESCQ_SIZE_1K 1
738#define TX_DESCQ_SIZE_512 0
739#define TX_DESCQ_TYPE_LBN 1
740#define TX_DESCQ_TYPE_WIDTH 2
741
742/* Event queue pointer */
743#define EVQ_PTR_TBL_KER_A1 0x11a00
744#define EVQ_PTR_TBL_KER_B0 0xf60000
745#define EVQ_PTR_TBL_KER_P0 0x500
746#define EVQ_EN_LBN 23
747#define EVQ_EN_WIDTH 1
748#define EVQ_SIZE_LBN 20
749#define EVQ_SIZE_WIDTH 3
750#define EVQ_SIZE_32K 6
751#define EVQ_SIZE_16K 5
752#define EVQ_SIZE_8K 4
753#define EVQ_SIZE_4K 3
754#define EVQ_SIZE_2K 2
755#define EVQ_SIZE_1K 1
756#define EVQ_SIZE_512 0
757#define EVQ_BUF_BASE_ID_LBN 0
758#define EVQ_BUF_BASE_ID_WIDTH 20
759
760/* Event queue read pointer */
761#define EVQ_RPTR_REG_KER_A1 0x11b00
762#define EVQ_RPTR_REG_KER_B0 0xfa0000
763#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
764#define EVQ_RPTR_DWORD_LBN 0
765#define EVQ_RPTR_DWORD_WIDTH 14
766
767/* RSS indirection table */
768#define RX_RSS_INDIR_TBL_B0 0xFB0000
769#define RX_RSS_INDIR_ENT_B0_LBN 0
770#define RX_RSS_INDIR_ENT_B0_WIDTH 6
771
772/* Special buffer descriptors (full-mode) */
773#define BUF_FULL_TBL_KER_A1 0x8000
774#define BUF_FULL_TBL_KER_B0 0x800000
775#define IP_DAT_BUF_SIZE_LBN 50
776#define IP_DAT_BUF_SIZE_WIDTH 1
777#define IP_DAT_BUF_SIZE_8K 1
778#define IP_DAT_BUF_SIZE_4K 0
779#define BUF_ADR_REGION_LBN 48
780#define BUF_ADR_REGION_WIDTH 2
781#define BUF_ADR_FBUF_LBN 14
782#define BUF_ADR_FBUF_WIDTH 34
783#define BUF_OWNER_ID_FBUF_LBN 0
784#define BUF_OWNER_ID_FBUF_WIDTH 14
785
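/* As a hedged illustration only: a minimal sketch of how the fields
 * above compose into one full-mode buffer table entry (the helper
 * name is hypothetical; the real code lives in falcon.c).  The 4K
 * page address is shifted down 12 bits to form the 34-bit FBUF
 * field. */
static inline void example_build_buf_desc(efx_qword_t *entry,
					  dma_addr_t dma_addr)
{
	EFX_POPULATE_QWORD_4(*entry,
			     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
			     BUF_ADR_REGION, 0,
			     BUF_ADR_FBUF, dma_addr >> 12,
			     BUF_OWNER_ID_FBUF, 0);
}
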
786/* Transmit descriptor */
787#define TX_KER_PORT_LBN 63
788#define TX_KER_PORT_WIDTH 1
789#define TX_KER_CONT_LBN 62
790#define TX_KER_CONT_WIDTH 1
791#define TX_KER_BYTE_CNT_LBN 48
792#define TX_KER_BYTE_CNT_WIDTH 14
793#define TX_KER_BUF_REGION_LBN 46
794#define TX_KER_BUF_REGION_WIDTH 2
795#define TX_KER_BUF_REGION0_DECODE 0
796#define TX_KER_BUF_REGION1_DECODE 1
797#define TX_KER_BUF_REGION2_DECODE 2
798#define TX_KER_BUF_REGION3_DECODE 3
799#define TX_KER_BUF_ADR_LBN 0
800#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
801
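/* Hedged sketch (hypothetical helper; the real builder is in the
 * driver's TX path): populating a physical-addressing transmit
 * descriptor from the field definitions above. */
static inline void example_build_tx_desc(efx_qword_t *txd,
					 dma_addr_t dma_addr,
					 unsigned int len, int cont)
{
	EFX_POPULATE_QWORD_5(*txd,
			     TX_KER_PORT, 0,
			     TX_KER_CONT, cont,
			     TX_KER_BYTE_CNT, len,
			     TX_KER_BUF_REGION, TX_KER_BUF_REGION0_DECODE,
			     TX_KER_BUF_ADR, dma_addr);
}
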
802/* Receive descriptor */
803#define RX_KER_BUF_SIZE_LBN 48
804#define RX_KER_BUF_SIZE_WIDTH 14
805#define RX_KER_BUF_REGION_LBN 46
806#define RX_KER_BUF_REGION_WIDTH 2
807#define RX_KER_BUF_REGION0_DECODE 0
808#define RX_KER_BUF_REGION1_DECODE 1
809#define RX_KER_BUF_REGION2_DECODE 2
810#define RX_KER_BUF_REGION3_DECODE 3
811#define RX_KER_BUF_ADR_LBN 0
812#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
813
814/**************************************************************************
815 *
816 * Falcon events
817 *
818 **************************************************************************
819 */
820
821/* Event queue entries */
822#define EV_CODE_LBN 60
823#define EV_CODE_WIDTH 4
824#define RX_IP_EV_DECODE 0
825#define TX_IP_EV_DECODE 2
826#define DRIVER_EV_DECODE 5
827#define GLOBAL_EV_DECODE 6
828#define DRV_GEN_EV_DECODE 7
829#define WHOLE_EVENT_LBN 0
830#define WHOLE_EVENT_WIDTH 64
831
832/* Receive events */
833#define RX_EV_PKT_OK_LBN 56
834#define RX_EV_PKT_OK_WIDTH 1
835#define RX_EV_PAUSE_FRM_ERR_LBN 55
836#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
837#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
838#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
839#define RX_EV_IF_FRAG_ERR_LBN 53
840#define RX_EV_IF_FRAG_ERR_WIDTH 1
841#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
842#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
843#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
844#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
845#define RX_EV_ETH_CRC_ERR_LBN 50
846#define RX_EV_ETH_CRC_ERR_WIDTH 1
847#define RX_EV_FRM_TRUNC_LBN 49
848#define RX_EV_FRM_TRUNC_WIDTH 1
849#define RX_EV_DRIB_NIB_LBN 48
850#define RX_EV_DRIB_NIB_WIDTH 1
851#define RX_EV_TOBE_DISC_LBN 47
852#define RX_EV_TOBE_DISC_WIDTH 1
853#define RX_EV_PKT_TYPE_LBN 44
854#define RX_EV_PKT_TYPE_WIDTH 3
855#define RX_EV_PKT_TYPE_ETH_DECODE 0
856#define RX_EV_PKT_TYPE_LLC_DECODE 1
857#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
858#define RX_EV_PKT_TYPE_VLAN_DECODE 3
859#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
860#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
861#define RX_EV_HDR_TYPE_LBN 42
862#define RX_EV_HDR_TYPE_WIDTH 2
863#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
864#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
865#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
866#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
867#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
868 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
869#define RX_EV_MCAST_HASH_MATCH_LBN 40
870#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
871#define RX_EV_MCAST_PKT_LBN 39
872#define RX_EV_MCAST_PKT_WIDTH 1
873#define RX_EV_Q_LABEL_LBN 32
874#define RX_EV_Q_LABEL_WIDTH 5
875#define RX_EV_JUMBO_CONT_LBN 31
876#define RX_EV_JUMBO_CONT_WIDTH 1
877#define RX_EV_BYTE_CNT_LBN 16
878#define RX_EV_BYTE_CNT_WIDTH 14
879#define RX_EV_SOP_LBN 15
880#define RX_EV_SOP_WIDTH 1
881#define RX_EV_DESC_PTR_LBN 0
882#define RX_EV_DESC_PTR_WIDTH 12
883
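/* Hedged sketch of how the receive-event fields above are consumed
 * (hypothetical fragment; the real decode is in falcon.c).
 * EFX_QWORD_FIELD() extracts the bits named by an _LBN/_WIDTH pair. */
static inline int example_rx_event_csummed(efx_qword_t *event)
{
	unsigned int hdr_type;

	if (EFX_QWORD_FIELD(*event, EV_CODE) != RX_IP_EV_DECODE)
		return 0;
	hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	return (RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) &&
		EFX_QWORD_FIELD(*event, RX_EV_PKT_OK));
}
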
884/* Transmit events */
885#define TX_EV_PKT_ERR_LBN 38
886#define TX_EV_PKT_ERR_WIDTH 1
887#define TX_EV_Q_LABEL_LBN 32
888#define TX_EV_Q_LABEL_WIDTH 5
889#define TX_EV_WQ_FF_FULL_LBN 15
890#define TX_EV_WQ_FF_FULL_WIDTH 1
891#define TX_EV_COMP_LBN 12
892#define TX_EV_COMP_WIDTH 1
893#define TX_EV_DESC_PTR_LBN 0
894#define TX_EV_DESC_PTR_WIDTH 12
895
896/* Driver events */
897#define DRIVER_EV_SUB_CODE_LBN 56
898#define DRIVER_EV_SUB_CODE_WIDTH 4
899#define DRIVER_EV_SUB_DATA_LBN 0
900#define DRIVER_EV_SUB_DATA_WIDTH 14
901#define TX_DESCQ_FLS_DONE_EV_DECODE 0
902#define RX_DESCQ_FLS_DONE_EV_DECODE 1
903#define EVQ_INIT_DONE_EV_DECODE 2
904#define EVQ_NOT_EN_EV_DECODE 3
905#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
906#define SRM_UPD_DONE_EV_DECODE 5
907#define WAKE_UP_EV_DECODE 6
908#define TX_PKT_NON_TCP_UDP_DECODE 9
909#define TIMER_EV_DECODE 10
910#define RX_RECOVERY_EV_DECODE 11
911#define RX_DSC_ERROR_EV_DECODE 14
912#define TX_DSC_ERROR_EV_DECODE 15
913#define DRIVER_EV_TX_DESCQ_ID_LBN 0
914#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
915#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
916#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
917#define DRIVER_EV_RX_DESCQ_ID_LBN 0
918#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
919#define SRM_CLR_EV_DECODE 0
920#define SRM_UPD_EV_DECODE 1
921#define SRM_ILLCLR_EV_DECODE 2
922
923/* Global events */
924#define RX_RECOVERY_B0_LBN 12
925#define RX_RECOVERY_B0_WIDTH 1
926#define XG_MNT_INTR_B0_LBN 11
927#define XG_MNT_INTR_B0_WIDTH 1
928#define RX_RECOVERY_A1_LBN 11
929#define RX_RECOVERY_A1_WIDTH 1
930#define XG_PHY_INTR_LBN 9
931#define XG_PHY_INTR_WIDTH 1
932#define G_PHY1_INTR_LBN 8
933#define G_PHY1_INTR_WIDTH 1
934#define G_PHY0_INTR_LBN 7
935#define G_PHY0_INTR_WIDTH 1
936
937/* Driver-generated test events */
938#define EVQ_MAGIC_LBN 0
939#define EVQ_MAGIC_WIDTH 32
940
941/**************************************************************************
942 *
943 * Falcon MAC stats
944 *
945 **************************************************************************
946 *
947 */
948#define GRxGoodOct_offset 0x0
949#define GRxBadOct_offset 0x8
950#define GRxMissPkt_offset 0x10
951#define GRxFalseCRS_offset 0x14
952#define GRxPausePkt_offset 0x18
953#define GRxBadPkt_offset 0x1C
954#define GRxUcastPkt_offset 0x20
955#define GRxMcastPkt_offset 0x24
956#define GRxBcastPkt_offset 0x28
957#define GRxGoodLt64Pkt_offset 0x2C
958#define GRxBadLt64Pkt_offset 0x30
959#define GRx64Pkt_offset 0x34
960#define GRx65to127Pkt_offset 0x38
961#define GRx128to255Pkt_offset 0x3C
962#define GRx256to511Pkt_offset 0x40
963#define GRx512to1023Pkt_offset 0x44
964#define GRx1024to15xxPkt_offset 0x48
965#define GRx15xxtoJumboPkt_offset 0x4C
966#define GRxGtJumboPkt_offset 0x50
967#define GRxFcsErr64to15xxPkt_offset 0x54
968#define GRxFcsErr15xxtoJumboPkt_offset 0x58
969#define GRxFcsErrGtJumboPkt_offset 0x5C
970#define GTxGoodBadOct_offset 0x80
971#define GTxGoodOct_offset 0x88
972#define GTxSglColPkt_offset 0x90
973#define GTxMultColPkt_offset 0x94
974#define GTxExColPkt_offset 0x98
975#define GTxDefPkt_offset 0x9C
976#define GTxLateCol_offset 0xA0
977#define GTxExDefPkt_offset 0xA4
978#define GTxPausePkt_offset 0xA8
979#define GTxBadPkt_offset 0xAC
980#define GTxUcastPkt_offset 0xB0
981#define GTxMcastPkt_offset 0xB4
982#define GTxBcastPkt_offset 0xB8
983#define GTxLt64Pkt_offset 0xBC
984#define GTx64Pkt_offset 0xC0
985#define GTx65to127Pkt_offset 0xC4
986#define GTx128to255Pkt_offset 0xC8
987#define GTx256to511Pkt_offset 0xCC
988#define GTx512to1023Pkt_offset 0xD0
989#define GTx1024to15xxPkt_offset 0xD4
990#define GTx15xxtoJumboPkt_offset 0xD8
991#define GTxGtJumboPkt_offset 0xDC
992#define GTxNonTcpUdpPkt_offset 0xE0
993#define GTxMacSrcErrPkt_offset 0xE4
994#define GTxIpSrcErrPkt_offset 0xE8
995#define GDmaDone_offset 0xEC
996
997#define XgRxOctets_offset 0x0
998#define XgRxOctets_WIDTH 48
999#define XgRxOctetsOK_offset 0x8
1000#define XgRxOctetsOK_WIDTH 48
1001#define XgRxPkts_offset 0x10
1002#define XgRxPkts_WIDTH 32
1003#define XgRxPktsOK_offset 0x14
1004#define XgRxPktsOK_WIDTH 32
1005#define XgRxBroadcastPkts_offset 0x18
1006#define XgRxBroadcastPkts_WIDTH 32
1007#define XgRxMulticastPkts_offset 0x1C
1008#define XgRxMulticastPkts_WIDTH 32
1009#define XgRxUnicastPkts_offset 0x20
1010#define XgRxUnicastPkts_WIDTH 32
1011#define XgRxUndersizePkts_offset 0x24
1012#define XgRxUndersizePkts_WIDTH 32
1013#define XgRxOversizePkts_offset 0x28
1014#define XgRxOversizePkts_WIDTH 32
1015#define XgRxJabberPkts_offset 0x2C
1016#define XgRxJabberPkts_WIDTH 32
1017#define XgRxUndersizeFCSerrorPkts_offset 0x30
1018#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1019#define XgRxDropEvents_offset 0x34
1020#define XgRxDropEvents_WIDTH 32
1021#define XgRxFCSerrorPkts_offset 0x38
1022#define XgRxFCSerrorPkts_WIDTH 32
1023#define XgRxAlignError_offset 0x3C
1024#define XgRxAlignError_WIDTH 32
1025#define XgRxSymbolError_offset 0x40
1026#define XgRxSymbolError_WIDTH 32
1027#define XgRxInternalMACError_offset 0x44
1028#define XgRxInternalMACError_WIDTH 32
1029#define XgRxControlPkts_offset 0x48
1030#define XgRxControlPkts_WIDTH 32
1031#define XgRxPausePkts_offset 0x4C
1032#define XgRxPausePkts_WIDTH 32
1033#define XgRxPkts64Octets_offset 0x50
1034#define XgRxPkts64Octets_WIDTH 32
1035#define XgRxPkts65to127Octets_offset 0x54
1036#define XgRxPkts65to127Octets_WIDTH 32
1037#define XgRxPkts128to255Octets_offset 0x58
1038#define XgRxPkts128to255Octets_WIDTH 32
1039#define XgRxPkts256to511Octets_offset 0x5C
1040#define XgRxPkts256to511Octets_WIDTH 32
1041#define XgRxPkts512to1023Octets_offset 0x60
1042#define XgRxPkts512to1023Octets_WIDTH 32
1043#define XgRxPkts1024to15xxOctets_offset 0x64
1044#define XgRxPkts1024to15xxOctets_WIDTH 32
1045#define XgRxPkts15xxtoMaxOctets_offset 0x68
1046#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1047#define XgRxLengthError_offset 0x6C
1048#define XgRxLengthError_WIDTH 32
1049#define XgTxPkts_offset 0x80
1050#define XgTxPkts_WIDTH 32
1051#define XgTxOctets_offset 0x88
1052#define XgTxOctets_WIDTH 48
1053#define XgTxMulticastPkts_offset 0x90
1054#define XgTxMulticastPkts_WIDTH 32
1055#define XgTxBroadcastPkts_offset 0x94
1056#define XgTxBroadcastPkts_WIDTH 32
1057#define XgTxUnicastPkts_offset 0x98
1058#define XgTxUnicastPkts_WIDTH 32
1059#define XgTxControlPkts_offset 0x9C
1060#define XgTxControlPkts_WIDTH 32
1061#define XgTxPausePkts_offset 0xA0
1062#define XgTxPausePkts_WIDTH 32
1063#define XgTxPkts64Octets_offset 0xA4
1064#define XgTxPkts64Octets_WIDTH 32
1065#define XgTxPkts65to127Octets_offset 0xA8
1066#define XgTxPkts65to127Octets_WIDTH 32
1067#define XgTxPkts128to255Octets_offset 0xAC
1068#define XgTxPkts128to255Octets_WIDTH 32
1069#define XgTxPkts256to511Octets_offset 0xB0
1070#define XgTxPkts256to511Octets_WIDTH 32
1071#define XgTxPkts512to1023Octets_offset 0xB4
1072#define XgTxPkts512to1023Octets_WIDTH 32
1073#define XgTxPkts1024to15xxOctets_offset 0xB8
1074#define XgTxPkts1024to15xxOctets_WIDTH 32
1075#define XgTxPkts1519toMaxOctets_offset 0xBC
1076#define XgTxPkts1519toMaxOctets_WIDTH 32
1077#define XgTxUndersizePkts_offset 0xC0
1078#define XgTxUndersizePkts_WIDTH 32
1079#define XgTxOversizePkts_offset 0xC4
1080#define XgTxOversizePkts_WIDTH 32
1081#define XgTxNonTcpUdpPkt_offset 0xC8
1082#define XgTxNonTcpUdpPkt_WIDTH 16
1083#define XgTxMacSrcErrPkt_offset 0xCC
1084#define XgTxMacSrcErrPkt_WIDTH 16
1085#define XgTxIpSrcErrPkt_offset 0xD0
1086#define XgTxIpSrcErrPkt_WIDTH 16
1087#define XgDmaDone_offset 0xD4
1088
1089#define FALCON_STATS_NOT_DONE 0x00000000
1090#define FALCON_STATS_DONE 0xffffffff
1091
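/* Hedged sketch of the statistics-DMA handshake implied by the two
 * sentinels above (hypothetical helper; the real polling loop is
 * elsewhere in the driver, and this assumes <linux/delay.h> and the
 * byte-order helpers are available): the driver seeds the completion
 * word with FALCON_STATS_NOT_DONE, asks the MAC to DMA its statistics
 * block, then polls until the hardware overwrites the word with
 * FALCON_STATS_DONE. */
static inline int example_wait_stats_dma(volatile __le32 *done_word)
{
	int i;

	*done_word = cpu_to_le32(FALCON_STATS_NOT_DONE);
	/* ... kick off the statistics DMA here ... */
	for (i = 0; i < 100; i++) {
		if (*done_word == cpu_to_le32(FALCON_STATS_DONE))
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}
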
1092/* Interrupt status register bits */
1093#define FATAL_INT_LBN 64
1094#define FATAL_INT_WIDTH 1
1095#define INT_EVQS_LBN 40
1096#define INT_EVQS_WIDTH 4
1097
1098/**************************************************************************
1099 *
1100 * Falcon non-volatile configuration
1101 *
1102 **************************************************************************
1103 */
1104
1105/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
1106struct falcon_nvconfig_board_v2 {
1107 __le16 nports;
1108 u8 port0_phy_addr;
1109 u8 port0_phy_type;
1110 u8 port1_phy_addr;
1111 u8 port1_phy_type;
1112 __le16 asic_sub_revision;
1113 __le16 board_revision;
1114} __attribute__ ((packed));
1115
1116#define NVCONFIG_BASE 0x300
1117#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1118struct falcon_nvconfig {
1119 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
1120 u8 mac_address[2][8]; /* 0x310 */
1121 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
1122 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
1123 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
1124 efx_oword_t hw_init_reg; /* 0x350 */
1125 efx_oword_t nic_stat_reg; /* 0x360 */
1126 efx_oword_t glb_ctl_reg; /* 0x370 */
1127 efx_oword_t srm_cfg_reg; /* 0x380 */
1128 efx_oword_t spare_reg; /* 0x390 */
1129 __le16 board_magic_num; /* 0x3A0 */
1130 __le16 board_struct_ver;
1131 __le16 board_checksum;
1132 struct falcon_nvconfig_board_v2 board_v2;
1133} __attribute__ ((packed));
1134
1135#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
new file mode 100644
index 000000000000..ea08184ddfa9
--- /dev/null
+++ b/drivers/net/sfc/falcon_io.h
@@ -0,0 +1,243 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16#include "net_driver.h"
17
18/**************************************************************************
19 *
20 * Falcon hardware access
21 *
22 **************************************************************************
23 *
24 * Notes on locking strategy:
25 *
26 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
27 * registers) atomic writes which necessitates locking.
28 * Under normal operation few writes to the Falcon BAR are made; the most
29 * frequently written registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
30 * TX_DESC_UPD_REG) are special-cased to allow 4-byte (hence lockless) accesses.
31 *
32 * It *is* safe to write to these 4-byte registers in the middle of an
33 * access to an 8-byte or 16-byte register. We therefore use a
34 * spinlock to protect accesses to the larger registers, but no locks
35 * for the 4-byte registers.
36 *
37 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
38 * due to the way the 16-byte registers are "collected" in the Falcon BIU.
39 *
40 * We also lock when carrying out reads, to ensure consistency of the
41 * data (made possible since the BIU reads all 128 bits into a cache).
42 * Reads are very rare, so this isn't a significant performance
43 * impact. (Most data transferred from NIC to host is DMAed directly
44 * into host memory).
45 *
46 * I/O BAR access uses locks for both reads and writes (but is only provided
47 * for testing purposes).
48 */
49
50/* Special buffer descriptors (Falcon SRAM) */
51#define BUF_TBL_KER_A1 0x18000
52#define BUF_TBL_KER_B0 0x800000
53
54
55#if BITS_PER_LONG == 64
56#define FALCON_USE_QWORD_IO 1
57#endif
58
59#define _falcon_writeq(efx, value, reg) \
60 __raw_writeq((__force u64) (value), (efx)->membase + (reg))
61#define _falcon_writel(efx, value, reg) \
62 __raw_writel((__force u32) (value), (efx)->membase + (reg))
63#define _falcon_readq(efx, reg) \
64 ((__force __le64) __raw_readq((efx)->membase + (reg)))
65#define _falcon_readl(efx, reg) \
66 ((__force __le32) __raw_readl((efx)->membase + (reg)))
67
68/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
69static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
70 unsigned int reg)
71{
72 unsigned long flags;
73
74 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
75 EFX_OWORD_VAL(*value));
76
77 spin_lock_irqsave(&efx->biu_lock, flags);
78#ifdef FALCON_USE_QWORD_IO
79 _falcon_writeq(efx, value->u64[0], reg + 0);
80 wmb();
81 _falcon_writeq(efx, value->u64[1], reg + 8);
82#else
83 _falcon_writel(efx, value->u32[0], reg + 0);
84 _falcon_writel(efx, value->u32[1], reg + 4);
85 _falcon_writel(efx, value->u32[2], reg + 8);
86 wmb();
87 _falcon_writel(efx, value->u32[3], reg + 12);
88#endif
89 mmiowb();
90 spin_unlock_irqrestore(&efx->biu_lock, flags);
91}
92
93/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
94static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
95 unsigned int index)
96{
97 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
98 unsigned long flags;
99
100 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
101 reg, EFX_QWORD_VAL(*value));
102
103 spin_lock_irqsave(&efx->biu_lock, flags);
104#ifdef FALCON_USE_QWORD_IO
105 _falcon_writeq(efx, value->u64[0], reg + 0);
106#else
107 _falcon_writel(efx, value->u32[0], reg + 0);
108 wmb();
109 _falcon_writel(efx, value->u32[1], reg + 4);
110#endif
111 mmiowb();
112 spin_unlock_irqrestore(&efx->biu_lock, flags);
113}
114
115/* Write dword to Falcon register that allows partial writes
116 *
117 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
118 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
119 * for lockless writes.
120 */
121static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
122 unsigned int reg)
123{
124 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
125 reg, EFX_DWORD_VAL(*value));
126
127 /* No lock required */
128 _falcon_writel(efx, value->u32[0], reg);
129}
130
131/* Read from a Falcon register
132 *
133 * This reads an entire 16-byte Falcon register in one go, locking as
134 * appropriate. It is essential to read the first dword first, as this
135 * prompts Falcon to load the current value into the shadow register.
136 */
137static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
138 unsigned int reg)
139{
140 unsigned long flags;
141
142 spin_lock_irqsave(&efx->biu_lock, flags);
143 value->u32[0] = _falcon_readl(efx, reg + 0);
144 rmb();
145 value->u32[1] = _falcon_readl(efx, reg + 4);
146 value->u32[2] = _falcon_readl(efx, reg + 8);
147 value->u32[3] = _falcon_readl(efx, reg + 12);
148 spin_unlock_irqrestore(&efx->biu_lock, flags);
149
150 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
151 EFX_OWORD_VAL(*value));
152}
153
154/* This reads an 8-byte Falcon SRAM entry in one go. */
155static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
156 unsigned int index)
157{
158 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
159 unsigned long flags;
160
161 spin_lock_irqsave(&efx->biu_lock, flags);
162#ifdef FALCON_USE_QWORD_IO
163 value->u64[0] = _falcon_readq(efx, reg + 0);
164#else
165 value->u32[0] = _falcon_readl(efx, reg + 0);
166 rmb();
167 value->u32[1] = _falcon_readl(efx, reg + 4);
168#endif
169 spin_unlock_irqrestore(&efx->biu_lock, flags);
170
171 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
172 reg, EFX_QWORD_VAL(*value));
173}
174
175/* Read dword from Falcon register that allows partial writes (sic) */
176static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
177 unsigned int reg)
178{
179 value->u32[0] = _falcon_readl(efx, reg);
180 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
181 reg, EFX_DWORD_VAL(*value));
182}
183
184/* Write to a register forming part of a table */
185static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
186 unsigned int reg, unsigned int index)
187{
188 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
189}
190
191/* Read to a register forming part of a table */
192static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
193 unsigned int reg, unsigned int index)
194{
195 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
196}
197
198/* Write to a dword register forming part of a table */
199static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
200 unsigned int reg, unsigned int index)
201{
202 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
203}
204
205/* Page-mapped register block size */
206#define FALCON_PAGE_BLOCK_SIZE 0x2000
207
208/* Calculate offset to page-mapped register block */
209#define FALCON_PAGED_REG(page, reg) \
210 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
211
212/* As for falcon_write(), but for a page-mapped register. */
213static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
214 unsigned int reg, unsigned int page)
215{
216 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
217}
218
219/* As for falcon_writel(), but for a page-mapped register. */
220static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
221 unsigned int reg, unsigned int page)
222{
223 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
224}
225
226/* Write dword to Falcon page-mapped register with an extra lock.
227 *
228 * As for falcon_writel_page(), but for a register that suffers from
229 * SFC bug 3181. Take out a lock so the BIU collector cannot be
230 * confused. */
231static inline void falcon_writel_page_locked(struct efx_nic *efx,
232 efx_dword_t *value,
233 unsigned int reg,
234 unsigned int page)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&efx->biu_lock, flags);
239 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
240 spin_unlock_irqrestore(&efx->biu_lock, flags);
241}
242
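/* Hedged usage sketch (hypothetical helper): acknowledging events by
 * writing the event queue read pointer.  This is one of the special
 * 4-byte registers described in the notes above, so no lock is taken.
 * The A1 register base and the EVQ_RPTR_DWORD field are borrowed from
 * falcon_hwdefs.h purely for illustration; the base is really
 * selected per chip revision. */
static inline void example_ack_events(struct efx_nic *efx,
				      unsigned int evq, unsigned int rptr)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, rptr);
	falcon_writel_table(efx, &reg, EVQ_RPTR_REG_KER_A1, evq);
}
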
243#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
new file mode 100644
index 000000000000..aa7521b24a5d
--- /dev/null
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -0,0 +1,585 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "falcon.h"
15#include "falcon_hwdefs.h"
16#include "falcon_io.h"
17#include "mac.h"
18#include "gmii.h"
19#include "mdio_10g.h"
20#include "phy.h"
21#include "boards.h"
22#include "workarounds.h"
23
24/**************************************************************************
25 *
26 * MAC register access
27 *
28 **************************************************************************/
29
30/* Offset of an XMAC register within Falcon */
31#define FALCON_XMAC_REG(mac_reg) \
32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
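/* e.g. XM_RX_CFG_REG_MAC (0x04) maps to 0x1200 + 0x04 * 0x10 = 0x1240 */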
33
34void falcon_xmac_writel(struct efx_nic *efx,
35 efx_dword_t *value, unsigned int mac_reg)
36{
37 efx_oword_t temp;
38
39 EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
40 falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
41}
42
43void falcon_xmac_readl(struct efx_nic *efx,
44 efx_dword_t *value, unsigned int mac_reg)
45{
46 efx_oword_t temp;
47
48 falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
49 EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
50}
51
52/**************************************************************************
53 *
54 * MAC operations
55 *
56 *************************************************************************/
57static int falcon_reset_xmac(struct efx_nic *efx)
58{
59 efx_dword_t reg;
60 int count;
61
62 EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
63 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
64
65	for (count = 0; count < 10000; count++) { /* wait up to 100ms */
66 falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
67 if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
68 return 0;
69 udelay(10);
70 }
71
72 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
73 return -ETIMEDOUT;
74}
75
76/* Configure the XAUI driver that is an output from Falcon */
77static void falcon_setup_xaui(struct efx_nic *efx)
78{
79 efx_dword_t sdctl, txdrv;
80
81 /* Move the XAUI into low power, unless there is no PHY, in
82 * which case the XAUI will have to drive a cable. */
83 if (efx->phy_type == PHY_TYPE_NONE)
84 return;
85
86 falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
87 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
88 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
89 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
90 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
91 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
92 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
93 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
94 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
95 falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
96
97 EFX_POPULATE_DWORD_8(txdrv,
98 XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
99 XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
100 XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
101 XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
102 XX_DTXD, XX_TXDRV_DTX_DEFAULT,
103 XX_DTXC, XX_TXDRV_DTX_DEFAULT,
104 XX_DTXB, XX_TXDRV_DTX_DEFAULT,
105 XX_DTXA, XX_TXDRV_DTX_DEFAULT);
106 falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
107}
108
109static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
110{
111 efx_dword_t reg;
112
113 EFX_ZERO_DWORD(reg);
114 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
115 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
116 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
117 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
118 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
119 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
120 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
121 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
122 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
123 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
124 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
125 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
126 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
127 udelay(10);
128}
129
130static int _falcon_reset_xaui_a(struct efx_nic *efx)
131{
132 efx_dword_t reg;
133
134 falcon_hold_xaui_in_rst(efx);
135 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
136
137 /* Follow the RAMBUS XAUI data reset sequencing
138 * Channels A and B first: power down, reset PLL, reset, clear
139 */
140 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
141 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
142 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
143 udelay(10);
144
145 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
146 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
147 udelay(10);
148
149 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
150 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
151 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
152 udelay(10);
153
154 /* Channels C and D: power down, reset PLL, reset, clear */
155 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
156 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
157 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
158 udelay(10);
159
160 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
161 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
162 udelay(10);
163
164 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
165 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
166 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
167 udelay(10);
168
169 /* Setup XAUI */
170 falcon_setup_xaui(efx);
171 udelay(10);
172
173 /* Take XGXS out of reset */
174 EFX_ZERO_DWORD(reg);
175 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
176 udelay(10);
177
178 return 0;
179}
180
181static int _falcon_reset_xaui_b(struct efx_nic *efx)
182{
183 efx_dword_t reg;
184 int count;
185
186 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
187 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
188
189 /* Give some time for the link to establish */
190	for (count = 0; count < 1000; count++) { /* wait up to 10ms */
191 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
192 if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
193 falcon_setup_xaui(efx);
194 return 0;
195 }
196 udelay(10);
197 }
198 EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
199 return -ETIMEDOUT;
200}
201
202int falcon_reset_xaui(struct efx_nic *efx)
203{
204 int rc;
205
206 if (EFX_WORKAROUND_9388(efx)) {
207 falcon_hold_xaui_in_rst(efx);
208 efx->phy_op->reset_xaui(efx);
209 rc = _falcon_reset_xaui_a(efx);
210 } else {
211 rc = _falcon_reset_xaui_b(efx);
212 }
213 return rc;
214}
215
216static int falcon_xgmii_status(struct efx_nic *efx)
217{
218 efx_dword_t reg;
219
220 if (FALCON_REV(efx) < FALCON_REV_B0)
221 return 1;
222
223 /* The ISR latches, so clear it and re-read */
224 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
225 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
226
227 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
228 EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
229 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
230 return 0;
231 }
232
233 return 1;
234}
235
236static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
237{
238 efx_dword_t reg;
239
240 if (FALCON_REV(efx) < FALCON_REV_B0)
241 return;
242
243 /* Flush the ISR */
244 if (enable)
245 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
246
247 EFX_POPULATE_DWORD_2(reg,
248 XM_MSK_RMTFLT, !enable,
249 XM_MSK_LCLFLT, !enable);
250 falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
251}
252
253int falcon_init_xmac(struct efx_nic *efx)
254{
255 int rc;
256
257 /* Initialize the PHY first so the clock is around */
258 rc = efx->phy_op->init(efx);
259 if (rc)
260 goto fail1;
261
262 rc = falcon_reset_xaui(efx);
263 if (rc)
264 goto fail2;
265
266 /* Wait again. Give the PHY and MAC time to come back */
267 schedule_timeout_uninterruptible(HZ / 10);
268
269 rc = falcon_reset_xmac(efx);
270 if (rc)
271 goto fail2;
272
273 falcon_mask_status_intr(efx, 1);
274 return 0;
275
276 fail2:
277 efx->phy_op->fini(efx);
278 fail1:
279 return rc;
280}
281
282int falcon_xaui_link_ok(struct efx_nic *efx)
283{
284 efx_dword_t reg;
285 int align_done, sync_status, link_ok = 0;
286
287 /* Read link status */
288 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
289
290 align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
291 sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
292 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
293 link_ok = 1;
294
295 /* Clear link status ready for next read */
296 EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
297 EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
298 EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
299 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
300
301 /* If the link is up, then check the phy side of the xaui link
302	 * (error conditions from the wire side propagate back through
303 * the phy to the xaui side). */
304 if (efx->link_up && link_ok) {
305 int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
306 if (has_phyxs)
307 link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
308 }
309
310 /* If the PHY and XAUI links are up, then check the mac's xgmii
311 * fault state */
312 if (efx->link_up && link_ok)
313 link_ok = falcon_xgmii_status(efx);
314
315 return link_ok;
316}
317
318static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
319{
320 unsigned int max_frame_len;
321 efx_dword_t reg;
322 int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
323
324	/* Configure MAC - cut-through mode is hard-wired on */
325 EFX_POPULATE_DWORD_3(reg,
326 XM_RX_JUMBO_MODE, 1,
327 XM_TX_STAT_EN, 1,
328 XM_RX_STAT_EN, 1);
329 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
330
331 /* Configure TX */
332 EFX_POPULATE_DWORD_6(reg,
333 XM_TXEN, 1,
334 XM_TX_PRMBL, 1,
335 XM_AUTO_PAD, 1,
336 XM_TXCRC, 1,
337 XM_FCNTL, 1,
338 XM_IPG, 0x3);
339 falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
340
341 /* Configure RX */
342 EFX_POPULATE_DWORD_5(reg,
343 XM_RXEN, 1,
344 XM_AUTO_DEPAD, 0,
345 XM_ACPT_ALL_MCAST, 1,
346 XM_ACPT_ALL_UCAST, efx->promiscuous,
347 XM_PASS_CRC_ERR, 1);
348 falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
349
350 /* Set frame length */
351 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
352 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
353 falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
354 EFX_POPULATE_DWORD_2(reg,
355 XM_MAX_TX_FRM_SIZE, max_frame_len,
356 XM_TX_JUMBO_MODE, 1);
357 falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
358
359 EFX_POPULATE_DWORD_2(reg,
360 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
361 XM_DIS_FCNTL, rx_fc ? 0 : 1);
362 falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC);
363
364 /* Set MAC address */
365 EFX_POPULATE_DWORD_4(reg,
366 XM_ADR_0, efx->net_dev->dev_addr[0],
367 XM_ADR_1, efx->net_dev->dev_addr[1],
368 XM_ADR_2, efx->net_dev->dev_addr[2],
369 XM_ADR_3, efx->net_dev->dev_addr[3]);
370 falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
371 EFX_POPULATE_DWORD_2(reg,
372 XM_ADR_4, efx->net_dev->dev_addr[4],
373 XM_ADR_5, efx->net_dev->dev_addr[5]);
374 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
375}
376
377/* Try to bring the Falcon side of the Falcon-PHY XAUI link back up
378 * if it has failed to come up. Bash it until it comes back up. */
379static int falcon_check_xaui_link_up(struct efx_nic *efx)
380{
381 int max_tries, tries;
382 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
383 max_tries = tries;
384
385 if (efx->phy_type == PHY_TYPE_NONE)
386 return 0;
387
388 while (tries) {
389 if (falcon_xaui_link_ok(efx))
390 return 1;
391
392 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
393 __func__, tries);
394 (void) falcon_reset_xaui(efx);
395 udelay(200);
396 tries--;
397 }
398
399 EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
400 max_tries);
401 return 0;
402}
403
404void falcon_reconfigure_xmac(struct efx_nic *efx)
405{
406 int xaui_link_ok;
407
408 falcon_mask_status_intr(efx, 0);
409
410 falcon_deconfigure_mac_wrapper(efx);
411 efx->phy_op->reconfigure(efx);
412 falcon_reconfigure_xmac_core(efx);
413 falcon_reconfigure_mac_wrapper(efx);
414
415 /* Ensure XAUI link is up */
416 xaui_link_ok = falcon_check_xaui_link_up(efx);
417
418 if (xaui_link_ok && efx->link_up)
419 falcon_mask_status_intr(efx, 1);
420}
421
422void falcon_fini_xmac(struct efx_nic *efx)
423{
424 /* Isolate the MAC - PHY */
425 falcon_deconfigure_mac_wrapper(efx);
426
427 /* Potentially power down the PHY */
428 efx->phy_op->fini(efx);
429}
430
431void falcon_update_stats_xmac(struct efx_nic *efx)
432{
433 struct efx_mac_stats *mac_stats = &efx->mac_stats;
434 int rc;
435
436 rc = falcon_dma_stats(efx, XgDmaDone_offset);
437 if (rc)
438 return;
439
440 /* Update MAC stats from DMAed values */
441 FALCON_STAT(efx, XgRxOctets, rx_bytes);
442 FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
443 FALCON_STAT(efx, XgRxPkts, rx_packets);
444 FALCON_STAT(efx, XgRxPktsOK, rx_good);
445 FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
446 FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
447 FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
448 FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
449 FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
450 FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
451 FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
452 FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
453 FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
454 FALCON_STAT(efx, XgRxAlignError, rx_align_error);
455 FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
456 FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
457 FALCON_STAT(efx, XgRxControlPkts, rx_control);
458 FALCON_STAT(efx, XgRxPausePkts, rx_pause);
459 FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
460 FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
461 FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
462 FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
463 FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
464 FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
465 FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
466 FALCON_STAT(efx, XgRxLengthError, rx_length_error);
467 FALCON_STAT(efx, XgTxPkts, tx_packets);
468 FALCON_STAT(efx, XgTxOctets, tx_bytes);
469 FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
470 FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
471 FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
472 FALCON_STAT(efx, XgTxControlPkts, tx_control);
473 FALCON_STAT(efx, XgTxPausePkts, tx_pause);
474 FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
475 FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
476 FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
477 FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
478 FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
479 FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
480 FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
481 FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
482 FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
483 FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
484 FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
485 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
486
487 /* Update derived statistics */
488 mac_stats->tx_good_bytes =
489 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
490 mac_stats->rx_bad_bytes =
491 (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
492}
493
494#define EFX_XAUI_RETRAIN_MAX 8
495
496int falcon_check_xmac(struct efx_nic *efx)
497{
498 unsigned xaui_link_ok;
499 int rc;
500
501 falcon_mask_status_intr(efx, 0);
502 xaui_link_ok = falcon_xaui_link_ok(efx);
503
504 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
505 (void) falcon_reset_xaui(efx);
506
507 /* Call the PHY check_hw routine */
508 rc = efx->phy_op->check_hw(efx);
509
510 /* Unmask interrupt if everything was (and still is) ok */
511 if (xaui_link_ok && efx->link_up)
512 falcon_mask_status_intr(efx, 1);
513
514 return rc;
515}
516
517/* Simulate a PHY event */
518void falcon_xmac_sim_phy_event(struct efx_nic *efx)
519{
520 efx_qword_t phy_event;
521
522 EFX_POPULATE_QWORD_2(phy_event,
523 EV_CODE, GLOBAL_EV_DECODE,
524 XG_PHY_INTR, 1);
525 falcon_generate_event(&efx->channel[0], &phy_event);
526}
527
528int falcon_xmac_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
529{
530 mdio_clause45_get_settings(efx, ecmd);
531 ecmd->transceiver = XCVR_INTERNAL;
532 ecmd->phy_address = efx->mii.phy_id;
533 ecmd->autoneg = AUTONEG_DISABLE;
534 ecmd->duplex = DUPLEX_FULL;
535 return 0;
536}
537
538int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
539{
540 if (ecmd->transceiver != XCVR_INTERNAL)
541 return -EINVAL;
542 if (ecmd->autoneg != AUTONEG_DISABLE)
543 return -EINVAL;
544 if (ecmd->duplex != DUPLEX_FULL)
545 return -EINVAL;
546
547 return mdio_clause45_set_settings(efx, ecmd);
548}
549
550
551int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
552{
553 int reset;
554
555 if (flow_control & EFX_FC_AUTO) {
556 EFX_LOG(efx, "10G does not support flow control "
557 "autonegotiation\n");
558 return -EINVAL;
559 }
560
561 if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
562 return -EINVAL;
563
564 /* TX flow control may automatically turn itself off if the
565 * link partner (intermittently) stops responding to pause
566 * frames. There isn't any indication that this has happened,
567	 * so the best we can do is leave it up to the user to spot this
568	 * and fix it by cycling transmit flow control on this end. */
569 reset = ((flow_control & EFX_FC_TX) &&
570 !(efx->flow_control & EFX_FC_TX));
571 if (EFX_WORKAROUND_11482(efx) && reset) {
572 if (FALCON_REV(efx) >= FALCON_REV_B0) {
573 /* Recover by resetting the EM block */
574 if (efx->link_up)
575 falcon_drain_tx_fifo(efx);
576 } else {
577 /* Schedule a reset to recover */
578 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
579 }
580 }
581
582 efx->flow_control = flow_control;
583
584 return 0;
585}
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
new file mode 100644
index 000000000000..d25bbd1297f4
--- /dev/null
+++ b/drivers/net/sfc/gmii.h
@@ -0,0 +1,195 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60/* Logically extended advertisement register */
61#define GM_ADVERTISE_SLCT ADVERTISE_SLCT
62#define GM_ADVERTISE_CSMA ADVERTISE_CSMA
63#define GM_ADVERTISE_10HALF ADVERTISE_10HALF
64#define GM_ADVERTISE_1000XFULL ADVERTISE_1000XFULL
65#define GM_ADVERTISE_10FULL ADVERTISE_10FULL
66#define GM_ADVERTISE_1000XHALF ADVERTISE_1000XHALF
67#define GM_ADVERTISE_100HALF ADVERTISE_100HALF
68#define GM_ADVERTISE_1000XPAUSE ADVERTISE_1000XPAUSE
69#define GM_ADVERTISE_100FULL ADVERTISE_100FULL
70#define GM_ADVERTISE_1000XPSE_ASYM ADVERTISE_1000XPSE_ASYM
71#define GM_ADVERTISE_100BASE4 ADVERTISE_100BASE4
72#define GM_ADVERTISE_PAUSE_CAP ADVERTISE_PAUSE_CAP
73#define GM_ADVERTISE_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
74#define GM_ADVERTISE_RESV ADVERTISE_RESV
75#define GM_ADVERTISE_RFAULT ADVERTISE_RFAULT
76#define GM_ADVERTISE_LPACK ADVERTISE_LPACK
77#define GM_ADVERTISE_NPAGE ADVERTISE_NPAGE
78#define GM_ADVERTISE_1000FULL (ADVERTISE_1000FULL << 8)
79#define GM_ADVERTISE_1000HALF (ADVERTISE_1000HALF << 8)
80#define GM_ADVERTISE_1000 (GM_ADVERTISE_1000FULL | \
81 GM_ADVERTISE_1000HALF)
82#define GM_ADVERTISE_FULL (GM_ADVERTISE_1000FULL | \
83 ADVERTISE_FULL)
84#define GM_ADVERTISE_ALL (GM_ADVERTISE_1000FULL | \
85 GM_ADVERTISE_1000HALF | \
86 ADVERTISE_ALL)
87
88/* Logically extended link partner ability register */
89#define GM_LPA_SLCT LPA_SLCT
90#define GM_LPA_10HALF LPA_10HALF
91#define GM_LPA_1000XFULL LPA_1000XFULL
92#define GM_LPA_10FULL LPA_10FULL
93#define GM_LPA_1000XHALF LPA_1000XHALF
94#define GM_LPA_100HALF LPA_100HALF
95#define GM_LPA_1000XPAUSE LPA_1000XPAUSE
96#define GM_LPA_100FULL LPA_100FULL
97#define GM_LPA_1000XPAUSE_ASYM LPA_1000XPAUSE_ASYM
98#define GM_LPA_100BASE4 LPA_100BASE4
99#define GM_LPA_PAUSE_CAP LPA_PAUSE_CAP
100#define GM_LPA_PAUSE_ASYM LPA_PAUSE_ASYM
101#define GM_LPA_RESV LPA_RESV
102#define GM_LPA_RFAULT LPA_RFAULT
103#define GM_LPA_LPACK LPA_LPACK
104#define GM_LPA_NPAGE LPA_NPAGE
105#define GM_LPA_1000FULL (LPA_1000FULL << 6)
106#define GM_LPA_1000HALF (LPA_1000HALF << 6)
107#define GM_LPA_10000FULL 0x00040000
108#define GM_LPA_10000HALF 0x00080000
109#define GM_LPA_DUPLEX (GM_LPA_1000FULL | GM_LPA_10000FULL \
110 | LPA_DUPLEX)
111#define GM_LPA_10 (LPA_10FULL | LPA_10HALF)
112#define GM_LPA_100 LPA_100
113#define GM_LPA_1000 (GM_LPA_1000FULL | GM_LPA_1000HALF)
114#define GM_LPA_10000 (GM_LPA_10000FULL | GM_LPA_10000HALF)
115
116/* Retrieve GMII autonegotiation advertised abilities
117 *
118 * The MII advertisement register (MII_ADVERTISE) is logically extended
119 * to include advertisement bits ADVERTISE_1000FULL and
120 * ADVERTISE_1000HALF from MII_CTRL1000. The result can be tested
121 * against the GM_ADVERTISE_xxx constants.
122 */
123static inline unsigned int gmii_advertised(struct mii_if_info *gmii)
124{
125 unsigned int advertise;
126 unsigned int ctrl1000;
127
128 advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
129 ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
130 return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise);
131}
132
133/* Retrieve GMII autonegotiation link partner abilities
134 *
135 * The MII link partner ability register (MII_LPA) is logically
136 * extended by adding bits LPA_1000HALF and LPA_1000FULL from
137 * MII_STAT1000. The result can be tested against the GM_LPA_xxx
138 * constants.
139 */
140static inline unsigned int gmii_lpa(struct mii_if_info *gmii)
141{
142 unsigned int lpa;
143 unsigned int stat1000;
144
145 lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA);
146 stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000);
147 return (((stat1000 << 6) & GM_LPA_1000) | lpa);
148}
149
150/* Calculate GMII autonegotiated link technology
151 *
152 * "negotiated" should be the result of gmii_advertised() logically
153 * ANDed with the result of gmii_lpa().
154 *
155 * "tech" will be negotiated with the unused bits masked out. For
156 * example, if both ends of the link are capable of both
157 * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked
158 * out.
159 */
160static inline unsigned int gmii_nway_result(unsigned int negotiated)
161{
162 unsigned int other_bits;
163
164 /* Mask out the speed and duplexity bits */
165 other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000);
166
167 if (negotiated & GM_LPA_1000FULL)
168 return (other_bits | GM_LPA_1000FULL);
169 else if (negotiated & GM_LPA_1000HALF)
170 return (other_bits | GM_LPA_1000HALF);
171 else
172 return (other_bits | mii_nway_result(negotiated));
173}
174
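/* Hedged usage sketch (hypothetical helper) tying the pieces
 * together: resolve the autonegotiated link technology exactly as the
 * comment above prescribes. */
static inline unsigned int example_link_technology(struct mii_if_info *gmii)
{
	unsigned int negotiated = gmii_advertised(gmii) & gmii_lpa(gmii);

	return gmii_nway_result(negotiated);
}
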
175/* Calculate GMII non-autonegotiated link technology
176 *
177 * This provides an equivalent to gmii_nway_result for the case when
178 * autonegotiation is disabled.
179 */
180static inline unsigned int gmii_forced_result(unsigned int bmcr)
181{
182 unsigned int result;
183 int full_duplex;
184
185 full_duplex = bmcr & BMCR_FULLDPLX;
186 if (bmcr & BMCR_SPEED1000)
187 result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF;
188 else if (bmcr & BMCR_SPEED100)
189 result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF;
190 else
191 result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF;
192 return result;
193}
194
195#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c
new file mode 100644
index 000000000000..b6c62d0ed9c2
--- /dev/null
+++ b/drivers/net/sfc/i2c-direct.c
@@ -0,0 +1,381 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "i2c-direct.h"
14
15/*
16 * I2C data (SDA) and clock (SCL) line read/writes with appropriate
17 * delays.
18 */
19
20static inline void setsda(struct efx_i2c_interface *i2c, int state)
21{
22 udelay(i2c->op->udelay);
23 i2c->sda = state;
24 i2c->op->setsda(i2c);
25 udelay(i2c->op->udelay);
26}
27
28static inline void setscl(struct efx_i2c_interface *i2c, int state)
29{
30 udelay(i2c->op->udelay);
31 i2c->scl = state;
32 i2c->op->setscl(i2c);
33 udelay(i2c->op->udelay);
34}
35
36static inline int getsda(struct efx_i2c_interface *i2c)
37{
38 int sda;
39
40 udelay(i2c->op->udelay);
41 sda = i2c->op->getsda(i2c);
42 udelay(i2c->op->udelay);
43 return sda;
44}
45
46static inline int getscl(struct efx_i2c_interface *i2c)
47{
48 int scl;
49
50 udelay(i2c->op->udelay);
51 scl = i2c->op->getscl(i2c);
52 udelay(i2c->op->udelay);
53 return scl;
54}
55
56/*
57 * I2C low-level protocol operations
58 *
59 */
60
61static inline void i2c_release(struct efx_i2c_interface *i2c)
62{
63 EFX_WARN_ON_PARANOID(!i2c->scl);
64 EFX_WARN_ON_PARANOID(!i2c->sda);
65 /* Devices may time out if operations do not end */
66 setscl(i2c, 1);
67 setsda(i2c, 1);
68 EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
69 EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
70}
71
72static inline void i2c_start(struct efx_i2c_interface *i2c)
73{
74 /* We may be restarting immediately after a {send,recv}_bit,
75 * so SCL will not necessarily already be high.
76 */
77 EFX_WARN_ON_PARANOID(!i2c->sda);
78 setscl(i2c, 1);
79 setsda(i2c, 0);
80 setscl(i2c, 0);
81 setsda(i2c, 1);
82}
83
84static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
85{
86 EFX_WARN_ON_PARANOID(i2c->scl != 0);
87 setsda(i2c, bit);
88 setscl(i2c, 1);
89 setscl(i2c, 0);
90 setsda(i2c, 1);
91}
92
93static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
94{
95 int bit;
96
97 EFX_WARN_ON_PARANOID(i2c->scl != 0);
98 EFX_WARN_ON_PARANOID(!i2c->sda);
99 setscl(i2c, 1);
100 bit = getsda(i2c);
101 setscl(i2c, 0);
102 return bit;
103}
104
105static inline void i2c_stop(struct efx_i2c_interface *i2c)
106{
107 EFX_WARN_ON_PARANOID(i2c->scl != 0);
108 setsda(i2c, 0);
109 setscl(i2c, 1);
110 setsda(i2c, 1);
111}
112
113/*
114 * I2C mid-level protocol operations
115 *
116 */
117
118/* Sends a byte via the I2C bus and checks for an acknowledgement from
119 * the slave device.
120 */
121static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
122{
123 int i;
124
125 /* Send byte */
126 for (i = 0; i < 8; i++) {
127 i2c_send_bit(i2c, !!(byte & 0x80));
128 byte <<= 1;
129 }
130
131 /* Check for acknowledgement from slave */
132 return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
133}
134
135/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
136static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
137{
138 u8 value = 0;
139 int i;
140
141 /* Receive byte */
142 for (i = 0; i < 8; i++)
143 value = (value << 1) | i2c_recv_bit(i2c);
144
145 /* Send ACK/NACK */
146 i2c_send_bit(i2c, (ack ? 0 : 1));
147
148 return value;
149}
150
151/* Calculate command byte for a read operation */
152static inline u8 i2c_read_cmd(u8 device_id)
153{
154 return ((device_id << 1) | 1);
155}
156
157/* Calculate command byte for a write operation */
158static inline u8 i2c_write_cmd(u8 device_id)
159{
160 return ((device_id << 1) | 0);
161}
162
163int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
164{
165 int rc;
166
167 /* If someone is driving the bus low we just give up. */
168 if (getsda(i2c) == 0 || getscl(i2c) == 0) {
169 EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
170 " Giving up.\n", __func__);
171 return -EFAULT;
172 }
173
174 /* Pretend to initiate a device write */
175 i2c_start(i2c);
176 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
177 if (rc)
178 goto out;
179
180 out:
181 i2c_stop(i2c);
182 i2c_release(i2c);
183
184 return rc;
185}
186
187/* This performs a fast read of one or more consecutive bytes from an
188 * I2C device. Not all devices support consecutive reads of more than
189 * one byte; for these devices use efx_i2c_read() instead.
190 */
191int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
192 u8 device_id, u8 offset, u8 *data, unsigned int len)
193{
194 int i;
195 int rc;
196
197 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
198 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
199 EFX_WARN_ON_PARANOID(data == NULL);
200 EFX_WARN_ON_PARANOID(len < 1);
201
202 /* Select device and starting offset */
203 i2c_start(i2c);
204 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
205 if (rc)
206 goto out;
207 rc = i2c_send_byte(i2c, offset);
208 if (rc)
209 goto out;
210
211 /* Read data from device */
212 i2c_start(i2c);
213 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
214 if (rc)
215 goto out;
216 for (i = 0; i < (len - 1); i++)
217 /* Read and acknowledge all but the last byte */
218 data[i] = i2c_recv_byte(i2c, 1);
219 /* Read last byte with no acknowledgement */
220 data[i] = i2c_recv_byte(i2c, 0);
221
222 out:
223 i2c_stop(i2c);
224 i2c_release(i2c);
225
226 return rc;
227}
228
229/* This performs a fast write of one or more consecutive bytes to an
230 * I2C device. Not all devices support consecutive writes of more
231 * than one byte; for these devices use efx_i2c_write() instead.
232 */
233int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
234 u8 device_id, u8 offset,
235 const u8 *data, unsigned int len)
236{
237 int i;
238 int rc;
239
240 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
241 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
242 EFX_WARN_ON_PARANOID(len < 1);
243
244 /* Select device and starting offset */
245 i2c_start(i2c);
246 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
247 if (rc)
248 goto out;
249 rc = i2c_send_byte(i2c, offset);
250 if (rc)
251 goto out;
252
253 /* Write data to device */
254 for (i = 0; i < len; i++) {
255 rc = i2c_send_byte(i2c, data[i]);
256 if (rc)
257 goto out;
258 }
259
260 out:
261 i2c_stop(i2c);
262 i2c_release(i2c);
263
264 return rc;
265}
266
267/* I2C byte-by-byte read */
268int efx_i2c_read(struct efx_i2c_interface *i2c,
269 u8 device_id, u8 offset, u8 *data, unsigned int len)
270{
271 int rc;
272
273 /* i2c_fast_read with length 1 is a single byte read */
274 for (; len > 0; offset++, data++, len--) {
275 rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
276 if (rc)
277 return rc;
278 }
279
280 return 0;
281}
282
283/* I2C byte-by-byte write */
284int efx_i2c_write(struct efx_i2c_interface *i2c,
285 u8 device_id, u8 offset, const u8 *data, unsigned int len)
286{
287 int rc;
288
289 /* i2c_fast_write with length 1 is a single byte write */
290 for (; len > 0; offset++, data++, len--) {
291 rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
292 if (rc)
293 return rc;
294 mdelay(i2c->op->mdelay);
295 }
296
297 return 0;
298}
299
300
301/* This is just a slightly neater wrapper round efx_i2c_fast_write
302 * in the case where the target doesn't take an offset; the first
303 * data byte is simply sent in the offset position. */
304int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
305 u8 device_id, const u8 *data, unsigned int len)
306{
307 return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
308}
309
310/* I2C receiving of bytes - does not send an offset byte */
311int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
312 u8 *bytes, unsigned int len)
313{
314 int i;
315 int rc;
316
317 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
318 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
319 EFX_WARN_ON_PARANOID(len < 1);
320
321 /* Select device */
322 i2c_start(i2c);
323
324 /* Read data from device */
325 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
326 if (rc)
327 goto out;
328
329 for (i = 0; i < (len - 1); i++)
330 /* Read and acknowledge all but the last byte */
331 bytes[i] = i2c_recv_byte(i2c, 1);
332 /* Read last byte with no acknowledgement */
333 bytes[i] = i2c_recv_byte(i2c, 0);
334
335 out:
336 i2c_stop(i2c);
337 i2c_release(i2c);
338
339 return rc;
340}
341
342/* SMBus and some I2C devices will time out if the I2C clock is
343 * held low for too long. This is most likely to happen in virtualised
344 * systems (when the entire domain is descheduled) but could in
345 * principle happen due to preemption on any busy system (and given the
346 * potential length of an I2C operation turning preemption off is not
347 * a sensible option). The following functions deal with the failure by
348 * retrying up to a fixed number of times.
349 */
350
351#define I2C_MAX_RETRIES (10)
352
353/* The timeout problem will result in -EIO. If the wrapped function
354 * returns any other error, pass this up and do not retry. */
355#define RETRY_WRAPPER(_f) \
356 int retries = I2C_MAX_RETRIES; \
357 int rc; \
358 while (retries) { \
359 rc = _f; \
360 if (rc != -EIO) \
361 return rc; \
362 retries--; \
363 } \
364 return rc; \
365
366int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
367{
368 RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
369}
370
371int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
372 u8 device_id, u8 offset, u8 *data, unsigned int len)
373{
374 RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
375}
376
377int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
378 u8 device_id, u8 offset, const u8 *data, unsigned int len)
379{
380 RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
381}
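
For orientation: a board driver supplies the pin accessors and then uses the
retrying entry points above. A minimal sketch follows; the efx_board_*
callbacks, the delay values and the 0x50 EEPROM address are hypothetical
illustrations, not part of this patch.

	#include "i2c-direct.h"

	/* Hypothetical GPIO accessors; a real board drives the pins
	 * from i2c->sda and i2c->scl via its GPIO registers. */
	static void efx_board_setsda(struct efx_i2c_interface *i2c) { /* drive SDA pin */ }
	static void efx_board_setscl(struct efx_i2c_interface *i2c) { /* drive SCL pin */ }
	static int efx_board_getsda(struct efx_i2c_interface *i2c) { return 1; /* sample SDA pin */ }
	static int efx_board_getscl(struct efx_i2c_interface *i2c) { return 1; /* sample SCL pin */ }

	static struct efx_i2c_bit_operations efx_board_i2c_ops = {
		.setsda	= efx_board_setsda,
		.setscl	= efx_board_setscl,
		.getsda	= efx_board_getsda,
		.getscl	= efx_board_getscl,
		.udelay	= 20,	/* per-bit delay, microseconds */
		.mdelay	= 10,	/* per-byte write delay, milliseconds */
	};

	/* Read 16 bytes from a hypothetical EEPROM at address 0x50,
	 * retrying on -EIO as described above. */
	static int efx_board_read_eeprom(struct efx_i2c_interface *i2c, u8 *buf)
	{
		i2c->op = &efx_board_i2c_ops;
		return efx_i2c_read_retry(i2c, 0x50, 0, buf, 16);
	}
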
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h
new file mode 100644
index 000000000000..291e561071f5
--- /dev/null
+++ b/drivers/net/sfc/i2c-direct.h
@@ -0,0 +1,91 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_I2C_DIRECT_H
12#define EFX_I2C_DIRECT_H
13
14#include "net_driver.h"
15
16/*
17 * Direct control of an I2C bus
18 */
19
20struct efx_i2c_interface;
21
22/**
23 * struct efx_i2c_bit_operations - I2C bus direct control methods
24 *
25 * I2C bus direct control methods.
26 *
27 * @setsda: Set state of SDA line
28 * @setscl: Set state of SCL line
29 * @getsda: Get state of SDA line
30 * @getscl: Get state of SCL line
31 * @udelay: Delay between each bit operation
32 * @mdelay: Delay between each byte write
33 */
34struct efx_i2c_bit_operations {
35 void (*setsda) (struct efx_i2c_interface *i2c);
36 void (*setscl) (struct efx_i2c_interface *i2c);
37 int (*getsda) (struct efx_i2c_interface *i2c);
38 int (*getscl) (struct efx_i2c_interface *i2c);
39 unsigned int udelay;
40 unsigned int mdelay;
41};
42
43/**
44 * struct efx_i2c_interface - an I2C interface
45 *
46 * An I2C interface.
47 *
48 * @efx: Attached Efx NIC
49 * @op: I2C bus control methods
50 * @sda: Current output state of SDA line
51 * @scl: Current output state of SCL line
52 */
53struct efx_i2c_interface {
54 struct efx_nic *efx;
55 struct efx_i2c_bit_operations *op;
56 unsigned int sda:1;
57 unsigned int scl:1;
58};
59
60extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
61extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
62 u8 device_id, u8 offset,
63 u8 *data, unsigned int len);
64extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
65 u8 device_id, u8 offset,
66 const u8 *data, unsigned int len);
67extern int efx_i2c_read(struct efx_i2c_interface *i2c,
68 u8 device_id, u8 offset, u8 *data, unsigned int len);
69extern int efx_i2c_write(struct efx_i2c_interface *i2c,
70 u8 device_id, u8 offset,
71 const u8 *data, unsigned int len);
72
73extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
74 const u8 *bytes, unsigned int len);
75
76extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
77 u8 *bytes, unsigned int len);
78
79
80/* Versions of the API that retry on failure. */
81extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
82 u8 device_id);
83
84extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
85 u8 device_id, u8 offset, u8 *data, unsigned int len);
86
87extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
88 u8 device_id, u8 offset,
89 const u8 *data, unsigned int len);
90
91#endif /* EFX_I2C_DIRECT_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
new file mode 100644
index 000000000000..edd07d4dee18
--- /dev/null
+++ b/drivers/net/sfc/mac.h
@@ -0,0 +1,33 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2007 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_MAC_H
12#define EFX_MAC_H
13
14#include "net_driver.h"
15
16extern void falcon_xmac_writel(struct efx_nic *efx,
17 efx_dword_t *value, unsigned int mac_reg);
18extern void falcon_xmac_readl(struct efx_nic *efx,
19 efx_dword_t *value, unsigned int mac_reg);
20extern int falcon_init_xmac(struct efx_nic *efx);
21extern void falcon_reconfigure_xmac(struct efx_nic *efx);
22extern void falcon_update_stats_xmac(struct efx_nic *efx);
23extern void falcon_fini_xmac(struct efx_nic *efx);
24extern int falcon_check_xmac(struct efx_nic *efx);
25extern void falcon_xmac_sim_phy_event(struct efx_nic *efx);
26extern int falcon_xmac_get_settings(struct efx_nic *efx,
27 struct ethtool_cmd *ecmd);
28extern int falcon_xmac_set_settings(struct efx_nic *efx,
29 struct ethtool_cmd *ecmd);
30extern int falcon_xmac_set_pause(struct efx_nic *efx,
31 enum efx_fc_type pause_params);
32
33#endif
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
new file mode 100644
index 000000000000..dc06bb0aa575
--- /dev/null
+++ b/drivers/net/sfc/mdio_10g.c
@@ -0,0 +1,282 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9/*
10 * Useful functions for working with MDIO clause 45 PHYs
11 */
12#include <linux/types.h>
13#include <linux/ethtool.h>
14#include <linux/delay.h>
15#include "net_driver.h"
16#include "mdio_10g.h"
17#include "boards.h"
18
19int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
20 int spins, int spintime)
21{
22 u32 ctrl;
23 int phy_id = port->mii.phy_id;
24
25 /* Catch callers passing values in the wrong units (or just silly) */
26 EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
27
28 mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1,
29 (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
30 /* Wait for the reset bit to clear. */
31 do {
32 msleep(spintime);
33 ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1);
34 spins--;
35
36 } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)));
37
38 return spins ? spins : -ETIMEDOUT;
39}
40
41static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
42 int fault_fatal)
43{
44 int status;
45 int phy_id = efx->mii.phy_id;
46
47 /* Read MMD STATUS2 to check it is responding. */
48 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
49 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
50 ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
51 MDIO_MMDREG_STAT2_PRESENT_VAL) {
52 EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
53 return -EIO;
54 }
55
56 /* Read MMD STATUS 1 to check for fault. */
57 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1);
58 if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) {
59 if (fault_fatal) {
60 EFX_ERR(efx, "PHY MMD %d reporting fatal"
61 " fault: status %x\n", mmd, status);
62 return -EIO;
63 } else {
64 EFX_LOG(efx, "PHY MMD %d reporting status"
65 " %x (expected)\n", mmd, status);
66 }
67 }
68 return 0;
69}
70
71/* This ought to be ridiculous overkill; we expect the wait to fail only rarely */
72#define MDIO45_RESET_TIME 1000 /* ms */
73#define MDIO45_RESET_ITERS 100
74
75int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
76 unsigned int mmd_mask)
77{
78 const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
79 int tries = MDIO45_RESET_ITERS;
80 int rc = 0;
81 int in_reset;
82
83 while (tries) {
84 int mask = mmd_mask;
85 int mmd = 0;
86 int stat;
87 in_reset = 0;
88 while (mask) {
89 if (mask & 1) {
90 stat = mdio_clause45_read(efx,
91 efx->mii.phy_id,
92 mmd,
93 MDIO_MMDREG_CTRL1);
94 if (stat < 0) {
95 EFX_ERR(efx, "failed to read status of"
96 " MMD %d\n", mmd);
97 return -EIO;
98 }
99 if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))
100 in_reset |= (1 << mmd);
101 }
102 mask = mask >> 1;
103 mmd++;
104 }
105 if (!in_reset)
106 break;
107 tries--;
108 msleep(spintime);
109 }
110 if (in_reset != 0) {
111 EFX_ERR(efx, "not all MMDs came out of reset in time."
112 " MMDs still in reset: %x\n", in_reset);
113 rc = -ETIMEDOUT;
114 }
115 return rc;
116}
117
118int mdio_clause45_check_mmds(struct efx_nic *efx,
119 unsigned int mmd_mask, unsigned int fatal_mask)
120{
121 int devices, mmd = 0;
122 int probe_mmd;
123
124 /* Historically we have probed the PHYXS to find out what devices are
125 * present, but that doesn't work so well if the PHYXS isn't expected
126 * to exist; in that case just probe the first MMD in the mask. */
127 probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS :
128 __ffs(mmd_mask);
129 devices = mdio_clause45_read(efx, efx->mii.phy_id,
130 probe_mmd, MDIO_MMDREG_DEVS0);
131
132 /* Check all the expected MMDs are present */
133 if (devices < 0) {
134 EFX_ERR(efx, "failed to read devices present\n");
135 return -EIO;
136 }
137 if ((devices & mmd_mask) != mmd_mask) {
138 EFX_ERR(efx, "required MMDs not present: got %x, "
139 "wanted %x\n", devices, mmd_mask);
140 return -ENODEV;
141 }
142 EFX_TRACE(efx, "Devices present: %x\n", devices);
143
144 /* Check all required MMDs are responding and happy. */
145 while (mmd_mask) {
146 if (mmd_mask & 1) {
147 int fault_fatal = fatal_mask & 1;
148 if (mdio_clause45_check_mmd(efx, mmd, fault_fatal))
149 return -EIO;
150 }
151 mmd_mask = mmd_mask >> 1;
152 fatal_mask = fatal_mask >> 1;
153 mmd++;
154 }
155
156 return 0;
157}
158
159int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
160{
161 int phy_id = efx->mii.phy_id;
162 int status;
163 int ok = 1;
164 int mmd = 0;
165 int good;
166
167 while (mmd_mask) {
168 if (mmd_mask & 1) {
169 /* Double reads because link state is latched, and a
170 * read moves the current state into the register */
171 status = mdio_clause45_read(efx, phy_id,
172 mmd, MDIO_MMDREG_STAT1);
173 status = mdio_clause45_read(efx, phy_id,
174 mmd, MDIO_MMDREG_STAT1);
175
176 good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
177 ok = ok && good;
178 }
179 mmd_mask = (mmd_mask >> 1);
180 mmd++;
181 }
182 return ok;
183}
184
185/**
186 * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
187 * @efx: Efx NIC
188 * @ecmd: Buffer for settings
189 *
190 * On return the 'port', 'speed', 'supported' and 'advertising' fields of
191 * ecmd have been filled out based on the PMA type.
192 */
193void mdio_clause45_get_settings(struct efx_nic *efx,
194 struct ethtool_cmd *ecmd)
195{
196 int pma_type;
197
198 /* If no PMA is present we are presumably talking something XAUI-ish
199 * like CX4, which we report as FIBRE (see below) */
200 if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) {
201 ecmd->speed = SPEED_10000;
202 ecmd->port = PORT_FIBRE;
203 ecmd->supported = SUPPORTED_FIBRE;
204 ecmd->advertising = ADVERTISED_FIBRE;
205 return;
206 }
207
208 pma_type = mdio_clause45_read(efx, efx->mii.phy_id,
209 MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2);
210 pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK;
211
212 switch (pma_type) {
213 /* We represent CX4 as fibre in the absence of anything
214 * better. */
215 case MDIO_PMAPMD_CTRL2_10G_CX4:
216 ecmd->speed = SPEED_10000;
217 ecmd->port = PORT_FIBRE;
218 ecmd->supported = SUPPORTED_FIBRE;
219 ecmd->advertising = ADVERTISED_FIBRE;
220 break;
221 /* 10G Base-T */
222 case MDIO_PMAPMD_CTRL2_10G_BT:
223 ecmd->speed = SPEED_10000;
224 ecmd->port = PORT_TP;
225 ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full;
226 ecmd->advertising = (ADVERTISED_FIBRE
227 | ADVERTISED_10000baseT_Full);
228 break;
229 case MDIO_PMAPMD_CTRL2_1G_BT:
230 ecmd->speed = SPEED_1000;
231 ecmd->port = PORT_TP;
232 ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full;
233 ecmd->advertising = (ADVERTISED_FIBRE
234 | ADVERTISED_1000baseT_Full);
235 break;
236 case MDIO_PMAPMD_CTRL2_100_BT:
237 ecmd->speed = SPEED_100;
238 ecmd->port = PORT_TP;
239 ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full;
240 ecmd->advertising = (ADVERTISED_FIBRE
241 | ADVERTISED_100baseT_Full);
242 break;
243 case MDIO_PMAPMD_CTRL2_10_BT:
244 ecmd->speed = SPEED_10;
245 ecmd->port = PORT_TP;
246 ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full;
247 ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full;
248 break;
249 /* All the other defined modes are flavours of
250 * 10G optical */
251 default:
252 ecmd->speed = SPEED_10000;
253 ecmd->port = PORT_FIBRE;
254 ecmd->supported = SUPPORTED_FIBRE;
255 ecmd->advertising = ADVERTISED_FIBRE;
256 break;
257 }
258}
259
260/**
261 * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
262 * @efx: Efx NIC
263 * @ecmd: New settings
264 *
265 * Currently this just enforces that we are _not_ changing the
266 * 'port', 'speed', 'supported' or 'advertising' settings as these
267 * cannot be changed on any currently supported PHY.
268 */
269int mdio_clause45_set_settings(struct efx_nic *efx,
270 struct ethtool_cmd *ecmd)
271{
272 struct ethtool_cmd tmpcmd;
273 mdio_clause45_get_settings(efx, &tmpcmd);
274 /* None of the current PHYs support more than one mode
275 * of operation (and only 10GBT ever will), so keep things
276 * simple for now */
277 if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) &&
278 (ecmd->supported == tmpcmd.supported) &&
279 (ecmd->advertising == tmpcmd.advertising))
280 return 0;
281 return -EOPNOTSUPP;
282}
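
Taken together these helpers give PHY drivers a standard bring-up pattern:
wait for the expected MMDs to leave reset, check that they are present and
fault-free, then trust the latched link bits. A minimal sketch (the MMD mask
and the helper name are illustrative, not part of this patch):

	#include "mdio_10g.h"

	static int example_phy_bringup(struct efx_nic *efx)
	{
		unsigned int mmd_mask = MDIO_MMDREG_DEVS0_PMAPMD |
					MDIO_MMDREG_DEVS0_PCS |
					MDIO_MMDREG_DEVS0_PHYXS;
		int rc;

		/* Wait for the expected MMDs to come out of reset */
		rc = mdio_clause45_wait_reset_mmds(efx, mmd_mask);
		if (rc < 0)
			return rc;

		/* Verify presence; treat any reported fault as fatal here */
		rc = mdio_clause45_check_mmds(efx, mmd_mask, mmd_mask);
		if (rc < 0)
			return rc;

		/* Both reads of the latched link bit happen inside links_ok() */
		return mdio_clause45_links_ok(efx, mmd_mask) ? 0 : -EIO;
	}
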
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
new file mode 100644
index 000000000000..2214b6d820a7
--- /dev/null
+++ b/drivers/net/sfc/mdio_10g.h
@@ -0,0 +1,232 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_MDIO_10G_H
11#define EFX_MDIO_10G_H
12
13/*
14 * Definitions needed for doing 10G MDIO as specified in clause 45
15 * MDIO, which do not appear in Linux yet. Also some helper functions.
16 */
17
18#include "efx.h"
19#include "boards.h"
20
21/* Numbering of the MDIO Manageable Devices (MMDs) */
22/* Physical Medium Attachment/ Physical Medium Dependent sublayer */
23#define MDIO_MMD_PMAPMD (1)
24/* WAN Interface Sublayer */
25#define MDIO_MMD_WIS (2)
26/* Physical Coding Sublayer */
27#define MDIO_MMD_PCS (3)
28/* PHY Extender Sublayer */
29#define MDIO_MMD_PHYXS (4)
30/* Extender Sublayer */
31#define MDIO_MMD_DTEXS (5)
32/* Transmission convergence */
33#define MDIO_MMD_TC (6)
34/* Auto negotiation */
35#define MDIO_MMD_AN (7)
36
37/* Generic register locations */
38#define MDIO_MMDREG_CTRL1 (0)
39#define MDIO_MMDREG_STAT1 (1)
40#define MDIO_MMDREG_IDHI (2)
41#define MDIO_MMDREG_IDLOW (3)
42#define MDIO_MMDREG_SPEED (4)
43#define MDIO_MMDREG_DEVS0 (5)
44#define MDIO_MMDREG_DEVS1 (6)
45#define MDIO_MMDREG_CTRL2 (7)
46#define MDIO_MMDREG_STAT2 (8)
47
48/* Bits in MMDREG_CTRL1 */
49/* Reset */
50#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
51#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
52
53/* Bits in MMDREG_STAT1 */
54#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
55#define MDIO_MMDREG_STAT1_FAULT_WIDTH (1)
56/* Link state */
57#define MDIO_MMDREG_STAT1_LINK_LBN (2)
58#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
59
60/* Bits in ID reg */
61#define MDIO_ID_REV(_id32) (_id32 & 0xf)
62#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f)
63#define MDIO_ID_OUI(_id32) (_id32 >> 10)
64
65/* Bits in MMDREG_DEVS0. Someone thoughtfully laid things out
66 * so the 'bit present' bit number of an MMD is the number of
67 * that MMD */
68#define DEV_PRESENT_BIT(_b) (1 << _b)
69
70#define MDIO_MMDREG_DEVS0_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
71#define MDIO_MMDREG_DEVS0_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS)
72#define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
73
74/* Bits in MMDREG_STAT2 */
75#define MDIO_MMDREG_STAT2_PRESENT_VAL (2)
76#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
77#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
78
79/* PMA type (4 bits) */
80#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
81#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
82#define MDIO_PMAPMD_CTRL2_10G_LW (0x2)
83#define MDIO_PMAPMD_CTRL2_10G_SW (0x3)
84#define MDIO_PMAPMD_CTRL2_10G_LX4 (0x4)
85#define MDIO_PMAPMD_CTRL2_10G_ER (0x5)
86#define MDIO_PMAPMD_CTRL2_10G_LR (0x6)
87#define MDIO_PMAPMD_CTRL2_10G_SR (0x7)
88/* Reserved */
89#define MDIO_PMAPMD_CTRL2_10G_BT (0x9)
90/* Reserved */
91/* Reserved */
92#define MDIO_PMAPMD_CTRL2_1G_BT (0xc)
93/* Reserved */
94#define MDIO_PMAPMD_CTRL2_100_BT (0xe)
95#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
96#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
97
98/* PHY XGXS lane state */
99#define MDIO_PHYXS_LANE_STATE (0x18)
100#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
101
102/* AN registers */
103#define MDIO_AN_STATUS (1)
104#define MDIO_AN_STATUS_XNP_LBN (7)
105#define MDIO_AN_STATUS_PAGE_LBN (6)
106#define MDIO_AN_STATUS_AN_DONE_LBN (5)
107#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0)
108
109#define MDIO_AN_10GBT_STATUS (33)
110#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
111#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
112#define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */
113#define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */
114#define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */
115#define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */
116#define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9) /* LP Training Reset Request */
117
118
119/* Packing of the prt and dev arguments of clause 45 style MDIO into a
120 * single int so they can be passed into the mdio_read/write functions
121 * that currently exist. Note that as Falcon is the only current user,
122 * the packed form is chosen to match what Falcon needs to write into
123 * a register. This is checked at compile-time so do not change it. If
124 * your target chip needs things laid out differently you will need
125 * to unpack the arguments in your chip-specific mdio functions.
126 */
127 /* These are defined by the standard. */
128#define MDIO45_PRT_ID_WIDTH (5)
129#define MDIO45_DEV_ID_WIDTH (5)
130
131/* The prt ID is just packed in immediately to the left of the dev ID */
132#define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH)
133
134#define MDIO45_PRT_ID_MASK ((1 << MDIO45_PRT_DEV_WIDTH) - 1)
135/* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */
136#define MDIO45_XPRT_ID_WIDTH (MDIO45_PRT_DEV_WIDTH + 1)
137#define MDIO45_XPRT_ID_MASK ((1 << MDIO45_XPRT_ID_WIDTH) - 1)
138#define MDIO45_XPRT_ID_IS10G (1 << (MDIO45_XPRT_ID_WIDTH - 1))
139
140
141#define MDIO45_PRT_ID_COMP_LBN MDIO45_DEV_ID_WIDTH
142#define MDIO45_PRT_ID_COMP_WIDTH MDIO45_PRT_ID_WIDTH
143#define MDIO45_DEV_ID_COMP_LBN 0
144#define MDIO45_DEV_ID_COMP_WIDTH MDIO45_DEV_ID_WIDTH
145
146/* Compose port and device into a phy_id */
147static inline int mdio_clause45_pack(u8 prt, u8 dev)
148{
149 efx_dword_t phy_id;
150 EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt,
151 MDIO45_DEV_ID_COMP, dev);
152 return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id);
153}
154
155static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev)
156{
157 efx_dword_t phy_id;
158 EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val);
159 *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP);
160 *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP);
161}
162
163static inline int mdio_clause45_read(struct efx_nic *efx,
164 u8 prt, u8 dev, u16 addr)
165{
166 return efx->mii.mdio_read(efx->net_dev,
167 mdio_clause45_pack(prt, dev), addr);
168}
169
170static inline void mdio_clause45_write(struct efx_nic *efx,
171 u8 prt, u8 dev, u16 addr, int value)
172{
173 efx->mii.mdio_write(efx->net_dev,
174 mdio_clause45_pack(prt, dev), addr, value);
175}
176
177
178static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
179{
180 int phy_id = efx->mii.phy_id;
181 u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW);
182 u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI);
183 return (id_hi << 16) | (id_low);
184}
185
186static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
187{
188 int i, sync, lane_status;
189
190 for (i = 0; i < 2; ++i)
191 lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
192 MDIO_MMD_PHYXS,
193 MDIO_PHYXS_LANE_STATE);
194
195 sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
196 if (!sync)
197 EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
198 return sync;
199}
200
201extern const char *mdio_clause45_mmd_name(int mmd);
202
203/*
204 * Reset a specific MMD and wait for reset to clear.
205 * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
206 *
207 * This function will sleep
208 */
209extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd,
210 int spins, int spintime);
211
212/* As mdio_clause45_check_mmd but for multiple MMDs */
213int mdio_clause45_check_mmds(struct efx_nic *efx,
214 unsigned int mmd_mask, unsigned int fatal_mask);
215
216/* Check the link status of specified mmds in bit mask */
217extern int mdio_clause45_links_ok(struct efx_nic *efx,
218 unsigned int mmd_mask);
219
220/* Read (some of) the PHY settings over MDIO */
221extern void mdio_clause45_get_settings(struct efx_nic *efx,
222 struct ethtool_cmd *ecmd);
223
224/* Set (some of) the PHY settings over MDIO */
225extern int mdio_clause45_set_settings(struct efx_nic *efx,
226 struct ethtool_cmd *ecmd);
227
228/* Wait for specified MMDs to exit reset within a timeout */
229extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
230 unsigned int mmd_mask);
231
232#endif /* EFX_MDIO_10G_H */
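
As a small usage sketch of the accessors above (the helper is hypothetical,
not part of this patch), the ID macros decode the 32-bit identifier that
mdio_clause45_read_id() assembles from the IDHI/IDLOW registers of an MMD:

	#include "mdio_10g.h"

	static void example_log_pma_id(struct efx_nic *efx)
	{
		u32 id = mdio_clause45_read_id(efx, MDIO_MMD_PMAPMD);

		/* 22-bit OUI, 6-bit model, 4-bit revision */
		EFX_LOG(efx, "PMA/PMD ID: OUI %06x model %02x rev %x\n",
			MDIO_ID_OUI(id), MDIO_ID_MODEL(id), MDIO_ID_REV(id));
	}
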
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
new file mode 100644
index 000000000000..c505482c2520
--- /dev/null
+++ b/drivers/net/sfc/net_driver.h
@@ -0,0 +1,883 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11/* Common definitions for all Efx net driver code */
12
13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H
15
16#include <linux/version.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/if_vlan.h>
21#include <linux/timer.h>
22#include <linux/mii.h>
23#include <linux/list.h>
24#include <linux/pci.h>
25#include <linux/device.h>
26#include <linux/highmem.h>
27#include <linux/workqueue.h>
28#include <linux/inet_lro.h>
29
30#include "enum.h"
31#include "bitfield.h"
32#include "i2c-direct.h"
33
34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
36
37/**************************************************************************
38 *
39 * Build definitions
40 *
41 **************************************************************************/
42#ifndef EFX_DRIVER_NAME
43#define EFX_DRIVER_NAME "sfc"
44#endif
45#define EFX_DRIVER_VERSION "2.2.0136"
46
47#ifdef EFX_ENABLE_DEBUG
48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
49#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
50#else
51#define EFX_BUG_ON_PARANOID(x) do {} while (0)
52#define EFX_WARN_ON_PARANOID(x) do {} while (0)
53#endif
54
55#define NET_DEV_REGISTERED(efx) \
56 ((efx)->net_dev->reg_state == NETREG_REGISTERED)
57
58/* Include net device name in log messages if it has been registered.
59 * Use efx->name not efx->net_dev->name so that races with (un)registration
60 * are harmless.
61 */
62#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
63
64/* Un-rate-limited logging */
65#define EFX_ERR(efx, fmt, args...) \
66dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
67
68#define EFX_INFO(efx, fmt, args...) \
69dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
70
71#ifdef EFX_ENABLE_DEBUG
72#define EFX_LOG(efx, fmt, args...) \
73dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
74#else
75#define EFX_LOG(efx, fmt, args...) \
76dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
77#endif
78
79#define EFX_TRACE(efx, fmt, args...) do {} while (0)
80
81#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
82
83/* Rate-limited logging */
84#define EFX_ERR_RL(efx, fmt, args...) \
85do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
86
87#define EFX_INFO_RL(efx, fmt, args...) \
88do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
89
90#define EFX_LOG_RL(efx, fmt, args...) \
91do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
92
93/* Kernel headers may redefine inline anyway */
94#ifndef inline
95#define inline inline __attribute__ ((always_inline))
96#endif
97
98/**************************************************************************
99 *
100 * Efx data structures
101 *
102 **************************************************************************/
103
104#define EFX_MAX_CHANNELS 32
105#define EFX_MAX_TX_QUEUES 1
106#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
107
108/**
109 * struct efx_special_buffer - An Efx special buffer
110 * @addr: CPU base address of the buffer
111 * @dma_addr: DMA base address of the buffer
112 * @len: Buffer length, in bytes
113 * @index: Buffer index within controller's buffer table
114 * @entries: Number of buffer table entries
115 *
116 * Special buffers are used for the event queues and the TX and RX
117 * descriptor queues for each channel. They are *not* used for the
118 * actual transmit and receive buffers.
119 *
120 * Note that for Falcon, TX and RX descriptor queues live in host memory.
121 * Allocation and freeing procedures must take this into account.
122 */
123struct efx_special_buffer {
124 void *addr;
125 dma_addr_t dma_addr;
126 unsigned int len;
127 int index;
128 int entries;
129};
130
131/**
132 * struct efx_tx_buffer - An Efx TX buffer
133 * @skb: The associated socket buffer.
134 * Set only on the final fragment of a packet; %NULL for all other
135 * fragments. When this fragment completes, then we can free this
136 * skb.
137 * @dma_addr: DMA address of the fragment.
138 * @len: Length of this fragment.
139 * This field is zero when the queue slot is empty.
140 * @continuation: True if this fragment is not the end of a packet.
141 * @unmap_single: True if pci_unmap_single should be used.
142 * @unmap_addr: DMA address to unmap
143 * @unmap_len: Length of this fragment to unmap
144 */
145struct efx_tx_buffer {
146 const struct sk_buff *skb;
147 dma_addr_t dma_addr;
148 unsigned short len;
149 unsigned char continuation;
150 unsigned char unmap_single;
151 dma_addr_t unmap_addr;
152 unsigned short unmap_len;
153};
154
155/**
156 * struct efx_tx_queue - An Efx TX queue
157 *
158 * This is a ring buffer of TX fragments.
159 * Since the TX completion path always executes on the same
160 * CPU and the xmit path can operate on different CPUs,
161 * performance is increased by ensuring that the completion
162 * path and the xmit path operate on different cache lines.
163 * This is particularly important if the xmit path is always
164 * executing on one CPU which is different from the completion
165 * path. There is also a cache line for members which are
166 * read but not written on the fast path.
167 *
168 * @efx: The associated Efx NIC
169 * @queue: DMA queue number
170 * @used: Queue is used by net driver
171 * @channel: The associated channel
172 * @buffer: The software buffer ring
173 * @txd: The hardware descriptor ring
174 * @read_count: Current read pointer.
175 * This is the number of buffers that have been removed from both rings.
176 * @stopped: Stopped flag.
177 * Set if this TX queue is currently stopping its port.
178 * @insert_count: Current insert pointer
179 * This is the number of buffers that have been added to the
180 * software ring.
181 * @write_count: Current write pointer
182 * This is the number of buffers that have been added to the
183 * hardware ring.
184 * @old_read_count: The value of read_count when last checked.
185 * This is here for performance reasons. The xmit path will
186 * only get the up-to-date value of read_count if this
187 * variable indicates that the queue is full. This is to
188 * avoid cache-line ping-pong between the xmit path and the
189 * completion path.
190 */
191struct efx_tx_queue {
192 /* Members which don't change on the fast path */
193 struct efx_nic *efx ____cacheline_aligned_in_smp;
194 int queue;
195 int used;
196 struct efx_channel *channel;
197 struct efx_nic *nic;
198 struct efx_tx_buffer *buffer;
199 struct efx_special_buffer txd;
200
201 /* Members used mainly on the completion path */
202 unsigned int read_count ____cacheline_aligned_in_smp;
203 int stopped;
204
205 /* Members used only on the xmit path */
206 unsigned int insert_count ____cacheline_aligned_in_smp;
207 unsigned int write_count;
208 unsigned int old_read_count;
209};
210
211/**
212 * struct efx_rx_buffer - An Efx RX data buffer
213 * @dma_addr: DMA base address of the buffer
214 * @skb: The associated socket buffer, if any.
215 * If both this and page are %NULL, the buffer slot is currently free.
216 * @page: The associated page buffer, if any.
217 * If both this and skb are %NULL, the buffer slot is currently free.
218 * @data: Pointer to ethernet header
219 * @len: Buffer length, in bytes.
220 * @unmap_addr: DMA address to unmap
221 */
222struct efx_rx_buffer {
223 dma_addr_t dma_addr;
224 struct sk_buff *skb;
225 struct page *page;
226 char *data;
227 unsigned int len;
228 dma_addr_t unmap_addr;
229};
230
231/**
232 * struct efx_rx_queue - An Efx RX queue
233 * @efx: The associated Efx NIC
234 * @queue: DMA queue number
235 * @used: Queue is used by net driver
236 * @channel: The associated channel
237 * @buffer: The software buffer ring
238 * @rxd: The hardware descriptor ring
239 * @added_count: Number of buffers added to the receive queue.
240 * @notified_count: Number of buffers given to NIC (<= @added_count).
241 * @removed_count: Number of buffers removed from the receive queue.
242 * @add_lock: Receive queue descriptor add spin lock.
243 * This lock must be held in order to add buffers to the RX
244 * descriptor ring (rxd and buffer) and to update added_count (but
245 * not removed_count).
246 * @max_fill: RX descriptor maximum fill level (<= ring size)
247 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
248 * (<= @max_fill)
249 * @fast_fill_limit: The level to which a fast fill will fill
250 * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
251 * @min_fill: RX descriptor minimum non-zero fill level.
252 * This records the minimum fill level observed when a ring
253 * refill was triggered.
254 * @min_overfill: RX descriptor minimum overflow fill level.
255 * This records the minimum fill level at which RX queue
256 * overflow was observed. It should never be set.
257 * @alloc_page_count: RX allocation strategy counter.
258 * @alloc_skb_count: RX allocation strategy counter.
259 * @work: Descriptor push work thread
260 * @buf_page: Page for next RX buffer.
261 * We can use a single page for multiple RX buffers. This tracks
262 * the remaining space in the allocation.
263 * @buf_dma_addr: Page's DMA address.
264 * @buf_data: Page's host address.
265 */
266struct efx_rx_queue {
267 struct efx_nic *efx;
268 int queue;
269 int used;
270 struct efx_channel *channel;
271 struct efx_rx_buffer *buffer;
272 struct efx_special_buffer rxd;
273
274 int added_count;
275 int notified_count;
276 int removed_count;
277 spinlock_t add_lock;
278 unsigned int max_fill;
279 unsigned int fast_fill_trigger;
280 unsigned int fast_fill_limit;
281 unsigned int min_fill;
282 unsigned int min_overfill;
283 unsigned int alloc_page_count;
284 unsigned int alloc_skb_count;
285 struct delayed_work work;
286 unsigned int slow_fill_count;
287
288 struct page *buf_page;
289 dma_addr_t buf_dma_addr;
290 char *buf_data;
291};
292
293/**
294 * struct efx_buffer - An Efx general-purpose buffer
295 * @addr: host base address of the buffer
296 * @dma_addr: DMA base address of the buffer
297 * @len: Buffer length, in bytes
298 *
299 * Falcon uses these buffers for its interrupt status registers and
300 * MAC stats dumps.
301 */
302struct efx_buffer {
303 void *addr;
304 dma_addr_t dma_addr;
305 unsigned int len;
306};
307
308
309/* Flags for channel->used_flags */
310#define EFX_USED_BY_RX 1
311#define EFX_USED_BY_TX 2
312#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
313
314enum efx_rx_alloc_method {
315 RX_ALLOC_METHOD_AUTO = 0,
316 RX_ALLOC_METHOD_SKB = 1,
317 RX_ALLOC_METHOD_PAGE = 2,
318};
319
320/**
321 * struct efx_channel - An Efx channel
322 *
323 * A channel comprises an event queue, at least one TX queue, at least
324 * one RX queue, and an associated tasklet for processing the event
325 * queue.
326 *
327 * @efx: Associated Efx NIC
328 * @evqnum: Event queue number
329 * @channel: Channel instance number
330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only)
333 * @has_interrupt: Channel has an interrupt
334 * @irq_moderation: IRQ moderation value (in us)
335 * @napi_dev: Net device used with NAPI
336 * @napi_str: NAPI control structure
337 * @reset_work: Scheduled reset work thread
338 * @work_pending: Is work pending via NAPI?
339 * @eventq: Event queue buffer
340 * @eventq_read_ptr: Event queue read pointer
341 * @last_eventq_read_ptr: Last event queue read pointer value.
342 * @eventq_magic: Event queue magic value for driver-generated test events
343 * @lro_mgr: LRO state
344 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
345 * and diagnostic counters
346 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
347 * descriptors
348 * @rx_alloc_pop_pages: RX allocation method currently in use for popping
349 * descriptors
350 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
351 * @n_rx_ip_frag_err: Count of RX IP fragment errors
352 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
353 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
354 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
355 * @n_rx_overlength: Count of RX_OVERLENGTH errors
356 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
357 */
358struct efx_channel {
359 struct efx_nic *efx;
360 int evqnum;
361 int channel;
362 int used_flags;
363 int enabled;
364 int irq;
365 unsigned int has_interrupt;
366 unsigned int irq_moderation;
367 struct net_device *napi_dev;
368 struct napi_struct napi_str;
369 struct work_struct reset_work;
370 int work_pending;
371 struct efx_special_buffer eventq;
372 unsigned int eventq_read_ptr;
373 unsigned int last_eventq_read_ptr;
374 unsigned int eventq_magic;
375
376 struct net_lro_mgr lro_mgr;
377 int rx_alloc_level;
378 int rx_alloc_push_pages;
379 int rx_alloc_pop_pages;
380
381 unsigned n_rx_tobe_disc;
382 unsigned n_rx_ip_frag_err;
383 unsigned n_rx_ip_hdr_chksum_err;
384 unsigned n_rx_tcp_udp_chksum_err;
385 unsigned n_rx_frm_trunc;
386 unsigned n_rx_overlength;
387 unsigned n_skbuff_leaks;
388
389 /* Used to pipeline received packets in order to optimise memory
390 * access with prefetches.
391 */
392 struct efx_rx_buffer *rx_pkt;
393 int rx_pkt_csummed;
394
395};
396
397/**
398 * struct efx_blinker - S/W LED blinking context
399 * @led_num: LED ID (board-specific meaning)
400 * @state: Current state - on or off
401 * @resubmit: Timer resubmission flag
402 * @timer: Control timer for blinking
403 */
404struct efx_blinker {
405 int led_num;
406 int state;
407 int resubmit;
408 struct timer_list timer;
409};
410
411
412/**
413 * struct efx_board - board information
414 * @type: Board model type
415 * @major: Major rev. ('A', 'B' ...)
416 * @minor: Minor rev. (0, 1, ...)
417 * @init: Initialisation function
418 * @init_leds: Sets up board LEDs
419 * @set_fault_led: Turns the fault LED on or off
420 * @blink: Starts/stops blinking
421 * @blinker: used to blink LEDs in software
422 */
423struct efx_board {
424 int type;
425 int major;
426 int minor;
427 int (*init) (struct efx_nic *nic);
428 /* As the LEDs are typically attached to the PHY, LEDs
429 * have a separate init callback that happens later than
430 * board init. */
431 int (*init_leds)(struct efx_nic *efx);
432 void (*set_fault_led) (struct efx_nic *efx, int state);
433 void (*blink) (struct efx_nic *efx, int start);
434 struct efx_blinker blinker;
435};
436
437enum efx_int_mode {
438 /* Be careful if altering to correct macro below */
439 EFX_INT_MODE_MSIX = 0,
440 EFX_INT_MODE_MSI = 1,
441 EFX_INT_MODE_LEGACY = 2,
442 EFX_INT_MODE_MAX /* Insert any new items before this */
443};
444#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
445
446enum phy_type {
447 PHY_TYPE_NONE = 0,
448 PHY_TYPE_CX4_RTMR = 1,
449 PHY_TYPE_1G_ALASKA = 2,
450 PHY_TYPE_10XPRESS = 3,
451 PHY_TYPE_XFP = 4,
452 PHY_TYPE_PM8358 = 6,
453 PHY_TYPE_MAX /* Insert any new items before this */
454};
455
456#define PHY_ADDR_INVALID 0xff
457
458enum nic_state {
459 STATE_INIT = 0,
460 STATE_RUNNING = 1,
461 STATE_FINI = 2,
462 STATE_RESETTING = 3, /* rtnl_lock always held */
463 STATE_DISABLED = 4,
464 STATE_MAX,
465};
466
467/*
468 * Alignment of page-allocated RX buffers
469 *
470 * Controls the number of bytes inserted at the start of an RX buffer.
471 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
472 * of the skb->head for hardware DMA].
473 */
474#if defined(__i386__) || defined(__x86_64__)
475#define EFX_PAGE_IP_ALIGN 0
476#else
477#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
478#endif
479
480/*
481 * Alignment of the skb->head which wraps a page-allocated RX buffer
482 *
483 * The skb allocated to wrap an rx_buffer can have this alignment. Since
484 * the data is memcpy'd from the rx_buf, it does not need to be equal to
485 * EFX_PAGE_IP_ALIGN.
486 */
487#define EFX_PAGE_SKB_ALIGN 2
488
489/* Forward declaration */
490struct efx_nic;
491
492/* Pseudo bit-mask flow control field */
493enum efx_fc_type {
494 EFX_FC_RX = 1,
495 EFX_FC_TX = 2,
496 EFX_FC_AUTO = 4,
497};
498
499/**
500 * struct efx_phy_operations - Efx PHY operations table
501 * @init: Initialise PHY
502 * @fini: Shut down PHY
503 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
504 * @clear_interrupt: Clear down interrupt
505 * @blink: Blink LEDs
506 * @check_hw: Check hardware
507 * @reset_xaui: Reset XAUI side of PHY (for software-sequenced reset)
508 * @mmds: MMD presence mask
509 */
510struct efx_phy_operations {
511 int (*init) (struct efx_nic *efx);
512 void (*fini) (struct efx_nic *efx);
513 void (*reconfigure) (struct efx_nic *efx);
514 void (*clear_interrupt) (struct efx_nic *efx);
515 int (*check_hw) (struct efx_nic *efx);
516 void (*reset_xaui) (struct efx_nic *efx);
517 int mmds;
518};
519
520/*
521 * Efx extended statistics
522 *
523 * Not all statistics are provided by all supported MACs. The purpose
524 * of this structure is to contain the raw statistics provided by each
525 * MAC.
526 */
527struct efx_mac_stats {
528 u64 tx_bytes;
529 u64 tx_good_bytes;
530 u64 tx_bad_bytes;
531 unsigned long tx_packets;
532 unsigned long tx_bad;
533 unsigned long tx_pause;
534 unsigned long tx_control;
535 unsigned long tx_unicast;
536 unsigned long tx_multicast;
537 unsigned long tx_broadcast;
538 unsigned long tx_lt64;
539 unsigned long tx_64;
540 unsigned long tx_65_to_127;
541 unsigned long tx_128_to_255;
542 unsigned long tx_256_to_511;
543 unsigned long tx_512_to_1023;
544 unsigned long tx_1024_to_15xx;
545 unsigned long tx_15xx_to_jumbo;
546 unsigned long tx_gtjumbo;
547 unsigned long tx_collision;
548 unsigned long tx_single_collision;
549 unsigned long tx_multiple_collision;
550 unsigned long tx_excessive_collision;
551 unsigned long tx_deferred;
552 unsigned long tx_late_collision;
553 unsigned long tx_excessive_deferred;
554 unsigned long tx_non_tcpudp;
555 unsigned long tx_mac_src_error;
556 unsigned long tx_ip_src_error;
557 u64 rx_bytes;
558 u64 rx_good_bytes;
559 u64 rx_bad_bytes;
560 unsigned long rx_packets;
561 unsigned long rx_good;
562 unsigned long rx_bad;
563 unsigned long rx_pause;
564 unsigned long rx_control;
565 unsigned long rx_unicast;
566 unsigned long rx_multicast;
567 unsigned long rx_broadcast;
568 unsigned long rx_lt64;
569 unsigned long rx_64;
570 unsigned long rx_65_to_127;
571 unsigned long rx_128_to_255;
572 unsigned long rx_256_to_511;
573 unsigned long rx_512_to_1023;
574 unsigned long rx_1024_to_15xx;
575 unsigned long rx_15xx_to_jumbo;
576 unsigned long rx_gtjumbo;
577 unsigned long rx_bad_lt64;
578 unsigned long rx_bad_64_to_15xx;
579 unsigned long rx_bad_15xx_to_jumbo;
580 unsigned long rx_bad_gtjumbo;
581 unsigned long rx_overflow;
582 unsigned long rx_missed;
583 unsigned long rx_false_carrier;
584 unsigned long rx_symbol_error;
585 unsigned long rx_align_error;
586 unsigned long rx_length_error;
587 unsigned long rx_internal_error;
588 unsigned long rx_good_lt64;
589};
590
591/* Number of bits used in a multicast filter hash address */
592#define EFX_MCAST_HASH_BITS 8
593
594/* Number of (single-bit) entries in a multicast filter hash */
595#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
596
597/* An Efx multicast filter hash */
598union efx_multicast_hash {
599 u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
600 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
601};
602
603/**
604 * struct efx_nic - an Efx NIC
605 * @name: Device name (net device name or bus id before net device registered)
606 * @pci_dev: The PCI device
607 * @type: Controller type attributes
608 * @legacy_irq: IRQ number
609 * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
610 * @reset_work: Scheduled reset workitem
611 * @monitor_work: Hardware monitor workitem
612 * @membase_phys: Memory BAR value as physical address
613 * @membase: Memory BAR value
614 * @biu_lock: BIU (bus interface unit) lock
615 * @interrupt_mode: Interrupt mode
616 * @i2c: I2C interface
617 * @board_info: Board-level information
618 * @state: Device state flag. Serialised by the rtnl_lock.
619 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
620 * @tx_queue: TX DMA queues
621 * @rx_queue: RX DMA queues
622 * @channel: Channels
623 * @rss_queues: Number of RSS queues
624 * @rx_buffer_len: RX buffer length
625 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
626 * @irq_status: Interrupt status buffer
627 * @last_irq_cpu: Last CPU to handle interrupt.
628 * This field is written with the SMP processor ID whenever an
629 * interrupt is handled. It is used by falcon_test_interrupt()
630 * to verify that an interrupt has occurred.
631 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
632 * @nic_data: Hardware dependent state
633 * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
634 * efx_reconfigure_port()
635 * @port_enabled: Port enabled indicator.
636 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
637 * efx_reconfigure_work with kernel interfaces. Safe to read under any
638 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
639 * be held to modify it.
640 * @port_initialized: Port initialized?
641 * @net_dev: Operating system network device. Consider holding the rtnl lock
642 * @rx_checksum_enabled: RX checksumming enabled
643 * @netif_stop_count: Port stop count
644 * @netif_stop_lock: Port stop lock
645 * @mac_stats: MAC statistics. These include all statistics the MACs
646 * can provide. Generic code converts these into a standard
647 * &struct net_device_stats.
648 * @stats_buffer: DMA buffer for statistics
649 * @stats_lock: Statistics update lock
650 * @mac_address: Permanent MAC address
651 * @phy_type: PHY type
652 * @phy_lock: PHY access lock
653 * @phy_op: PHY interface
654 * @phy_data: PHY private data (including PHY-specific stats)
655 * @mii: PHY interface
656 * @phy_powered: PHY power state
657 * @tx_disabled: PHY transmitter turned off
658 * @link_up: Link status
659 * @link_options: Link options (MII/GMII format)
660 * @n_link_state_changes: Number of times the link has changed state
661 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
662 * @multicast_hash: Multicast hash table
663 * @flow_control: Flow control flags - separate RX/TX so can't use link_options
664 * @reconfigure_work: work item for dealing with PHY events
665 *
666 * The @priv field of the corresponding &struct net_device points to
667 * this.
668 */
669struct efx_nic {
670 char name[IFNAMSIZ];
671 struct pci_dev *pci_dev;
672 const struct efx_nic_type *type;
673 int legacy_irq;
674 struct workqueue_struct *workqueue;
675 struct work_struct reset_work;
676 struct delayed_work monitor_work;
677 unsigned long membase_phys;
678 void __iomem *membase;
679 spinlock_t biu_lock;
680 enum efx_int_mode interrupt_mode;
681
682 struct efx_i2c_interface i2c;
683 struct efx_board board_info;
684
685 enum nic_state state;
686 enum reset_type reset_pending;
687
688 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
689 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
690 struct efx_channel channel[EFX_MAX_CHANNELS];
691
692 int rss_queues;
693 unsigned int rx_buffer_len;
694 unsigned int rx_buffer_order;
695
696 struct efx_buffer irq_status;
697 volatile signed int last_irq_cpu;
698
699 unsigned n_rx_nodesc_drop_cnt;
700
701 void *nic_data;
702
703 struct mutex mac_lock;
704 int port_enabled;
705
706 int port_initialized;
707 struct net_device *net_dev;
708 int rx_checksum_enabled;
709
710 atomic_t netif_stop_count;
711 spinlock_t netif_stop_lock;
712
713 struct efx_mac_stats mac_stats;
714 struct efx_buffer stats_buffer;
715 spinlock_t stats_lock;
716
717 unsigned char mac_address[ETH_ALEN];
718
719 enum phy_type phy_type;
720 spinlock_t phy_lock;
721 struct efx_phy_operations *phy_op;
722 void *phy_data;
723 struct mii_if_info mii;
724
725 int link_up;
726 unsigned int link_options;
727 unsigned int n_link_state_changes;
728
729 int promiscuous;
730 union efx_multicast_hash multicast_hash;
731 enum efx_fc_type flow_control;
732 struct work_struct reconfigure_work;
733
734 atomic_t rx_reset;
735};
736
737/**
738 * struct efx_nic_type - Efx device type definition
739 * @mem_bar: Memory BAR number
740 * @mem_map_size: Memory BAR mapped size
741 * @txd_ptr_tbl_base: TX descriptor ring base address
742 * @rxd_ptr_tbl_base: RX descriptor ring base address
743 * @buf_tbl_base: Buffer table base address
744 * @evq_ptr_tbl_base: Event queue pointer table base address
745 * @evq_rptr_tbl_base: Event queue read-pointer table base address
746 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
747 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
748 * @evq_size: Event queue size (must be a power of two)
749 * @max_dma_mask: Maximum possible DMA mask
750 * @tx_dma_mask: TX DMA mask
751 * @bug5391_mask: Address mask for bug 5391 workaround
752 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
753 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
754 * @rx_buffer_padding: Padding added to each RX buffer
755 * @max_interrupt_mode: Highest capability interrupt mode supported
756 * from &enum efx_int_mode.
757 * @phys_addr_channels: Number of channels with physically addressed
758 * descriptors
759 */
760struct efx_nic_type {
761 unsigned int mem_bar;
762 unsigned int mem_map_size;
763 unsigned int txd_ptr_tbl_base;
764 unsigned int rxd_ptr_tbl_base;
765 unsigned int buf_tbl_base;
766 unsigned int evq_ptr_tbl_base;
767 unsigned int evq_rptr_tbl_base;
768
769 unsigned int txd_ring_mask;
770 unsigned int rxd_ring_mask;
771 unsigned int evq_size;
772 dma_addr_t max_dma_mask;
773 unsigned int tx_dma_mask;
774 unsigned bug5391_mask;
775
776 int rx_xoff_thresh;
777 int rx_xon_thresh;
778 unsigned int rx_buffer_padding;
779 unsigned int max_interrupt_mode;
780 unsigned int phys_addr_channels;
781};
782
783/**************************************************************************
784 *
785 * Prototypes and inline functions
786 *
787 *************************************************************************/
788
789/* Iterate over all used channels */
790#define efx_for_each_channel(_channel, _efx) \
791 for (_channel = &_efx->channel[0]; \
792 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
793 _channel++) \
794 if (!_channel->used_flags) \
795 continue; \
796 else
797
798/* Iterate over all used channels with interrupts */
799#define efx_for_each_channel_with_interrupt(_channel, _efx) \
800 for (_channel = &_efx->channel[0]; \
801 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
802 _channel++) \
803 if (!(_channel->used_flags && _channel->has_interrupt)) \
804 continue; \
805 else
806
807/* Iterate over all used TX queues */
808#define efx_for_each_tx_queue(_tx_queue, _efx) \
809 for (_tx_queue = &_efx->tx_queue[0]; \
810 _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \
811 _tx_queue++) \
812 if (!_tx_queue->used) \
813 continue; \
814 else
815
816/* Iterate over all TX queues belonging to a channel */
817#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
818 for (_tx_queue = &_channel->efx->tx_queue[0]; \
819 _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \
820 _tx_queue++) \
821 if ((!_tx_queue->used) || \
822 (_tx_queue->channel != _channel)) \
823 continue; \
824 else
825
826/* Iterate over all used RX queues */
827#define efx_for_each_rx_queue(_rx_queue, _efx) \
828 for (_rx_queue = &_efx->rx_queue[0]; \
829 _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \
830 _rx_queue++) \
831 if (!_rx_queue->used) \
832 continue; \
833 else
834
835/* Iterate over all RX queues belonging to a channel */
836#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
837 for (_rx_queue = &_channel->efx->rx_queue[0]; \
838 _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \
839 _rx_queue++) \
840 if ((!_rx_queue->used) || \
841 (_rx_queue->channel != _channel)) \
842 continue; \
843 else
844
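The trailing if/continue/else in these iterators is what lets them skip unused entries while still accepting a single-statement body: the caller's loop body becomes the else branch. A minimal standalone C sketch of the same pattern (the struct and names are invented for illustration, not from the driver):

#include <stdio.h>

struct slot { int used; int value; };

/* Same shape as efx_for_each_channel(): unused entries are skipped by
 * the continue, and the caller's loop body becomes the else branch. */
#define for_each_used_slot(_slot, _arr, _n)				\
	for (_slot = &(_arr)[0]; _slot < &(_arr)[_n]; _slot++)		\
		if (!_slot->used)					\
			continue;					\
		else

int main(void)
{
	struct slot slots[4] = { {1, 10}, {0, 0}, {1, 30}, {0, 0} };
	struct slot *slot;

	for_each_used_slot(slot, slots, 4)
		printf("slot %ld holds %d\n", (long)(slot - slots),
		       slot->value);
	return 0;
}
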
845/* Returns a pointer to the specified receive buffer in the RX
846 * descriptor queue.
847 */
848static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
849 unsigned int index)
850{
851	return &rx_queue->buffer[index];
852}
853
854/* Set bit in a little-endian bitfield */
855static inline void set_bit_le(int nr, unsigned char *addr)
856{
857 addr[nr / 8] |= (1 << (nr % 8));
858}
859
860/* Clear bit in a little-endian bitfield */
861static inline void clear_bit_le(int nr, unsigned char *addr)
862{
863 addr[nr / 8] &= ~(1 << (nr % 8));
864}
865
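For illustration, the same two helpers in a tiny standalone program (byte/bit positions chosen arbitrarily):

#include <stdio.h>

static void set_bit_le(int nr, unsigned char *addr)
{
	addr[nr / 8] |= (1 << (nr % 8));
}

static void clear_bit_le(int nr, unsigned char *addr)
{
	addr[nr / 8] &= ~(1 << (nr % 8));
}

int main(void)
{
	unsigned char map[2] = { 0xff, 0x00 };

	clear_bit_le(3, map);	/* clears bit 3 of byte 0 */
	set_bit_le(9, map);	/* sets bit 1 of byte 1 */
	printf("%02x %02x\n", map[0], map[1]);	/* prints "f7 02" */
	return 0;
}
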
866
867/**
868 * EFX_MAX_FRAME_LEN - calculate maximum frame length
869 *
870 * This calculates the maximum frame length that will be used for a
871 * given MTU. The frame length will be equal to the MTU plus a
872 * constant amount of header space and padding. This is the quantity
873 * that the net driver will program into the MAC as the maximum frame
874 * length.
875 *
876 * The 10G MAC used in Falcon requires 8-byte alignment on the frame
877 * length, so we round up to the nearest 8.
878 */
879#define EFX_MAX_FRAME_LEN(mtu) \
880 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7)
881
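As a worked example of this rounding, a small standalone program (the ETH_HLEN and VLAN_HLEN values are copied from the usual kernel headers):

#include <stdio.h>

#define ETH_HLEN 14	/* value from <linux/if_ether.h> */
#define VLAN_HLEN 4	/* value from <linux/if_vlan.h> */

#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7)

int main(void)
{
	/* 1500 + 14 + 4 + 4 = 1522, rounded up to 1528 to satisfy the
	 * Falcon 10G MAC's 8-byte alignment requirement */
	printf("MTU 1500 -> frame length %d\n", EFX_MAX_FRAME_LEN(1500));
	printf("MTU 9000 -> frame length %d\n", EFX_MAX_FRAME_LEN(9000));
	return 0;
}
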
882
883#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
new file mode 100644
index 000000000000..9d02c84e6b2d
--- /dev/null
+++ b/drivers/net/sfc/phy.h
@@ -0,0 +1,48 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_PHY_H
11#define EFX_PHY_H
12
13/****************************************************************************
14 * 10Xpress (SFX7101) PHY
15 */
16extern struct efx_phy_operations falcon_tenxpress_phy_ops;
17
18enum tenxpress_state {
19 TENXPRESS_STATUS_OFF = 0,
20 TENXPRESS_STATUS_OTEMP = 1,
21 TENXPRESS_STATUS_NORMAL = 2,
22};
23
24extern void tenxpress_set_state(struct efx_nic *efx,
25 enum tenxpress_state state);
26extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
27extern void tenxpress_crc_err(struct efx_nic *efx);
28
29/****************************************************************************
30 * Exported functions from the driver for XFP optical PHYs
31 */
32extern struct efx_phy_operations falcon_xfp_phy_ops;
33
34/* The QUAKE XFP PHY provides various H/W control states for LEDs */
35#define QUAKE_LED_LINK_INVAL (0)
36#define QUAKE_LED_LINK_STAT (1)
37#define QUAKE_LED_LINK_ACT (2)
38#define QUAKE_LED_LINK_ACTSTAT (3)
39#define QUAKE_LED_OFF (4)
40#define QUAKE_LED_ON (5)
41#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
42/* What link the LED tracks */
43#define QUAKE_LED_TXLINK (0)
44#define QUAKE_LED_RXLINK (8)
45
46extern void xfp_set_led(struct efx_nic *p, int led, int state);
47
48#endif
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
new file mode 100644
index 000000000000..551299b462ae
--- /dev/null
+++ b/drivers/net/sfc/rx.c
@@ -0,0 +1,875 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/socket.h>
12#include <linux/in.h>
13#include <linux/ip.h>
14#include <linux/tcp.h>
15#include <linux/udp.h>
16#include <net/ip.h>
17#include <net/checksum.h>
18#include "net_driver.h"
19#include "rx.h"
20#include "efx.h"
21#include "falcon.h"
22#include "workarounds.h"
23
24/* Number of RX descriptors pushed at once. */
25#define EFX_RX_BATCH 8
26
27/* Size of buffer allocated for skb header area. */
28#define EFX_SKB_HEADERS 64u
29
30/*
31 * rx_alloc_method - RX buffer allocation method
32 *
33 * This driver supports two methods for allocating and using RX buffers:
34 * each RX buffer may be backed by an skb or by an order-n page.
35 *
36 * When LRO is in use, the second method has a lower overhead,
37 * since we don't have to allocate then free skbs on reassembled frames.
38 *
39 * Values:
40 * - RX_ALLOC_METHOD_AUTO = 0
41 * - RX_ALLOC_METHOD_SKB = 1
42 * - RX_ALLOC_METHOD_PAGE = 2
43 *
44 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
45 * controlled by the parameters below.
46 *
47 * - Since pushing and popping descriptors are separated by the rx_queue
48 * size, the watermarks should be ~rxd_size.
49 * - The performance win from using page-based allocation for LRO is less
50 * than the performance hit of using page-based allocation for non-LRO,
51 * so the watermarks should reflect this.
52 *
53 * Per channel we maintain a single variable, updated on each receive:
54 *
55 * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
56 * RX_ALLOC_FACTOR_SKB)
57 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
58 * limits the hysteresis), and update the allocation strategy:
59 *
60 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
61 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
62 */
63static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
64
65#define RX_ALLOC_LEVEL_LRO 0x2000
66#define RX_ALLOC_LEVEL_MAX 0x3000
67#define RX_ALLOC_FACTOR_LRO 1
68#define RX_ALLOC_FACTOR_SKB (-2)
69
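A standalone C sketch of this hysteresis, reusing the constants above (the packet counts are arbitrary, and in the driver the clamp runs once per NAPI poll rather than once at the end of a burst):

#include <stdio.h>

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

static int rx_alloc_level;

/* Called per packet: LRO hits push the level up, plain skbs pull it
 * down twice as fast */
static void rx_seen(int lro_performed)
{
	rx_alloc_level += lro_performed ? RX_ALLOC_FACTOR_LRO
					: RX_ALLOC_FACTOR_SKB;
}

/* Clamp the level and pick a strategy, as efx_rx_strategy() does */
static const char *strategy(void)
{
	if (rx_alloc_level < 0)
		rx_alloc_level = 0;
	else if (rx_alloc_level > RX_ALLOC_LEVEL_MAX)
		rx_alloc_level = RX_ALLOC_LEVEL_MAX;
	return rx_alloc_level > RX_ALLOC_LEVEL_LRO ? "page" : "skb";
}

int main(void)
{
	int i;

	for (i = 0; i < 20000; i++)
		rx_seen(1);			/* sustained LRO traffic */
	printf("after LRO burst: %s\n", strategy());	/* "page" */
	for (i = 0; i < 20000; i++)
		rx_seen(0);			/* non-LRO traffic */
	printf("after skb burst: %s\n", strategy());	/* "skb" */
	return 0;
}
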
70/* This is the percentage fill level below which new RX descriptors
71 * will be added to the RX descriptor ring.
72 */
73static unsigned int rx_refill_threshold = 90;
74
75/* This is the percentage fill level to which an RX queue will be refilled
76 * when the "RX refill threshold" is reached.
77 */
78static unsigned int rx_refill_limit = 95;
79
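To make the watermark arithmetic concrete, a small sketch assuming a hypothetical 4096-entry ring and the default thresholds above (it mirrors the calculation done in efx_init_rx_queue() further down):

#include <stdio.h>

#define EFX_RXD_HEAD_ROOM 2

int main(void)
{
	unsigned int ring_mask = 4095;	/* assumed 4096-entry RX ring */
	unsigned int threshold = 90, refill = 95;  /* the defaults above */
	unsigned int max_fill = ring_mask + 1 - EFX_RXD_HEAD_ROOM;
	unsigned int trigger = max_fill * threshold / 100;
	unsigned int limit = max_fill * refill / 100;

	/* A fast fill starts once the fill level drops below "trigger"
	 * and tops the ring back up to "limit" */
	printf("max_fill=%u trigger=%u limit=%u\n",
	       max_fill, trigger, limit);
	/* prints: max_fill=4094 trigger=3684 limit=3889 */
	return 0;
}
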
80/*
81 * RX maximum head room required.
82 *
83 * This must be at least 1 to prevent overflow and at least 2 to allow
84 * pipelined receives.
85 */
86#define EFX_RXD_HEAD_ROOM 2
87
88/* Macros for zero-order pages (potentially) containing multiple RX buffers */
89#define RX_DATA_OFFSET(_data) \
90 (((unsigned long) (_data)) & (PAGE_SIZE-1))
91#define RX_BUF_OFFSET(_rx_buf) \
92 RX_DATA_OFFSET((_rx_buf)->data)
93
94#define RX_PAGE_SIZE(_efx) \
95 (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
96
97
98/**************************************************************************
99 *
100 * Linux generic LRO handling
101 *
102 **************************************************************************
103 */
104
105static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
106 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
107{
108 struct efx_channel *channel = (struct efx_channel *)priv;
109 struct iphdr *iph;
110 struct tcphdr *th;
111
112 iph = (struct iphdr *)skb->data;
113 if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
114 goto fail;
115
116 th = (struct tcphdr *)(skb->data + iph->ihl * 4);
117
118 *tcpudp_hdr = th;
119 *ip_hdr = iph;
120 *hdr_flags = LRO_IPV4 | LRO_TCP;
121
122 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
123 return 0;
124fail:
125 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
126 return -1;
127}
128
129static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
130 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
131 void *priv)
132{
133 struct efx_channel *channel = (struct efx_channel *)priv;
134 struct ethhdr *eh;
135 struct iphdr *iph;
136
137 /* We support EtherII and VLAN encapsulated IPv4 */
138 eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
139 *mac_hdr = eh;
140
141 if (eh->h_proto == htons(ETH_P_IP)) {
142 iph = (struct iphdr *)(eh + 1);
143 } else {
144 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
145 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
146 goto fail;
147
148 iph = (struct iphdr *)(veh + 1);
149 }
150 *ip_hdr = iph;
151
152 /* We can only do LRO over TCP */
153 if (iph->protocol != IPPROTO_TCP)
154 goto fail;
155
156 *hdr_flags = LRO_IPV4 | LRO_TCP;
157 *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
158
159 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
160 return 0;
161 fail:
162 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
163 return -1;
164}
165
166int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
167{
168 size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
169 struct net_lro_desc *lro_arr;
170
171 /* Allocate the LRO descriptors structure */
172 lro_arr = kzalloc(s, GFP_KERNEL);
173 if (lro_arr == NULL)
174 return -ENOMEM;
175
176 lro_mgr->lro_arr = lro_arr;
177 lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
178 lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
179 lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
180
181 lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
182 lro_mgr->get_frag_header = efx_get_frag_hdr;
183 lro_mgr->dev = efx->net_dev;
184
185 lro_mgr->features = LRO_F_NAPI;
186
187 /* We can pass packets up with the checksum intact */
188 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
189
190 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
191
192 return 0;
193}
194
195void efx_lro_fini(struct net_lro_mgr *lro_mgr)
196{
197 kfree(lro_mgr->lro_arr);
198 lro_mgr->lro_arr = NULL;
199}
200
201/**
202 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
203 *
204 * @rx_queue: Efx RX queue
205 * @rx_buf: RX buffer structure to populate
206 *
207 * This allocates memory for a new receive buffer, maps it for DMA,
208 * and populates a struct efx_rx_buffer with the relevant
209 * information. Return a negative error code or 0 on success.
210 */
211static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
212 struct efx_rx_buffer *rx_buf)
213{
214 struct efx_nic *efx = rx_queue->efx;
215 struct net_device *net_dev = efx->net_dev;
216 int skb_len = efx->rx_buffer_len;
217
218 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
219 if (unlikely(!rx_buf->skb))
220 return -ENOMEM;
221
222 /* Adjust the SKB for padding and checksum */
223 skb_reserve(rx_buf->skb, NET_IP_ALIGN);
224 rx_buf->len = skb_len - NET_IP_ALIGN;
225 rx_buf->data = (char *)rx_buf->skb->data;
226 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
227
228 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
229 rx_buf->data, rx_buf->len,
230 PCI_DMA_FROMDEVICE);
231
232 if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
233 dev_kfree_skb_any(rx_buf->skb);
234 rx_buf->skb = NULL;
235 return -EIO;
236 }
237
238 return 0;
239}
240
241/**
242 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
243 *
244 * @rx_queue: Efx RX queue
245 * @rx_buf: RX buffer structure to populate
246 *
247 * This allocates memory for a new receive buffer, maps it for DMA,
248 * and populates a struct efx_rx_buffer with the relevant
249 * information. Return a negative error code or 0 on success.
250 */
251static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
252 struct efx_rx_buffer *rx_buf)
253{
254 struct efx_nic *efx = rx_queue->efx;
255 int bytes, space, offset;
256
257 bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
258
259 /* If there is space left in the previously allocated page,
260 * then use it. Otherwise allocate a new one */
261 rx_buf->page = rx_queue->buf_page;
262 if (rx_buf->page == NULL) {
263 dma_addr_t dma_addr;
264
265 rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
266 efx->rx_buffer_order);
267 if (unlikely(rx_buf->page == NULL))
268 return -ENOMEM;
269
270 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
271 0, RX_PAGE_SIZE(efx),
272 PCI_DMA_FROMDEVICE);
273
274 if (unlikely(pci_dma_mapping_error(dma_addr))) {
275 __free_pages(rx_buf->page, efx->rx_buffer_order);
276 rx_buf->page = NULL;
277 return -EIO;
278 }
279
280 rx_queue->buf_page = rx_buf->page;
281 rx_queue->buf_dma_addr = dma_addr;
282 rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
283 EFX_PAGE_IP_ALIGN);
284 }
285
286 offset = RX_DATA_OFFSET(rx_queue->buf_data);
287 rx_buf->len = bytes;
288 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
289 rx_buf->data = rx_queue->buf_data;
290
291 /* Try to pack multiple buffers per page */
292 if (efx->rx_buffer_order == 0) {
293 /* The next buffer starts on the next 512 byte boundary */
294 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
295 offset += ((bytes + 0x1ff) & ~0x1ff);
296
297 space = RX_PAGE_SIZE(efx) - offset;
298 if (space >= bytes) {
299			/* Refs are dropped as the kernel releases each skb */
300 get_page(rx_queue->buf_page);
301 goto out;
302 }
303 }
304
305 /* This is the final RX buffer for this page, so mark it for
306 * unmapping */
307 rx_queue->buf_page = NULL;
308 rx_buf->unmap_addr = rx_queue->buf_dma_addr;
309
310 out:
311 return 0;
312}
313
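A rough standalone model of this packing, ignoring the EFX_PAGE_IP_ALIGN offset and assuming a hypothetical buffer length:

#include <stdio.h>

#define PAGE_SIZE 4096		/* typical zero-order page */

int main(void)
{
	unsigned int bytes = 1728;	/* hypothetical RX buffer length */
	unsigned int stride = (bytes + 0x1ff) & ~0x1ffu; /* 512B boundary */
	unsigned int offset = 0, count = 0;

	/* Carve buffers out of the page while a whole one still fits,
	 * as the packing loop above does */
	while (PAGE_SIZE - offset >= bytes) {
		count++;
		offset += stride;
	}
	printf("stride=%u -> %u buffers per page\n", stride, count);
	/* prints: stride=2048 -> 2 buffers per page */
	return 0;
}
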
314/* This allocates memory for a new receive buffer, maps it for DMA,
315 * and populates a struct efx_rx_buffer with the relevant
316 * information.
317 */
318static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
319 struct efx_rx_buffer *new_rx_buf)
320{
321 int rc = 0;
322
323 if (rx_queue->channel->rx_alloc_push_pages) {
324 new_rx_buf->skb = NULL;
325 rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
326 rx_queue->alloc_page_count++;
327 } else {
328 new_rx_buf->page = NULL;
329 rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
330 rx_queue->alloc_skb_count++;
331 }
332
333 if (unlikely(rc < 0))
334 EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
335 rx_queue->queue, rc);
336 return rc;
337}
338
339static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
340 struct efx_rx_buffer *rx_buf)
341{
342 if (rx_buf->page) {
343 EFX_BUG_ON_PARANOID(rx_buf->skb);
344 if (rx_buf->unmap_addr) {
345 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
346 RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
347 rx_buf->unmap_addr = 0;
348 }
349 } else if (likely(rx_buf->skb)) {
350 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
351 rx_buf->len, PCI_DMA_FROMDEVICE);
352 }
353}
354
355static inline void efx_free_rx_buffer(struct efx_nic *efx,
356 struct efx_rx_buffer *rx_buf)
357{
358 if (rx_buf->page) {
359 __free_pages(rx_buf->page, efx->rx_buffer_order);
360 rx_buf->page = NULL;
361 } else if (likely(rx_buf->skb)) {
362 dev_kfree_skb_any(rx_buf->skb);
363 rx_buf->skb = NULL;
364 }
365}
366
367static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
368 struct efx_rx_buffer *rx_buf)
369{
370 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
371 efx_free_rx_buffer(rx_queue->efx, rx_buf);
372}
373
374/**
375 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
376 * @rx_queue: RX descriptor queue
377 * @retry: Recheck the fill level
378 * This will aim to fill the RX descriptor queue up to
379 * @rx_queue->fast_fill_limit. If there is insufficient atomic
380 * memory to do so, the caller should retry.
381 */
382static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
383 int retry)
384{
385 struct efx_rx_buffer *rx_buf;
386 unsigned fill_level, index;
387 int i, space, rc = 0;
388
389 /* Calculate current fill level. Do this outside the lock,
390 * because most of the time we'll end up not wanting to do the
391 * fill anyway.
392 */
393 fill_level = (rx_queue->added_count - rx_queue->removed_count);
394 EFX_BUG_ON_PARANOID(fill_level >
395 rx_queue->efx->type->rxd_ring_mask + 1);
396
397 /* Don't fill if we don't need to */
398 if (fill_level >= rx_queue->fast_fill_trigger)
399 return 0;
400
401 /* Record minimum fill level */
402 if (unlikely(fill_level < rx_queue->min_fill))
403 if (fill_level)
404 rx_queue->min_fill = fill_level;
405
406 /* Acquire RX add lock. If this lock is contended, then a fast
407 * fill must already be in progress (e.g. in the refill
408 * tasklet), so we don't need to do anything
409 */
410 if (!spin_trylock_bh(&rx_queue->add_lock))
411 return -1;
412
413 retry:
414 /* Recalculate current fill level now that we have the lock */
415 fill_level = (rx_queue->added_count - rx_queue->removed_count);
416 EFX_BUG_ON_PARANOID(fill_level >
417 rx_queue->efx->type->rxd_ring_mask + 1);
418 space = rx_queue->fast_fill_limit - fill_level;
419 if (space < EFX_RX_BATCH)
420 goto out_unlock;
421
422 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
423 " level %d to level %d using %s allocation\n",
424 rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
425 rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
426
427 do {
428 for (i = 0; i < EFX_RX_BATCH; ++i) {
429 index = (rx_queue->added_count &
430 rx_queue->efx->type->rxd_ring_mask);
431 rx_buf = efx_rx_buffer(rx_queue, index);
432 rc = efx_init_rx_buffer(rx_queue, rx_buf);
433 if (unlikely(rc))
434 goto out;
435 ++rx_queue->added_count;
436 }
437 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
438
439 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
440 "to level %d\n", rx_queue->queue,
441 rx_queue->added_count - rx_queue->removed_count);
442
443 out:
444 /* Send write pointer to card. */
445 falcon_notify_rx_desc(rx_queue);
446
447	/* If this fast fill is running from within the refill tasklet, then
448	 * on SMP systems it may be running on a different CPU from
449 * RX event processing, which means that the fill level may now be
450 * out of date. */
451 if (unlikely(retry && (rc == 0)))
452 goto retry;
453
454 out_unlock:
455 spin_unlock_bh(&rx_queue->add_lock);
456
457 return rc;
458}
459
460/**
461 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
462 * @rx_queue: RX descriptor queue
463 *
464 * This will aim to fill the RX descriptor queue up to
465 * @rx_queue->fast_fill_limit. If there is insufficient memory to do so,
466 * it will schedule a work item to immediately continue the fast fill.
467 */
468void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
469{
470 int rc;
471
472 rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
473 if (unlikely(rc)) {
474 /* Schedule the work item to run immediately. The hope is
475 * that work is immediately pending to free some memory
476 * (e.g. an RX event or TX completion)
477 */
478 efx_schedule_slow_fill(rx_queue, 0);
479 }
480}
481
482void efx_rx_work(struct work_struct *data)
483{
484 struct efx_rx_queue *rx_queue;
485 int rc;
486
487 rx_queue = container_of(data, struct efx_rx_queue, work.work);
488
489 if (unlikely(!rx_queue->channel->enabled))
490 return;
491
492 EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
493 "%d\n", rx_queue->queue, raw_smp_processor_id());
494
495 ++rx_queue->slow_fill_count;
496 /* Push new RX descriptors, allowing at least 1 jiffy for
497 * the kernel to free some more memory. */
498 rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
499 if (rc)
500 efx_schedule_slow_fill(rx_queue, 1);
501}
502
503static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
504 struct efx_rx_buffer *rx_buf,
505 int len, int *discard,
506 int *leak_packet)
507{
508 struct efx_nic *efx = rx_queue->efx;
509 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
510
511 if (likely(len <= max_len))
512 return;
513
514	/* The packet must always be discarded; it is only treated as a
515	 * fatal error (requiring a reset) in the seriously overlength
516	 * case below */
517 *discard = 1;
518
519 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
520		EFX_ERR_RL(efx, "RX queue %d seriously overlength "
521 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
522 rx_queue->queue, len, max_len,
523 efx->type->rx_buffer_padding);
524 /* If this buffer was skb-allocated, then the meta
525 * data at the end of the skb will be trashed. So
526 * we have no choice but to leak the fragment.
527 */
528 *leak_packet = (rx_buf->skb != NULL);
529 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
530 } else {
531		EFX_ERR_RL(efx, "RX queue %d overlength RX event "
532 "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
533 }
534
535 rx_queue->channel->n_rx_overlength++;
536}
537
538/* Pass a received packet up through the generic LRO stack
539 *
540 * Handles driverlink veto, and passes the fragment up via
541 * the appropriate LRO method
542 */
543static inline void efx_rx_packet_lro(struct efx_channel *channel,
544 struct efx_rx_buffer *rx_buf)
545{
546 struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
547 void *priv = channel;
548
549 /* Pass the skb/page into the LRO engine */
550 if (rx_buf->page) {
551 struct skb_frag_struct frags;
552
553 frags.page = rx_buf->page;
554 frags.page_offset = RX_BUF_OFFSET(rx_buf);
555 frags.size = rx_buf->len;
556
557 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
558 rx_buf->len, priv, 0);
559
560 EFX_BUG_ON_PARANOID(rx_buf->skb);
561 rx_buf->page = NULL;
562 } else {
563 EFX_BUG_ON_PARANOID(!rx_buf->skb);
564
565 lro_receive_skb(lro_mgr, rx_buf->skb, priv);
566 rx_buf->skb = NULL;
567 }
568}
569
570/* Allocate and construct an SKB around a struct page. */
571static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
572 struct efx_nic *efx,
573 int hdr_len)
574{
575 struct sk_buff *skb;
576
577 /* Allocate an SKB to store the headers */
578 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
579 if (unlikely(skb == NULL)) {
580 EFX_ERR_RL(efx, "RX out of memory for skb\n");
581 return NULL;
582 }
583
584 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
585 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
586
587 skb->ip_summed = CHECKSUM_UNNECESSARY;
588 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
589
590 skb->len = rx_buf->len;
591 skb->truesize = rx_buf->len + sizeof(struct sk_buff);
592 memcpy(skb->data, rx_buf->data, hdr_len);
593 skb->tail += hdr_len;
594
595 /* Append the remaining page onto the frag list */
596 if (unlikely(rx_buf->len > hdr_len)) {
597 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
598 frag->page = rx_buf->page;
599 frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
600 frag->size = skb->len - hdr_len;
601 skb_shinfo(skb)->nr_frags = 1;
602 skb->data_len = frag->size;
603 } else {
604 __free_pages(rx_buf->page, efx->rx_buffer_order);
605 skb->data_len = 0;
606 }
607
608 /* Ownership has transferred from the rx_buf to skb */
609 rx_buf->page = NULL;
610
611 /* Move past the ethernet header */
612 skb->protocol = eth_type_trans(skb, efx->net_dev);
613
614 return skb;
615}
616
617void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
618 unsigned int len, int checksummed, int discard)
619{
620 struct efx_nic *efx = rx_queue->efx;
621 struct efx_rx_buffer *rx_buf;
622 int leak_packet = 0;
623
624 rx_buf = efx_rx_buffer(rx_queue, index);
625 EFX_BUG_ON_PARANOID(!rx_buf->data);
626 EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
627 EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
628
629 /* This allows the refill path to post another buffer.
630 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
631 * isn't overwritten yet.
632 */
633 rx_queue->removed_count++;
634
635 /* Validate the length encoded in the event vs the descriptor pushed */
636 efx_rx_packet__check_len(rx_queue, rx_buf, len,
637 &discard, &leak_packet);
638
639 EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
640 rx_queue->queue, index,
641 (unsigned long long)rx_buf->dma_addr, len,
642 (checksummed ? " [SUMMED]" : ""),
643 (discard ? " [DISCARD]" : ""));
644
645 /* Discard packet, if instructed to do so */
646 if (unlikely(discard)) {
647 if (unlikely(leak_packet))
648 rx_queue->channel->n_skbuff_leaks++;
649 else
650 /* We haven't called efx_unmap_rx_buffer yet,
651 * so fini the entire rx_buffer here */
652 efx_fini_rx_buffer(rx_queue, rx_buf);
653 return;
654 }
655
656 /* Release card resources - assumes all RX buffers consumed in-order
657 * per RX queue
658 */
659 efx_unmap_rx_buffer(efx, rx_buf);
660
661 /* Prefetch nice and early so data will (hopefully) be in cache by
662 * the time we look at it.
663 */
664 prefetch(rx_buf->data);
665
666 /* Pipeline receives so that we give time for packet headers to be
667 * prefetched into cache.
668 */
669 rx_buf->len = len;
670 if (rx_queue->channel->rx_pkt)
671 __efx_rx_packet(rx_queue->channel,
672 rx_queue->channel->rx_pkt,
673 rx_queue->channel->rx_pkt_csummed);
674 rx_queue->channel->rx_pkt = rx_buf;
675 rx_queue->channel->rx_pkt_csummed = checksummed;
676}
677
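A toy model of this one-deep pipeline (not driver code; in the driver the final pending packet is flushed by the channel's NAPI poll in efx.c):

#include <stdio.h>

static const char *rx_pkt;	/* models channel->rx_pkt */

static void process(const char *pkt)
{
	printf("processed %s\n", pkt);
}

/* Models efx_rx_packet(): the prefetch for the new packet is issued,
 * but the previous packet is the one processed, so its headers have
 * had time to arrive in cache */
static void rx(const char *pkt)
{
	if (rx_pkt)
		process(rx_pkt);
	rx_pkt = pkt;
}

int main(void)
{
	rx("pkt0");		/* nothing processed yet */
	rx("pkt1");		/* pkt0 processed now */
	if (rx_pkt) {		/* end-of-poll flush, as in efx.c */
		process(rx_pkt);
		rx_pkt = NULL;
	}
	return 0;
}
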
678/* Handle a received packet. Second half: Touches packet payload. */
679void __efx_rx_packet(struct efx_channel *channel,
680 struct efx_rx_buffer *rx_buf, int checksummed)
681{
682 struct efx_nic *efx = channel->efx;
683 struct sk_buff *skb;
684 int lro = efx->net_dev->features & NETIF_F_LRO;
685
686 if (rx_buf->skb) {
687 prefetch(skb_shinfo(rx_buf->skb));
688
689 skb_put(rx_buf->skb, rx_buf->len);
690
691 /* Move past the ethernet header. rx_buf->data still points
692 * at the ethernet header */
693 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
694 efx->net_dev);
695 }
696
697 /* Both our generic-LRO and SFC-SSR support skb and page based
698 * allocation, but neither support switching from one to the
699 * other on the fly. If we spot that the allocation mode has
700 * changed, then flush the LRO state.
701 */
702 if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
703 efx_flush_lro(channel);
704 channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
705 }
706 if (likely(checksummed && lro)) {
707 efx_rx_packet_lro(channel, rx_buf);
708 goto done;
709 }
710
711 /* Form an skb if required */
712 if (rx_buf->page) {
713 int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
714 skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
715 if (unlikely(skb == NULL)) {
716 efx_free_rx_buffer(efx, rx_buf);
717 goto done;
718 }
719 } else {
720 /* We now own the SKB */
721 skb = rx_buf->skb;
722 rx_buf->skb = NULL;
723 }
724
725 EFX_BUG_ON_PARANOID(rx_buf->page);
726 EFX_BUG_ON_PARANOID(rx_buf->skb);
727 EFX_BUG_ON_PARANOID(!skb);
728
729 /* Set the SKB flags */
730 if (unlikely(!checksummed || !efx->rx_checksum_enabled))
731 skb->ip_summed = CHECKSUM_NONE;
732
733 /* Pass the packet up */
734 netif_receive_skb(skb);
735
736 /* Update allocation strategy method */
737 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
738
739 /* fall-thru */
740done:
741 efx->net_dev->last_rx = jiffies;
742}
743
744void efx_rx_strategy(struct efx_channel *channel)
745{
746 enum efx_rx_alloc_method method = rx_alloc_method;
747
748 /* Only makes sense to use page based allocation if LRO is enabled */
749 if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
750 method = RX_ALLOC_METHOD_SKB;
751 } else if (method == RX_ALLOC_METHOD_AUTO) {
752 /* Constrain the rx_alloc_level */
753 if (channel->rx_alloc_level < 0)
754 channel->rx_alloc_level = 0;
755 else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
756 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
757
758 /* Decide on the allocation method */
759 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
760 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
761 }
762
763 /* Push the option */
764 channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
765}
766
767int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
768{
769 struct efx_nic *efx = rx_queue->efx;
770 unsigned int rxq_size;
771 int rc;
772
773 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
774
775 /* Allocate RX buffers */
776 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
777 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
778 if (!rx_queue->buffer) {
779 rc = -ENOMEM;
780 goto fail1;
781 }
782
783 rc = falcon_probe_rx(rx_queue);
784 if (rc)
785 goto fail2;
786
787 return 0;
788
789 fail2:
790 kfree(rx_queue->buffer);
791 rx_queue->buffer = NULL;
792 fail1:
793 rx_queue->used = 0;
794
795 return rc;
796}
797
798int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
799{
800 struct efx_nic *efx = rx_queue->efx;
801 unsigned int max_fill, trigger, limit;
802
803 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
804
805 /* Initialise ptr fields */
806 rx_queue->added_count = 0;
807 rx_queue->notified_count = 0;
808 rx_queue->removed_count = 0;
809 rx_queue->min_fill = -1U;
810 rx_queue->min_overfill = -1U;
811
812 /* Initialise limit fields */
813 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
814 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
815 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
816
817 rx_queue->max_fill = max_fill;
818 rx_queue->fast_fill_trigger = trigger;
819 rx_queue->fast_fill_limit = limit;
820
821 /* Set up RX descriptor ring */
822 return falcon_init_rx(rx_queue);
823}
824
825void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
826{
827 int i;
828 struct efx_rx_buffer *rx_buf;
829
830 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
831
832 falcon_fini_rx(rx_queue);
833
834	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
835 if (rx_queue->buffer) {
836 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
837 rx_buf = efx_rx_buffer(rx_queue, i);
838 efx_fini_rx_buffer(rx_queue, rx_buf);
839 }
840 }
841
842 /* For a page that is part-way through splitting into RX buffers */
843 if (rx_queue->buf_page != NULL) {
844 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
845 RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
846 __free_pages(rx_queue->buf_page,
847 rx_queue->efx->rx_buffer_order);
848 rx_queue->buf_page = NULL;
849 }
850}
851
852void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
853{
854 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
855
856 falcon_remove_rx(rx_queue);
857
858 kfree(rx_queue->buffer);
859 rx_queue->buffer = NULL;
860 rx_queue->used = 0;
861}
862
863void efx_flush_lro(struct efx_channel *channel)
864{
865 lro_flush_all(&channel->lro_mgr);
866}
867
868
869module_param(rx_alloc_method, int, 0644);
870MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
871
872module_param(rx_refill_threshold, uint, 0444);
873MODULE_PARM_DESC(rx_refill_threshold,
874 "RX descriptor ring fast/slow fill threshold (%)");
875
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
new file mode 100644
index 000000000000..f35e377bfc5f
--- /dev/null
+++ b/drivers/net/sfc/rx.h
@@ -0,0 +1,29 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_RX_H
11#define EFX_RX_H
12
13#include "net_driver.h"
14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
21void efx_lro_fini(struct net_lro_mgr *lro_mgr);
22void efx_flush_lro(struct efx_channel *channel);
23void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data);
26void __efx_rx_packet(struct efx_channel *channel,
27 struct efx_rx_buffer *rx_buf, int checksummed);
28
29#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
new file mode 100644
index 000000000000..11fa9fb8f48b
--- /dev/null
+++ b/drivers/net/sfc/sfe4001.c
@@ -0,0 +1,252 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*****************************************************************************
11 * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that
12 * controls the PHY power rails, and for the MAX6647 temp. sensor used to check
13 * the PHY
14 */
15#include <linux/delay.h>
16#include "efx.h"
17#include "phy.h"
18#include "boards.h"
19#include "falcon.h"
20#include "falcon_hwdefs.h"
21#include "mac.h"
22
23/**************************************************************************
24 *
25 * I2C IO Expander device
26 *
27 **************************************************************************/
28#define PCA9539 0x74
29
30#define P0_IN 0x00
31#define P0_OUT 0x02
32#define P0_INVERT 0x04
33#define P0_CONFIG 0x06
34
35#define P0_EN_1V0X_LBN 0
36#define P0_EN_1V0X_WIDTH 1
37#define P0_EN_1V2_LBN 1
38#define P0_EN_1V2_WIDTH 1
39#define P0_EN_2V5_LBN 2
40#define P0_EN_2V5_WIDTH 1
41#define P0_EN_3V3X_LBN 3
42#define P0_EN_3V3X_WIDTH 1
43#define P0_EN_5V_LBN 4
44#define P0_EN_5V_WIDTH 1
45#define P0_SHORTEN_JTAG_LBN 5
46#define P0_SHORTEN_JTAG_WIDTH 1
47#define P0_X_TRST_LBN 6
48#define P0_X_TRST_WIDTH 1
49#define P0_DSP_RESET_LBN 7
50#define P0_DSP_RESET_WIDTH 1
51
52#define P1_IN 0x01
53#define P1_OUT 0x03
54#define P1_INVERT 0x05
55#define P1_CONFIG 0x07
56
57#define P1_AFE_PWD_LBN 0
58#define P1_AFE_PWD_WIDTH 1
59#define P1_DSP_PWD25_LBN 1
60#define P1_DSP_PWD25_WIDTH 1
61#define P1_RESERVED_LBN 2
62#define P1_RESERVED_WIDTH 2
63#define P1_SPARE_LBN 4
64#define P1_SPARE_WIDTH 4
65
66
67/**************************************************************************
68 *
69 * Temperature Sensor
70 *
71 **************************************************************************/
72#define MAX6647 0x4e
73
74#define RLTS 0x00
75#define RLTE 0x01
76#define RSL 0x02
77#define RCL 0x03
78#define RCRA 0x04
79#define RLHN 0x05
80#define RLLI 0x06
81#define RRHI 0x07
82#define RRLS 0x08
83#define WCRW 0x0a
84#define WLHO 0x0b
85#define WRHA 0x0c
86#define WRLN 0x0e
87#define OSHT 0x0f
88#define REET 0x10
89#define RIET 0x11
90#define RWOE 0x19
91#define RWOI 0x20
92#define HYS 0x21
93#define QUEUE 0x22
94#define MFID 0xfe
95#define REVID 0xff
96
97/* Status bits */
98#define MAX6647_BUSY (1 << 7) /* ADC is converting */
99#define MAX6647_LHIGH (1 << 6) /* Local high temp. alarm */
100#define MAX6647_LLOW (1 << 5) /* Local low temp. alarm */
101#define MAX6647_RHIGH (1 << 4) /* Remote high temp. alarm */
102#define MAX6647_RLOW (1 << 3) /* Remote low temp. alarm */
103#define MAX6647_FAULT (1 << 2) /* DXN/DXP short/open circuit */
104#define MAX6647_EOT (1 << 1) /* Remote junction overtemp. */
105#define MAX6647_IOT (1 << 0) /* Local junction overtemp. */
106
107static const u8 xgphy_max_temperature = 90;
108
109void sfe4001_poweroff(struct efx_nic *efx)
110{
111 struct efx_i2c_interface *i2c = &efx->i2c;
112
113 u8 cfg, out, in;
114
115 EFX_INFO(efx, "%s\n", __func__);
116
117 /* Turn off all power rails */
118 out = 0xff;
119 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120
121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff;
123 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124
125 /* Disable port 0 outputs on IO expander */
126 cfg = 0xff;
127 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
128
129 /* Clear any over-temperature alert */
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
131}
132
133/* This board uses an I2C expander to provide power to the PHY, which needs to
134 * be turned on before the PHY can be used.
135 * Context: Process context, rtnl lock held
136 */
137int sfe4001_poweron(struct efx_nic *efx)
138{
139 struct efx_i2c_interface *i2c = &efx->i2c;
140 unsigned int count;
141 int rc;
142 u8 out, in, cfg;
143 efx_dword_t reg;
144
145 /* 10Xpress has fixed-function LED pins, so there is no board-specific
146 * blink code. */
147 efx->board_info.blink = tenxpress_phy_blink;
148
149 /* Ensure that XGXS and XAUI SerDes are held in reset */
150 EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
151 XX_PWRDNB_EN, 1,
152 XX_RSTPLLAB_EN, 1,
153 XX_RESETA_EN, 1,
154 XX_RESETB_EN, 1,
155 XX_RSTXGXSRX_EN, 1,
156 XX_RSTXGXSTX_EN, 1);
157 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
158 udelay(10);
159
160 /* Set DSP over-temperature alert threshold */
161 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
162 rc = efx_i2c_write(i2c, MAX6647, WLHO,
163 &xgphy_max_temperature, 1);
164 if (rc)
165 goto fail1;
166
167 /* Read it back and verify */
168 rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1);
169 if (rc)
170 goto fail1;
171 if (in != xgphy_max_temperature) {
172 rc = -EFAULT;
173 goto fail1;
174 }
175
176 /* Clear any previous over-temperature alert */
177 rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
178 if (rc)
179 goto fail1;
180
181 /* Enable port 0 and port 1 outputs on IO expander */
182 cfg = 0x00;
183 rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
184 if (rc)
185 goto fail1;
186 cfg = 0xff & ~(1 << P1_SPARE_LBN);
187 rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
188 if (rc)
189 goto fail2;
190
191 /* Turn all power off then wait 1 sec. This ensures PHY is reset */
192 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
193 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
194 (0 << P0_EN_1V0X_LBN));
195 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
196 if (rc)
197 goto fail3;
198
199 schedule_timeout_uninterruptible(HZ);
200 count = 0;
201 do {
202 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
203 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
204 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
205 (1 << P0_X_TRST_LBN));
206
207 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
208 if (rc)
209 goto fail3;
210 msleep(10);
211
212 /* Turn on 1V power rail */
213 out &= ~(1 << P0_EN_1V0X_LBN);
214 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
215 if (rc)
216 goto fail3;
217
218 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
219
220 schedule_timeout_uninterruptible(HZ);
221
222 /* Check DSP is powered */
223 rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1);
224 if (rc)
225 goto fail3;
226 if (in & (1 << P1_AFE_PWD_LBN))
227 goto done;
228
229 } while (++count < 20);
230
231 EFX_INFO(efx, "timed out waiting for power\n");
232 rc = -ETIMEDOUT;
233 goto fail3;
234
235done:
236 EFX_INFO(efx, "PHY is powered on\n");
237 return 0;
238
239fail3:
240 /* Turn off all power rails */
241 out = 0xff;
242 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
243 /* Disable port 1 outputs on IO expander */
244 out = 0xff;
245 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
246fail2:
247 /* Disable port 0 outputs on IO expander */
248 out = 0xff;
249 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
250fail1:
251 return rc;
252}
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
new file mode 100644
index 000000000000..34412f3d41c9
--- /dev/null
+++ b/drivers/net/sfc/spi.h
@@ -0,0 +1,71 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_SPI_H
12#define EFX_SPI_H
13
14#include "net_driver.h"
15
16/**************************************************************************
17 *
18 * Basic SPI command set and bit definitions
19 *
20 *************************************************************************/
21
22/*
23 * Commands common to all known devices.
24 *
25 */
26
27/* Write status register */
28#define SPI_WRSR 0x01
29
30/* Write data to memory array */
31#define SPI_WRITE 0x02
32
33/* Read data from memory array */
34#define SPI_READ 0x03
35
36/* Reset write enable latch */
37#define SPI_WRDI 0x04
38
39/* Read status register */
40#define SPI_RDSR 0x05
41
42/* Set write enable latch */
43#define SPI_WREN 0x06
44
45/* SST: Enable write to status register */
46#define SPI_SST_EWSR 0x50
47
48/*
49 * Status register bits. Not all bits are supported on all devices.
50 *
51 */
52
53/* Write-protect pin enabled */
54#define SPI_STATUS_WPEN 0x80
55
56/* Block protection bit 2 */
57#define SPI_STATUS_BP2 0x10
58
59/* Block protection bit 1 */
60#define SPI_STATUS_BP1 0x08
61
62/* Block protection bit 0 */
63#define SPI_STATUS_BP0 0x04
64
65/* State of the write enable latch */
66#define SPI_STATUS_WEN 0x02
67
68/* Device busy flag */
69#define SPI_STATUS_NRDY 0x01
70
71#endif /* EFX_SPI_H */
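The usual write sequence for a serial EEPROM built from these commands is: set the write-enable latch, issue the write, then poll the status register until the device reports ready. A standalone sketch of that sequence; spi_cmd() is an invented stand-in for a real transfer routine, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define SPI_WREN	0x06
#define SPI_WRITE	0x02
#define SPI_RDSR	0x05
#define SPI_STATUS_NRDY	0x01

/* Invented helper: would clock out "cmd" plus "len" payload bytes and
 * return one response byte where the command expects one */
static uint8_t spi_cmd(uint8_t cmd, const uint8_t *data, int len)
{
	(void)data;
	printf("cmd 0x%02x, %d payload byte(s)\n", cmd, len);
	return 0;	/* pretend the device reports ready */
}

int main(void)
{
	uint8_t payload[3] = { 0x00, 0x10, 0xab };  /* addr hi/lo + data */

	spi_cmd(SPI_WREN, NULL, 0);	/* set the write enable latch */
	spi_cmd(SPI_WRITE, payload, 3);	/* program one byte at 0x0010 */
	while (spi_cmd(SPI_RDSR, NULL, 0) & SPI_STATUS_NRDY)
		;			/* poll until the device is idle */
	return 0;
}
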
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
new file mode 100644
index 000000000000..a2e9f79e47b1
--- /dev/null
+++ b/drivers/net/sfc/tenxpress.c
@@ -0,0 +1,434 @@
1/****************************************************************************
2 * Driver for Solarflare 802.3an compliant PHY
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
11#include <linux/seq_file.h>
12#include "efx.h"
13#include "gmii.h"
14#include "mdio_10g.h"
15#include "falcon.h"
16#include "phy.h"
17#include "falcon_hwdefs.h"
18#include "boards.h"
19#include "mac.h"
20
21/* We expect these MMDs to be in the package */
22/* AN not here as mdio_check_mmds() requires STAT2 support */
23#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \
24 MDIO_MMDREG_DEVS0_PCS | \
25 MDIO_MMDREG_DEVS0_PHYXS)
26
27/* We complain if we fail to see the link partner as 10G capable this many
28 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
29 */
30#define MAX_BAD_LP_TRIES (5)
31
32/* Extended control register */
33#define PMA_PMD_XCONTROL_REG 0xc000
34#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
35#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
36
37/* extended status register */
38#define PMA_PMD_XSTATUS_REG 0xc001
39#define PMA_PMD_XSTAT_FLP_LBN (12)
40
41/* LED control register */
42#define PMA_PMD_LED_CTRL_REG (0xc007)
43#define PMA_PMA_LED_ACTIVITY_LBN (3)
44
45/* LED function override register */
46#define PMA_PMD_LED_OVERR_REG (0xc009)
47/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
48#define PMA_PMD_LED_LINK_LBN (0)
49#define PMA_PMD_LED_SPEED_LBN (2)
50#define PMA_PMD_LED_TX_LBN (4)
51#define PMA_PMD_LED_RX_LBN (6)
52/* Override settings */
53#define PMA_PMD_LED_AUTO (0) /* H/W control */
54#define PMA_PMD_LED_ON (1)
55#define PMA_PMD_LED_OFF (2)
56#define PMA_PMD_LED_FLASH (3)
57/* All LEDs under hardware control */
58#define PMA_PMD_LED_FULL_AUTO (0)
59/* Green and Amber under hardware control, Red off */
60#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
61
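Each LED has a two-bit override field at the LBN offsets above; as a worked example, the default value and the blink value written by tenxpress_phy_blink() further down come out as follows (standalone sketch):

#include <stdio.h>

#define PMA_PMD_LED_LINK_LBN 0
#define PMA_PMD_LED_TX_LBN 4
#define PMA_PMD_LED_RX_LBN 6
#define PMA_PMD_LED_OFF 2
#define PMA_PMD_LED_FLASH 3

int main(void)
{
	unsigned int dflt = PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
	unsigned int blink = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
			     (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
			     (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);

	/* 0x80: RX LED forced off; 0xf3: TX, RX and link LEDs flashing */
	printf("default=0x%02x blink=0x%02x\n", dflt, blink);
	return 0;
}
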
62
63/* Self test (BIST) control register */
64#define PMA_PMD_BIST_CTRL_REG (0xc014)
65#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
66#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
67#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
68/* Self test status register */
69#define PMA_PMD_BIST_STAT_REG (0xc015)
70#define PMA_PMD_BIST_ENX_LBN (3)
71#define PMA_PMD_BIST_PMA_LBN (2)
72#define PMA_PMD_BIST_RXD_LBN (1)
73#define PMA_PMD_BIST_AFE_LBN (0)
74
75#define BIST_MAX_DELAY (1000)
76#define BIST_POLL_DELAY (10)
77
78/* Misc register defines */
79#define PCS_CLOCK_CTRL_REG 0xd801
80#define PLL312_RST_N_LBN 2
81
82#define PCS_SOFT_RST2_REG 0xd806
83#define SERDES_RST_N_LBN 13
84#define XGXS_RST_N_LBN 12
85
86#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
87#define CLK312_EN_LBN 3
88
89/* Boot status register */
90#define PCS_BOOT_STATUS_REG (0xd000)
91#define PCS_BOOT_FATAL_ERR_LBN (0)
92#define PCS_BOOT_PROGRESS_LBN (1)
93#define PCS_BOOT_PROGRESS_WIDTH (2)
94#define PCS_BOOT_COMPLETE_LBN (3)
95#define PCS_BOOT_MAX_DELAY (100)
96#define PCS_BOOT_POLL_DELAY (10)
97
98/* Time to wait between powering down the LNPGA and turning off the power
99 * rails */
100#define LNPGA_PDOWN_WAIT (HZ / 5)
101
102static int crc_error_reset_threshold = 100;
103module_param(crc_error_reset_threshold, int, 0644);
104MODULE_PARM_DESC(crc_error_reset_threshold,
105 "Max number of CRC errors before XAUI reset");
106
107struct tenxpress_phy_data {
108 enum tenxpress_state state;
109 atomic_t bad_crc_count;
110 int bad_lp_tries;
111};
112
113static int tenxpress_state_is(struct efx_nic *efx, int state)
114{
115 struct tenxpress_phy_data *phy_data = efx->phy_data;
116 return (phy_data != NULL) && (state == phy_data->state);
117}
118
119void tenxpress_set_state(struct efx_nic *efx,
120 enum tenxpress_state state)
121{
122 struct tenxpress_phy_data *phy_data = efx->phy_data;
123 if (phy_data != NULL)
124 phy_data->state = state;
125}
126
127void tenxpress_crc_err(struct efx_nic *efx)
128{
129 struct tenxpress_phy_data *phy_data = efx->phy_data;
130 if (phy_data != NULL)
131 atomic_inc(&phy_data->bad_crc_count);
132}
133
134/* Check that the C166 has booted successfully */
135static int tenxpress_phy_check(struct efx_nic *efx)
136{
137 int phy_id = efx->mii.phy_id;
138 int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY;
139 int boot_stat;
140
141 /* Wait for the boot to complete (or not) */
142 while (count) {
143 boot_stat = mdio_clause45_read(efx, phy_id,
144 MDIO_MMD_PCS,
145 PCS_BOOT_STATUS_REG);
146 if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN))
147 break;
148 count--;
149 udelay(PCS_BOOT_POLL_DELAY);
150 }
151
152 if (!count) {
153 EFX_ERR(efx, "%s: PHY boot timed out. Last status "
154 "%x\n", __func__,
155 (boot_stat >> PCS_BOOT_PROGRESS_LBN) &
156 ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1));
157 return -ETIMEDOUT;
158 }
159
160 return 0;
161}
162
163static void tenxpress_reset_xaui(struct efx_nic *efx);
164
165static int tenxpress_init(struct efx_nic *efx)
166{
167 int rc, reg;
168
169 /* Turn on the clock */
170 reg = (1 << CLK312_EN_LBN);
171 mdio_clause45_write(efx, efx->mii.phy_id,
172 MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg);
173
174 rc = tenxpress_phy_check(efx);
175 if (rc < 0)
176 return rc;
177
178 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
179 reg = mdio_clause45_read(efx, efx->mii.phy_id,
180 MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG);
181 reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN);
182 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
183 PMA_PMD_LED_CTRL_REG, reg);
184
185 reg = PMA_PMD_LED_DEFAULT;
186 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
187 PMA_PMD_LED_OVERR_REG, reg);
188
189 return rc;
190}
191
192static int tenxpress_phy_init(struct efx_nic *efx)
193{
194 struct tenxpress_phy_data *phy_data;
195 int rc = 0;
196
197 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
198 efx->phy_data = phy_data;
199
200 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
201
202 rc = mdio_clause45_wait_reset_mmds(efx,
203 TENXPRESS_REQUIRED_DEVS);
204 if (rc < 0)
205 goto fail;
206
207 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
208 if (rc < 0)
209 goto fail;
210
211 rc = tenxpress_init(efx);
212 if (rc < 0)
213 goto fail;
214
215 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
216
217	/* Bring the XGXS and SerDes out of reset and reset the 10Xpress */
218 falcon_reset_xaui(efx);
219
220 return 0;
221
222 fail:
223 kfree(efx->phy_data);
224 efx->phy_data = NULL;
225 return rc;
226}
227
228static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
229{
230 struct tenxpress_phy_data *pd = efx->phy_data;
231 int reg;
232
233 /* Nothing to do if all is well and was previously so. */
234 if (!(bad_lp || pd->bad_lp_tries))
235 return;
236
237 reg = mdio_clause45_read(efx, efx->mii.phy_id,
238 MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG);
239
240 if (bad_lp)
241 pd->bad_lp_tries++;
242 else
243 pd->bad_lp_tries = 0;
244
245 if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
246 pd->bad_lp_tries = 0; /* Restart count */
247 reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
248 reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
249 EFX_ERR(efx, "This NIC appears to be plugged into"
250 " a port that is not 10GBASE-T capable.\n"
251 " This PHY is 10GBASE-T ONLY, so no link can"
252 " be established.\n");
253 } else {
254 reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN);
255 }
256 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
257 PMA_PMD_LED_OVERR_REG, reg);
258}
259
260/* Check link status and return a boolean OK value. If the link is NOT
261 * OK we have a quick rummage round to see if we appear to be plugged
262 * into a non-10GBT port and if so warn the user that they won't get
263 * link any time soon as we are 10GBT only, unless caller specified
264 * not to do this check (it isn't useful in loopback) */
265static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
266{
267 int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
268
269 if (ok) {
270 tenxpress_set_bad_lp(efx, 0);
271 } else if (check_lp) {
272 /* Are we plugged into the wrong sort of link? */
273 int bad_lp = 0;
274 int phy_id = efx->mii.phy_id;
275 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
276 MDIO_AN_STATUS);
277 int xphy_stat = mdio_clause45_read(efx, phy_id,
278 MDIO_MMD_PMAPMD,
279 PMA_PMD_XSTATUS_REG);
280 /* Are we plugged into anything that sends FLPs? If
281 * not we can't distinguish between not being plugged
282 * in and being plugged into a non-AN antique. The FLP
283 * bit has the advantage of not clearing when autoneg
284 * restarts. */
285 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
286 tenxpress_set_bad_lp(efx, 0);
287 return ok;
288 }
289
290 /* If it can do 10GBT it must be XNP capable */
291 bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN));
292 if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) {
293 bad_lp = !(mdio_clause45_read(efx, phy_id,
294 MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) &
295 (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN));
296 }
297 tenxpress_set_bad_lp(efx, bad_lp);
298 }
299 return ok;
300}
301
302static void tenxpress_phy_reconfigure(struct efx_nic *efx)
303{
304 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
305 return;
306
307 efx->link_up = tenxpress_link_ok(efx, 0);
308 efx->link_options = GM_LPA_10000FULL;
309}
310
311static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
312{
313 /* Nothing done here - LASI interrupts aren't reliable so poll */
314}
315
316
317/* Poll PHY for interrupt */
318static int tenxpress_phy_check_hw(struct efx_nic *efx)
319{
320 struct tenxpress_phy_data *phy_data = efx->phy_data;
321 int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
322 int link_ok;
323
324 link_ok = phy_up && tenxpress_link_ok(efx, 1);
325
326 if (link_ok != efx->link_up)
327 falcon_xmac_sim_phy_event(efx);
328
329 /* Nothing to check if we've already shut down the PHY */
330 if (!phy_up)
331 return 0;
332
333 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
334 EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
335 falcon_reset_xaui(efx);
336 atomic_set(&phy_data->bad_crc_count, 0);
337 }
338
339 return 0;
340}
341
342static void tenxpress_phy_fini(struct efx_nic *efx)
343{
344 int reg;
345
346 /* Power down the LNPGA */
347 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
348 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
349 PMA_PMD_XCONTROL_REG, reg);
350
351 /* Waiting here ensures that the board fini, which can turn off the
352 * power to the PHY, won't get run until the LNPGA powerdown has been
353 * given long enough to complete. */
354 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
355
356 kfree(efx->phy_data);
357 efx->phy_data = NULL;
358}
359
360
361/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
362 * (which probably aren't wired anyway) are left in AUTO mode */
363void tenxpress_phy_blink(struct efx_nic *efx, int blink)
364{
365 int reg;
366
367 if (blink)
368 reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
369 (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
370 (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);
371 else
372 reg = PMA_PMD_LED_DEFAULT;
373
374 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
375 PMA_PMD_LED_OVERR_REG, reg);
376}
377
378static void tenxpress_reset_xaui(struct efx_nic *efx)
379{
380 int phy = efx->mii.phy_id;
381 int clk_ctrl, test_select, soft_rst2;
382
383	/* The real work is done on clock_ctrl; the other resets are thought
384	 * to be optional but make the reset more reliable
385 */
386
387 /* Read */
388 clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
389 PCS_CLOCK_CTRL_REG);
390 test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
391 PCS_TEST_SELECT_REG);
392 soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
393 PCS_SOFT_RST2_REG);
394
395 /* Put in reset */
396 test_select &= ~(1 << CLK312_EN_LBN);
397 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
398 PCS_TEST_SELECT_REG, test_select);
399
400 soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
401 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
402 PCS_SOFT_RST2_REG, soft_rst2);
403
404 clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
405 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
406 PCS_CLOCK_CTRL_REG, clk_ctrl);
407 udelay(10);
408
409 /* Remove reset */
410 clk_ctrl |= (1 << PLL312_RST_N_LBN);
411 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
412 PCS_CLOCK_CTRL_REG, clk_ctrl);
413 udelay(10);
414
415 soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
416 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
417 PCS_SOFT_RST2_REG, soft_rst2);
418 udelay(10);
419
420 test_select |= (1 << CLK312_EN_LBN);
421 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
422 PCS_TEST_SELECT_REG, test_select);
423 udelay(10);
424}
425
426struct efx_phy_operations falcon_tenxpress_phy_ops = {
427 .init = tenxpress_phy_init,
428 .reconfigure = tenxpress_phy_reconfigure,
429 .check_hw = tenxpress_phy_check_hw,
430 .fini = tenxpress_phy_fini,
431 .clear_interrupt = tenxpress_phy_clear_interrupt,
432 .reset_xaui = tenxpress_reset_xaui,
433 .mmds = TENXPRESS_REQUIRED_DEVS,
434};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
new file mode 100644
index 000000000000..fbb866b2185e
--- /dev/null
+++ b/drivers/net/sfc/tx.c
@@ -0,0 +1,452 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/pci.h>
12#include <linux/tcp.h>
13#include <linux/ip.h>
14#include <linux/in.h>
15#include <linux/if_ether.h>
16#include <linux/highmem.h>
17#include "net_driver.h"
18#include "tx.h"
19#include "efx.h"
20#include "falcon.h"
21#include "workarounds.h"
22
23/*
24 * TX descriptor ring full threshold
25 *
26 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue
28 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \
30	((_tx_queue)->efx->type->txd_ring_mask / 2u)
31
32/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue.
34 */
35void efx_stop_queue(struct efx_nic *efx)
36{
37 spin_lock_bh(&efx->netif_stop_lock);
38 EFX_TRACE(efx, "stop TX queue\n");
39
40 atomic_inc(&efx->netif_stop_count);
41 netif_stop_queue(efx->net_dev);
42
43 spin_unlock_bh(&efx->netif_stop_lock);
44}
45
46/* Wake netif's TX queue
47 * We want to be able to nest calls to netif_stop_queue(), since each
48 * channel can have an individual stop on the queue.
49 */
50inline void efx_wake_queue(struct efx_nic *efx)
51{
52 local_bh_disable();
53 if (atomic_dec_and_lock(&efx->netif_stop_count,
54 &efx->netif_stop_lock)) {
55 EFX_TRACE(efx, "waking TX queue\n");
56 netif_wake_queue(efx->net_dev);
57 spin_unlock(&efx->netif_stop_lock);
58 }
59 local_bh_enable();
60}
61
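A userspace model of this nested stop/wake counting (not driver code; the driver uses an atomic counter under netif_stop_lock together with netif_stop_queue()/netif_wake_queue()):

#include <stdio.h>

/* The queue only really wakes once every stop has been matched by a
 * wake */
static int netif_stop_count;

static void stop_queue(void)
{
	netif_stop_count++;	/* netif_stop_queue() called here */
}

static void wake_queue(void)
{
	if (--netif_stop_count == 0)
		printf("queue woken\n");	/* netif_wake_queue() */
}

int main(void)
{
	stop_queue();	/* one path stops the queue */
	stop_queue();	/* another path stops it again */
	wake_queue();	/* still stopped: one stop outstanding */
	wake_queue();	/* prints "queue woken" */
	return 0;
}
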
62static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63 struct efx_tx_buffer *buffer)
64{
65 if (buffer->unmap_len) {
66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
67 if (buffer->unmap_single)
68 pci_unmap_single(pci_dev, buffer->unmap_addr,
69 buffer->unmap_len, PCI_DMA_TODEVICE);
70 else
71 pci_unmap_page(pci_dev, buffer->unmap_addr,
72 buffer->unmap_len, PCI_DMA_TODEVICE);
73 buffer->unmap_len = 0;
74 buffer->unmap_single = 0;
75 }
76
77 if (buffer->skb) {
78 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
79 buffer->skb = NULL;
80 EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
81 "complete\n", tx_queue->queue, read_ptr);
82 }
83}
84
85
86/*
87 * Add a socket buffer to a TX queue
88 *
89 * This maps all fragments of a socket buffer for DMA and adds them to
90 * the TX queue. The queue's insert pointer will be incremented by
91 * the number of fragments in the socket buffer.
92 *
93 * If any DMA mapping fails, any mapped fragments will be unmapped,
94 * and the queue's insert pointer will be restored to its original value.
95 *
96 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
97 * The caller must hold netif_tx_lock().
98 */
99static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
100 const struct sk_buff *skb)
101{
102 struct efx_nic *efx = tx_queue->efx;
103 struct pci_dev *pci_dev = efx->pci_dev;
104 struct efx_tx_buffer *buffer;
105 skb_frag_t *fragment;
106 struct page *page;
107 int page_offset;
108 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
109 dma_addr_t dma_addr, unmap_addr = 0;
110 unsigned int dma_len;
111 unsigned unmap_single;
112 int q_space, i = 0;
113 int rc = NETDEV_TX_OK;
114
115 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
116
117 /* Get size of the initial fragment */
118 len = skb_headlen(skb);
119
120 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
121 q_space = efx->type->txd_ring_mask - 1 - fill_level;
122
123 /* Map for DMA. Use pci_map_single rather than pci_map_page
124 * since this is more efficient on machines with sparse
125 * memory.
126 */
127 unmap_single = 1;
128 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
129
130 /* Process all fragments */
131 while (1) {
132 if (unlikely(pci_dma_mapping_error(dma_addr)))
133 goto pci_err;
134
135 /* Store fields for marking in the per-fragment final
136 * descriptor */
137 unmap_len = len;
138 unmap_addr = dma_addr;
139
140 /* Add to TX queue, splitting across DMA boundaries */
141 do {
142 if (unlikely(q_space-- <= 0)) {
143 /* It might be that completions have
144 * happened since the xmit path last
145 * checked. Update the xmit path's
146 * copy of read_count.
147 */
148 ++tx_queue->stopped;
149 /* This memory barrier protects the
150 * change of stopped from the access
151 * of read_count. */
152 smp_mb();
153 tx_queue->old_read_count =
154 *(volatile unsigned *)
155 &tx_queue->read_count;
156 fill_level = (tx_queue->insert_count
157 - tx_queue->old_read_count);
158 q_space = (efx->type->txd_ring_mask - 1 -
159 fill_level);
160 if (unlikely(q_space-- <= 0))
161 goto stop;
162 smp_mb();
163 --tx_queue->stopped;
164 }
165
166 insert_ptr = (tx_queue->insert_count &
167 efx->type->txd_ring_mask);
168 buffer = &tx_queue->buffer[insert_ptr];
169 EFX_BUG_ON_PARANOID(buffer->skb);
170 EFX_BUG_ON_PARANOID(buffer->len);
171 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
172 EFX_BUG_ON_PARANOID(buffer->unmap_len);
173
174 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
175 if (likely(dma_len > len))
176 dma_len = len;
177
178 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
179 if (misalign && dma_len + misalign > 512)
180 dma_len = 512 - misalign;
181
182 /* Fill out per descriptor fields */
183 buffer->len = dma_len;
184 buffer->dma_addr = dma_addr;
185 len -= dma_len;
186 dma_addr += dma_len;
187 ++tx_queue->insert_count;
188 } while (len);
189
190 /* Transfer ownership of the unmapping to the final buffer */
191 buffer->unmap_addr = unmap_addr;
192 buffer->unmap_single = unmap_single;
193 buffer->unmap_len = unmap_len;
194 unmap_len = 0;
195
196 /* Get address and size of next fragment */
197 if (i >= skb_shinfo(skb)->nr_frags)
198 break;
199 fragment = &skb_shinfo(skb)->frags[i];
200 len = fragment->size;
201 page = fragment->page;
202 page_offset = fragment->page_offset;
203 i++;
204 /* Map for DMA */
205 unmap_single = 0;
206 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
207 PCI_DMA_TODEVICE);
208 }
209
210 /* Transfer ownership of the skb to the final buffer */
211 buffer->skb = skb;
212 buffer->continuation = 0;
213
214 /* Pass off to hardware */
215 falcon_push_buffers(tx_queue);
216
217 return NETDEV_TX_OK;
218
219 pci_err:
220 EFX_ERR_RL(efx, "TX queue %d could not map skb with %d bytes in %d "
221 "fragments for DMA\n", tx_queue->queue, skb->len,
222 skb_shinfo(skb)->nr_frags + 1);
223
224 /* Mark the packet as transmitted, and free the SKB ourselves */
225 dev_kfree_skb_any((struct sk_buff *)skb);
226 goto unwind;
227
228 stop:
229 rc = NETDEV_TX_BUSY;
230
231 if (tx_queue->stopped == 1)
232 efx_stop_queue(efx);
233
234 unwind:
235 /* Work backwards until we hit the original insert pointer value */
236 while (tx_queue->insert_count != tx_queue->write_count) {
237 --tx_queue->insert_count;
238 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
239 buffer = &tx_queue->buffer[insert_ptr];
240 efx_dequeue_buffer(tx_queue, buffer);
241 buffer->len = 0;
242 }
243
244 /* Free the fragment we were mid-way through pushing */
245 if (unmap_len)
246 pci_unmap_page(pci_dev, unmap_addr, unmap_len,
247 PCI_DMA_TODEVICE);
248
249 return rc;
250}
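
The inner loop above never lets a descriptor cross a DMA boundary: ((~dma_addr) & tx_dma_mask) + 1 is exactly the number of bytes left before the next boundary, and bug-5391 hardware additionally caps misaligned descriptors at 512 bytes. A standalone demonstration of the boundary split (the 4KB mask is an assumed value; the real one comes from efx->type->tx_dma_mask):

/* Standalone demo of the descriptor-boundary split used in
 * efx_enqueue_skb(); the 4KB mask is an assumption for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t tx_dma_mask = 0xfff;     /* assume a 4KB descriptor limit */
        uint64_t dma_addr = 0x12345e00;         /* 0x200 bytes below a boundary */
        unsigned int len = 2048;

        while (len) {
                /* Bytes remaining before the next boundary */
                unsigned int dma_len = ((~dma_addr) & tx_dma_mask) + 1;
                if (dma_len > len)
                        dma_len = len;
                printf("descriptor: addr=%#llx len=%u\n",
                       (unsigned long long)dma_addr, dma_len);
                dma_addr += dma_len;
                len -= dma_len;
        }
        /* Prints a 512-byte descriptor up to the boundary, then one
         * of 1536 bytes for the remainder. */
        return 0;
}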
251
252/* Remove packets from the TX queue
253 *
254 * This removes packets from the TX queue, up to and including the
255 * specified index.
256 */
257static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
258 unsigned int index)
259{
260 struct efx_nic *efx = tx_queue->efx;
261 unsigned int stop_index, read_ptr;
262 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
263
264 stop_index = (index + 1) & mask;
265 read_ptr = tx_queue->read_count & mask;
266
267 while (read_ptr != stop_index) {
268 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
269 if (unlikely(buffer->len == 0)) {
270 EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
271 "completion id %x\n", tx_queue->queue,
272 read_ptr);
273 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
274 return;
275 }
276
277 efx_dequeue_buffer(tx_queue, buffer);
278 buffer->continuation = 1;
279 buffer->len = 0;
280
281 ++tx_queue->read_count;
282 read_ptr = tx_queue->read_count & mask;
283 }
284}
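
read_count and insert_count are free-running counters that are only masked down to ring indices at the point of use, which makes wraparound arithmetic trivial and keeps the full-versus-empty distinction unambiguous. A standalone sketch of the masked walk above (the 8-entry ring is an assumed size):

/* Sketch of the masked ring walk in efx_dequeue_buffers(); the
 * 8-entry ring is an assumption for illustration. */
#include <stdio.h>

int main(void)
{
        const unsigned int mask = 7;            /* ring of 8 entries */
        unsigned int read_count = 6;            /* free-running counter */
        unsigned int index = 9;                 /* completion index, post-wrap */
        unsigned int stop = (index + 1) & mask; /* one past the completion */

        for (unsigned int ptr = read_count & mask; ptr != stop;
             ptr = ++read_count & mask)
                printf("reclaim slot %u\n", ptr);  /* slots 6, 7, 0, 1 */
        return 0;
}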
285
286/* Initiate a packet transmission on the specified TX queue.
287 * Note that returning anything other than NETDEV_TX_OK will cause the
288 * OS to free the skb.
289 *
290 * This function is split out from efx_hard_start_xmit to allow the
291 * loopback test to direct packets via specific TX queues. It is
292 * therefore a non-static inline, so as not to penalise performance
293 * for non-loopback transmissions.
294 *
295 * Context: netif_tx_lock held
296 */
297inline int efx_xmit(struct efx_nic *efx,
298 struct efx_tx_queue *tx_queue, struct sk_buff *skb)
299{
300 int rc;
301
302 /* Map fragments for DMA and add to TX queue */
303 rc = efx_enqueue_skb(tx_queue, skb);
304 if (unlikely(rc != NETDEV_TX_OK))
305 goto out;
306
307 /* Update last TX timer */
308 efx->net_dev->trans_start = jiffies;
309
310 out:
311 return rc;
312}
313
314/* Initiate a packet transmission. We use one channel per CPU
315 * (sharing when we have more CPUs than channels). On Falcon, the TX
316 * completion events will be directed back to the CPU that transmitted
317 * the packet, which should be cache-efficient.
318 *
319 * Context: non-blocking.
320 * Note that returning anything other than NETDEV_TX_OK will cause the
321 * OS to free the skb.
322 */
323int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
324{
325 struct efx_nic *efx = net_dev->priv;
326 return efx_xmit(efx, &efx->tx_queue[0], skb);
327}
328
329void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
330{
331 unsigned fill_level;
332 struct efx_nic *efx = tx_queue->efx;
333
334 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
335
336 efx_dequeue_buffers(tx_queue, index);
337
338 /* See if we need to restart the netif queue. This barrier
339 * separates the update of read_count from the test of
340 * stopped. */
341 smp_mb();
342 if (unlikely(tx_queue->stopped)) {
343 fill_level = tx_queue->insert_count - tx_queue->read_count;
344 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
345 EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
346
347 /* Do this under netif_tx_lock(), to avoid racing
348 * with efx_xmit(). */
349 netif_tx_lock(efx->net_dev);
350 if (tx_queue->stopped) {
351 tx_queue->stopped = 0;
352 efx_wake_queue(efx);
353 }
354 netif_tx_unlock(efx->net_dev);
355 }
356 }
357}
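
The smp_mb() here pairs with the one in efx_enqueue_skb(): the xmit path writes stopped and then re-reads read_count, while this path updates read_count (via efx_dequeue_buffers) and then tests stopped, so at least one side always observes the other's write and a wakeup cannot be lost. The final re-check of stopped under netif_tx_lock() closes the remaining race with efx_xmit(). Summarised as a comment (annotation, not driver code):

/*
 * Barrier pairing between efx_enqueue_skb() and efx_xmit_done():
 *
 *   producer (enqueue)              consumer (completion)
 *   ------------------              ---------------------
 *   stopped++;                      read_count = ...;
 *   smp_mb();                       smp_mb();
 *   re-read read_count;             if (stopped) wake;
 *
 * Whichever side runs second is guaranteed to see the other side's
 * write: either the producer sees the new read_count and keeps going,
 * or the consumer sees stopped != 0 and issues the wake.
 */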
358
359int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
360{
361 struct efx_nic *efx = tx_queue->efx;
362 unsigned int txq_size;
363 int i, rc;
364
365 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
366
367 /* Allocate software ring */
368 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
369 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
370 if (!tx_queue->buffer) {
371 rc = -ENOMEM;
372 goto fail1;
373 }
374 for (i = 0; i <= efx->type->txd_ring_mask; ++i)
375 tx_queue->buffer[i].continuation = 1;
376
377 /* Allocate hardware ring */
378 rc = falcon_probe_tx(tx_queue);
379 if (rc)
380 goto fail2;
381
382 return 0;
383
384 fail2:
385 kfree(tx_queue->buffer);
386 tx_queue->buffer = NULL;
387 fail1:
388 tx_queue->used = 0;
389
390 return rc;
391}
392
393int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
394{
395 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
396
397 tx_queue->insert_count = 0;
398 tx_queue->write_count = 0;
399 tx_queue->read_count = 0;
400 tx_queue->old_read_count = 0;
401 BUG_ON(tx_queue->stopped);
402
403 /* Set up TX descriptor ring */
404 return falcon_init_tx(tx_queue);
405}
406
407void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
408{
409 struct efx_tx_buffer *buffer;
410
411 if (!tx_queue->buffer)
412 return;
413
414 /* Free any buffers left in the ring */
415 while (tx_queue->read_count != tx_queue->write_count) {
416 buffer = &tx_queue->buffer[tx_queue->read_count &
417 tx_queue->efx->type->txd_ring_mask];
418 efx_dequeue_buffer(tx_queue, buffer);
419 buffer->continuation = 1;
420 buffer->len = 0;
421
422 ++tx_queue->read_count;
423 }
424}
425
426void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
427{
428 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
429
430 /* Flush TX queue, remove descriptor ring */
431 falcon_fini_tx(tx_queue);
432
433 efx_release_tx_buffers(tx_queue);
434
435 /* Release queue's stop on port, if any */
436 if (tx_queue->stopped) {
437 tx_queue->stopped = 0;
438 efx_wake_queue(tx_queue->efx);
439 }
440}
441
442void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
443{
444 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
445 falcon_remove_tx(tx_queue);
446
447 kfree(tx_queue->buffer);
448 tx_queue->buffer = NULL;
449 tx_queue->used = 0;
450}
451
452
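Taken together, tx.c exposes a symmetrical probe/init/fini/remove lifecycle. A hedged sketch of the expected calling order, using only functions added by this patch (the wrapper itself is illustrative and error handling is abbreviated):

/* Sketch of the TX queue lifecycle exported by tx.c; the wrapper is
 * not part of the patch and the caller context is an assumption. */
static int sketch_tx_queue_lifecycle(struct efx_tx_queue *tx_queue)
{
        int rc;

        rc = efx_probe_tx_queue(tx_queue);      /* allocate sw + hw rings */
        if (rc)
                return rc;
        rc = efx_init_tx_queue(tx_queue);       /* reset counters, set up ring */
        if (rc)
                goto out_remove;

        /* ... efx_xmit() / efx_xmit_done() run while the port is up ... */

        efx_fini_tx_queue(tx_queue);            /* flush hw, free pending skbs */
out_remove:
        efx_remove_tx_queue(tx_queue);          /* free rings */
        return rc;
}
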
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
new file mode 100644
index 000000000000..1526a73b4b51
--- /dev/null
+++ b/drivers/net/sfc/tx.h
@@ -0,0 +1,24 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_TX_H
12#define EFX_TX_H
13
14#include "net_driver.h"
15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20
21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
22void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
23
24#endif /* EFX_TX_H */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
new file mode 100644
index 000000000000..dca62f190198
--- /dev/null
+++ b/drivers/net/sfc/workarounds.h
@@ -0,0 +1,56 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_WORKAROUNDS_H
11#define EFX_WORKAROUNDS_H
12
13/*
14 * Hardware workarounds.
15 * Bug numbers are from Solarflare's Bugzilla.
16 */
17
18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
20
21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
23/* SNAP frames have TOBE_DISC set */
24#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
25/* RX PCIe double split performance issue */
26#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
27/* TX pkt parser problem with <= 16 byte TXes */
28#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
29/* XGXS and XAUI reset sequencing in SW */
30#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
31/* Low rate CRC errors require XAUI reset */
32#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
33/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
34 * or a PCIe error (bug 11028) */
35#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
36/* Transmit flow control may get disabled */
37#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
38/* Flush events can take a very long time to appear */
39#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
40
41/* Spurious parity errors in TSORT buffers */
42#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
43/* iSCSI parsing errors */
44#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
45/* RX events go missing */
46#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
47/* RX_RESET on A1 */
48#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
49/* Increase filter depth to avoid RX_RESET */
50#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
51/* Flushes may never complete */
52#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
53/* Leak overlength packets rather than free */
54#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
55
56#endif /* EFX_WORKAROUNDS_H */
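
Every workaround macro expands to a predicate taking the NIC, so call sites stay uniform whether a workaround is unconditional or gated on Falcon rev A. An illustrative call site (the function below is invented for this note, not part of the patch):

/* Illustrative call site only; this function is not in the patch. */
static void sketch_handle_rx_flags(struct efx_nic *efx, int tobe_disc)
{
        if (tobe_disc && EFX_WORKAROUND_5475(efx))
                /* Bug 5475: SNAP frames arrive with TOBE_DISC set, so
                 * the flag may be spurious on affected hardware and
                 * needs further checking before the frame is dropped. */
                ;
}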
diff --git a/drivers/net/sfc/xenpack.h b/drivers/net/sfc/xenpack.h
new file mode 100644
index 000000000000..b0d1f225b70a
--- /dev/null
+++ b/drivers/net/sfc/xenpack.h
@@ -0,0 +1,62 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_XENPACK_H
11#define EFX_XENPACK_H
12
13/* Exported functions from Xenpack standard PHY control */
14
15#include "mdio_10g.h"
16
17/****************************************************************************/
18/* XENPACK MDIO register extensions */
19#define MDIO_XP_LASI_RX_CTRL (0x9000)
20#define MDIO_XP_LASI_TX_CTRL (0x9001)
21#define MDIO_XP_LASI_CTRL (0x9002)
22#define MDIO_XP_LASI_RX_STAT (0x9003)
23#define MDIO_XP_LASI_TX_STAT (0x9004)
24#define MDIO_XP_LASI_STAT (0x9005)
25
26/* Control/Status bits */
27#define XP_LASI_LS_ALARM (1 << 0)
28#define XP_LASI_TX_ALARM (1 << 1)
29#define XP_LASI_RX_ALARM (1 << 2)
30/* These two are Quake vendor extensions to the standard XENPACK defines */
31#define XP_LASI_LS_INTB (1 << 3)
32#define XP_LASI_TEST (1 << 7)
33
34/* Enable LASI interrupts for PHY */
35static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx)
36{
37 int reg;
38 int phy_id = efx->mii.phy_id;
39 /* Read to clear LASI status register */
40 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
41 MDIO_XP_LASI_STAT);
42
43 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
44 MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM);
45}
46
47/* Read the LASI interrupt status to clear the interrupt. */
48static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx)
49{
50 /* Read to clear link status alarm */
51 return mdio_clause45_read(efx, efx->mii.phy_id,
52 MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT);
53}
54
55/* Turn off LASI interrupts */
56static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx)
57{
58 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
59 MDIO_XP_LASI_CTRL, 0);
60}
61
62#endif /* EFX_XENPACK_H */
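
The intended LASI flow is to arm the alarms once at PHY init and then read-to-clear the latched status from the interrupt path. A hedged usage sketch built on the helpers above (the wrapper is illustrative):

/* Sketch of the expected LASI usage; the wrapper is illustrative. */
static void sketch_lasi_usage(struct efx_nic *efx)
{
        xenpack_enable_lasi_irqs(efx);          /* arm link-status alarms */

        /* In the PHY interrupt path: reading the status register both
         * reports and clears the latched alarm. */
        if (xenpack_clear_lasi_irqs(efx) & XP_LASI_LS_ALARM)
                /* link state changed; poll/reconfigure the PHY */ ;

        xenpack_disable_lasi_irqs(efx);         /* on shutdown */
}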
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
new file mode 100644
index 000000000000..66dd5bf1eaa9
--- /dev/null
+++ b/drivers/net/sfc/xfp_phy.c
@@ -0,0 +1,132 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9/*
10 * Driver for XFP optical PHYs (plus some support specific to the Quake 2032)
11 * See www.amcc.com for details (search for qt2032)
12 */
13
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include "efx.h"
17#include "gmii.h"
18#include "mdio_10g.h"
19#include "xenpack.h"
20#include "phy.h"
21#include "mac.h"
22
23#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \
24 MDIO_MMDREG_DEVS0_PMAPMD | \
25 MDIO_MMDREG_DEVS0_PHYXS)
26
27/****************************************************************************/
28/* Quake-specific MDIO registers */
29#define MDIO_QUAKE_LED0_REG (0xD006)
30
31void xfp_set_led(struct efx_nic *p, int led, int mode)
32{
33 int addr = MDIO_QUAKE_LED0_REG + led;
34 mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr,
35 mode);
36}
37
38#define XFP_MAX_RESET_TIME 500
39#define XFP_RESET_WAIT 10
40
41/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
42 * a complete soft reset.
43 */
44static int xfp_reset_phy(struct efx_nic *efx)
45{
46 int rc;
47
48 rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
49 XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
50 XFP_RESET_WAIT);
51 if (rc < 0)
52 goto fail;
53
54 /* Wait 250ms for the PHY to complete bootup */
55 msleep(250);
56
57 /* Check that all the MMDs we expect are present and responding. We
58 * expect faults on some if the link is down, but not on the PHY XS */
59 rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
60 MDIO_MMDREG_DEVS0_PHYXS);
61 if (rc < 0)
62 goto fail;
63
64 efx->board_info.init_leds(efx);
65
66 return rc;
67
68 fail:
69 EFX_ERR(efx, "XFP: reset timed out!\n");
70 return rc;
71}
72
73static int xfp_phy_init(struct efx_nic *efx)
74{
75 u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
76 int rc;
77
78 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
79 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
80 MDIO_ID_REV(devid));
81
82 rc = xfp_reset_phy(efx);
83
84 EFX_INFO(efx, "XFP: PHY init %s.\n",
85 rc ? "failed" : "successful");
86
87 return rc;
88}
89
90static void xfp_phy_clear_interrupt(struct efx_nic *efx)
91{
92 xenpack_clear_lasi_irqs(efx);
93}
94
95static int xfp_link_ok(struct efx_nic *efx)
96{
97 return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
98}
99
100static int xfp_phy_check_hw(struct efx_nic *efx)
101{
102 int rc = 0;
103 int link_up = xfp_link_ok(efx);
104 /* Simulate a PHY event if link state has changed */
105 if (link_up != efx->link_up)
106 falcon_xmac_sim_phy_event(efx);
107
108 return rc;
109}
110
111static void xfp_phy_reconfigure(struct efx_nic *efx)
112{
113 efx->link_up = xfp_link_ok(efx);
114 efx->link_options = GM_LPA_10000FULL;
115}
116
117
118static void xfp_phy_fini(struct efx_nic *efx)
119{
120 /* Clobber the LED if it was blinking */
121 efx->board_info.blink(efx, 0);
122}
123
124struct efx_phy_operations falcon_xfp_phy_ops = {
125 .init = xfp_phy_init,
126 .reconfigure = xfp_phy_reconfigure,
127 .check_hw = xfp_phy_check_hw,
128 .fini = xfp_phy_fini,
129 .clear_interrupt = xfp_phy_clear_interrupt,
130 .reset_xaui = efx_port_dummy_op_void,
131 .mmds = XFP_REQUIRED_DEVS,
132};
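
Both PHY drivers plug into the core through struct efx_phy_operations, so the rest of the driver never calls tenxpress or XFP code directly. A sketch of the expected dispatch, assuming the phy_op pointer that net_driver.h attaches to struct efx_nic (the wrapper itself is illustrative):

/* Illustrative dispatch through struct efx_phy_operations; this
 * wrapper is not part of the patch. */
static int sketch_phy_bringup(struct efx_nic *efx)
{
        /* phy_op points at falcon_xfp_phy_ops or
         * falcon_tenxpress_phy_ops, chosen at probe time. */
        int rc = efx->phy_op->init(efx);

        if (rc == 0)
                efx->phy_op->reconfigure(efx);  /* sets efx->link_up etc. */
        return rc;
}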