Diffstat (limited to 'drivers/net/ethernet/sfc/mcdi.c')
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi.c | 1203 |
1 file changed, 1203 insertions, 0 deletions
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
new file mode 100644
index 000000000000..3dd45ed61f0a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -0,0 +1,1203 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2008-2011 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include "net_driver.h" | ||
12 | #include "nic.h" | ||
13 | #include "io.h" | ||
14 | #include "regs.h" | ||
15 | #include "mcdi_pcol.h" | ||
16 | #include "phy.h" | ||
17 | |||
18 | /************************************************************************** | ||
19 | * | ||
20 | * Management-Controller-to-Driver Interface | ||
21 | * | ||
22 | ************************************************************************** | ||
23 | */ | ||
24 | |||
25 | /* Software-defined layout of the MCDI shared memory */ | ||
26 | #define CMD_NOTIFY_PORT0 0 | ||
27 | #define CMD_NOTIFY_PORT1 4 | ||
28 | #define CMD_PDU_PORT0 0x008 | ||
29 | #define CMD_PDU_PORT1 0x108 | ||
30 | #define REBOOT_FLAG_PORT0 0x3f8 | ||
31 | #define REBOOT_FLAG_PORT1 0x3fc | ||
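/* Taken together, the offsets above imply the following per-port layout of
 * the MCDI shared memory (a sketch derived only from these #defines):
 *
 *   0x000 / 0x004   doorbell ("notify") word, port 0 / port 1
 *   0x008 / 0x108   request/response PDU, port 0 / port 1
 *   0x3f8 / 0x3fc   MC reboot/status flag, port 0 / port 1
 */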
32 | |||
33 | #define MCDI_RPC_TIMEOUT 10 /* seconds */ | ||
34 | |||
35 | #define MCDI_PDU(efx) \ | ||
36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) | ||
37 | #define MCDI_DOORBELL(efx) \ | ||
38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) | ||
39 | #define MCDI_REBOOT_FLAG(efx) \ | ||
40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) | ||
41 | |||
42 | #define SEQ_MASK \ | ||
43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) | ||
44 | |||
45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | ||
46 | { | ||
47 | struct siena_nic_data *nic_data; | ||
48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
49 | nic_data = efx->nic_data; | ||
50 | return &nic_data->mcdi; | ||
51 | } | ||
52 | |||
53 | static inline void | ||
54 | efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) | ||
55 | { | ||
56 | struct siena_nic_data *nic_data = efx->nic_data; | ||
57 | value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) | ||
62 | { | ||
63 | struct siena_nic_data *nic_data = efx->nic_data; | ||
64 | __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); | ||
65 | } | ||
66 | |||
67 | void efx_mcdi_init(struct efx_nic *efx) | ||
68 | { | ||
69 | struct efx_mcdi_iface *mcdi; | ||
70 | |||
71 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
72 | return; | ||
73 | |||
74 | mcdi = efx_mcdi(efx); | ||
75 | init_waitqueue_head(&mcdi->wq); | ||
76 | spin_lock_init(&mcdi->iface_lock); | ||
77 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | ||
78 | mcdi->mode = MCDI_MODE_POLL; | ||
79 | |||
80 | (void) efx_mcdi_poll_reboot(efx); | ||
81 | } | ||
82 | |||
83 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | ||
84 | const u8 *inbuf, size_t inlen) | ||
85 | { | ||
86 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
87 | unsigned pdu = MCDI_PDU(efx); | ||
88 | unsigned doorbell = MCDI_DOORBELL(efx); | ||
89 | unsigned int i; | ||
90 | efx_dword_t hdr; | ||
91 | u32 xflags, seqno; | ||
92 | |||
93 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
94 | BUG_ON(inlen & 3 || inlen >= 0x100); | ||
95 | |||
96 | seqno = mcdi->seqno & SEQ_MASK; | ||
97 | xflags = 0; | ||
98 | if (mcdi->mode == MCDI_MODE_EVENTS) | ||
99 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; | ||
100 | |||
101 | EFX_POPULATE_DWORD_6(hdr, | ||
102 | MCDI_HEADER_RESPONSE, 0, | ||
103 | MCDI_HEADER_RESYNC, 1, | ||
104 | MCDI_HEADER_CODE, cmd, | ||
105 | MCDI_HEADER_DATALEN, inlen, | ||
106 | MCDI_HEADER_SEQ, seqno, | ||
107 | MCDI_HEADER_XFLAGS, xflags); | ||
108 | |||
109 | efx_mcdi_writed(efx, &hdr, pdu); | ||
110 | |||
111 | for (i = 0; i < inlen; i += 4) | ||
112 | efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), | ||
113 | pdu + 4 + i); | ||
114 | |||
115 | /* ring the doorbell with a distinctive value */ | ||
116 | EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); | ||
117 | efx_mcdi_writed(efx, &hdr, doorbell); | ||
118 | } | ||
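/* A request therefore occupies the PDU as one header dword (code, length,
 * seqno, flags) at MCDI_PDU(efx), followed by the payload dwords, with a
 * write to MCDI_DOORBELL(efx) telling the MC to start processing.
 * efx_mcdi_copyout() below reads the response payload back from the same
 * PDU, again starting one dword past the header.
 */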
119 | |||
120 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | ||
121 | { | ||
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
123 | unsigned int pdu = MCDI_PDU(efx); | ||
124 | int i; | ||
125 | |||
126 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
127 | BUG_ON(outlen & 3 || outlen >= 0x100); | ||
128 | |||
129 | for (i = 0; i < outlen; i += 4) | ||
130 | efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); | ||
131 | } | ||
132 | |||
133 | static int efx_mcdi_poll(struct efx_nic *efx) | ||
134 | { | ||
135 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
136 | unsigned int time, finish; | ||
137 | unsigned int respseq, respcmd, error; | ||
138 | unsigned int pdu = MCDI_PDU(efx); | ||
139 | unsigned int rc, spins; | ||
140 | efx_dword_t reg; | ||
141 | |||
142 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | ||
143 | rc = -efx_mcdi_poll_reboot(efx); | ||
144 | if (rc) | ||
145 | goto out; | ||
146 | |||
147 | /* Poll for completion. Poll quickly (once per microsecond) for the | ||
148 | * first jiffy, because MCDI responses are generally fast. After that, | ||
149 | * back off and poll approximately once per jiffy. | ||
150 | */ | ||
151 | spins = TICK_USEC; | ||
152 | finish = get_seconds() + MCDI_RPC_TIMEOUT; | ||
153 | |||
154 | while (1) { | ||
155 | if (spins != 0) { | ||
156 | --spins; | ||
157 | udelay(1); | ||
158 | } else { | ||
159 | schedule_timeout_uninterruptible(1); | ||
160 | } | ||
161 | |||
162 | time = get_seconds(); | ||
163 | |||
164 | efx_mcdi_readd(efx, ®, pdu); | ||
165 | |||
166 | /* All 1's indicates that shared memory is in reset (and is | ||
167 | * not a valid header). Wait for it to come out of reset before | ||
168 | * completing the command. */ | ||
169 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && | ||
170 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) | ||
171 | break; | ||
172 | |||
173 | if (time >= finish) | ||
174 | return -ETIMEDOUT; | ||
175 | } | ||
176 | |||
177 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); | ||
178 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); | ||
179 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); | ||
180 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); | ||
181 | |||
182 | if (error && mcdi->resplen == 0) { | ||
183 | netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); | ||
184 | rc = EIO; | ||
185 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { | ||
186 | netif_err(efx, hw, efx->net_dev, | ||
187 | "MC response mismatch tx seq 0x%x rx seq 0x%x\n", | ||
188 | respseq, mcdi->seqno); | ||
189 | rc = EIO; | ||
190 | } else if (error) { | ||
191 | efx_mcdi_readd(efx, ®, pdu + 4); | ||
192 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | ||
193 | #define TRANSLATE_ERROR(name) \ | ||
194 | case MC_CMD_ERR_ ## name: \ | ||
195 | rc = name; \ | ||
196 | break | ||
197 | TRANSLATE_ERROR(ENOENT); | ||
198 | TRANSLATE_ERROR(EINTR); | ||
199 | TRANSLATE_ERROR(EACCES); | ||
200 | TRANSLATE_ERROR(EBUSY); | ||
201 | TRANSLATE_ERROR(EINVAL); | ||
202 | TRANSLATE_ERROR(EDEADLK); | ||
203 | TRANSLATE_ERROR(ENOSYS); | ||
204 | TRANSLATE_ERROR(ETIME); | ||
205 | #undef TRANSLATE_ERROR | ||
206 | default: | ||
207 | rc = EIO; | ||
208 | break; | ||
209 | } | ||
210 | } else | ||
211 | rc = 0; | ||
212 | |||
213 | out: | ||
214 | mcdi->resprc = rc; | ||
215 | if (rc) | ||
216 | mcdi->resplen = 0; | ||
217 | |||
218 | /* Return rc=0 like wait_event_timeout() */ | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | /* Test and clear MC-rebooted flag for this port/function */ | ||
223 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | ||
224 | { | ||
225 | unsigned int addr = MCDI_REBOOT_FLAG(efx); | ||
226 | efx_dword_t reg; | ||
227 | uint32_t value; | ||
228 | |||
229 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
230 | return false; | ||
231 | |||
232 | efx_mcdi_readd(efx, ®, addr); | ||
233 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | ||
234 | |||
235 | if (value == 0) | ||
236 | return 0; | ||
237 | |||
238 | EFX_ZERO_DWORD(reg); | ||
239 | efx_mcdi_writed(efx, ®, addr); | ||
240 | |||
241 | if (value == MC_STATUS_DWORD_ASSERT) | ||
242 | return -EINTR; | ||
243 | else | ||
244 | return -EIO; | ||
245 | } | ||
246 | |||
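/* The acquire/complete/release helpers below implement a small state
 * machine on mcdi->state:
 *
 *   QUIESCENT --efx_mcdi_acquire()--> RUNNING    (caller owns the interface)
 *   RUNNING --efx_mcdi_complete()--> COMPLETED   (wakes the waiting caller)
 *   any state --efx_mcdi_release()--> QUIESCENT  (wakes the next requester)
 */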
247 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) | ||
248 | { | ||
249 | /* Wait until the interface becomes QUIESCENT and we win the race | ||
250 | * to mark it RUNNING. */ | ||
251 | wait_event(mcdi->wq, | ||
252 | atomic_cmpxchg(&mcdi->state, | ||
253 | MCDI_STATE_QUIESCENT, | ||
254 | MCDI_STATE_RUNNING) | ||
255 | == MCDI_STATE_QUIESCENT); | ||
256 | } | ||
257 | |||
258 | static int efx_mcdi_await_completion(struct efx_nic *efx) | ||
259 | { | ||
260 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
261 | |||
262 | if (wait_event_timeout( | ||
263 | mcdi->wq, | ||
264 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, | ||
265 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) | ||
266 | return -ETIMEDOUT; | ||
267 | |||
268 | /* Check if efx_mcdi_mode_poll() switched us back to polled completions. | ||
269 | * In which case, poll for completions directly. If efx_mcdi_ev_cpl() | ||
270 | * completed the request first, then we'll just end up completing the | ||
271 | * request again, which is safe. | ||
272 | * | ||
273 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which | ||
274 | * wait_event_timeout() implicitly provides. | ||
275 | */ | ||
276 | if (mcdi->mode == MCDI_MODE_POLL) | ||
277 | return efx_mcdi_poll(efx); | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) | ||
283 | { | ||
284 | /* If the interface is RUNNING, then move to COMPLETED and wake any | ||
285 | * waiters. If the interface isn't in RUNNING then we've received a | ||
286 | * duplicate completion after we've already transitioned back to | ||
287 | * QUIESCENT. [A subsequent invocation would increment seqno, so would | ||
288 | * have failed the seqno check]. | ||
289 | */ | ||
290 | if (atomic_cmpxchg(&mcdi->state, | ||
291 | MCDI_STATE_RUNNING, | ||
292 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { | ||
293 | wake_up(&mcdi->wq); | ||
294 | return true; | ||
295 | } | ||
296 | |||
297 | return false; | ||
298 | } | ||
299 | |||
300 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) | ||
301 | { | ||
302 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | ||
303 | wake_up(&mcdi->wq); | ||
304 | } | ||
305 | |||
306 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, | ||
307 | unsigned int datalen, unsigned int errno) | ||
308 | { | ||
309 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
310 | bool wake = false; | ||
311 | |||
312 | spin_lock(&mcdi->iface_lock); | ||
313 | |||
314 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { | ||
315 | if (mcdi->credits) | ||
316 | /* The request has been cancelled */ | ||
317 | --mcdi->credits; | ||
318 | else | ||
319 | netif_err(efx, hw, efx->net_dev, | ||
320 | "MC response mismatch tx seq 0x%x rx " | ||
321 | "seq 0x%x\n", seqno, mcdi->seqno); | ||
322 | } else { | ||
323 | mcdi->resprc = errno; | ||
324 | mcdi->resplen = datalen; | ||
325 | |||
326 | wake = true; | ||
327 | } | ||
328 | |||
329 | spin_unlock(&mcdi->iface_lock); | ||
330 | |||
331 | if (wake) | ||
332 | efx_mcdi_complete(mcdi); | ||
333 | } | ||
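/* mcdi->credits is how a late completion for a timed-out request gets
 * swallowed: efx_mcdi_rpc() bumps both seqno and credits when it gives up
 * on a request, so a subsequent CMDDONE event carrying the old seqno
 * consumes a credit above instead of being logged as a sequence mismatch.
 */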
334 | |||
335 | /* Issue the given command by writing the data into the shared memory PDU, | ||
336 | * ring the doorbell and wait for completion. Copyout the result. */ | ||
337 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | ||
338 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, | ||
339 | size_t *outlen_actual) | ||
340 | { | ||
341 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
342 | int rc; | ||
343 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
344 | |||
345 | efx_mcdi_acquire(mcdi); | ||
346 | |||
347 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ | ||
348 | spin_lock_bh(&mcdi->iface_lock); | ||
349 | ++mcdi->seqno; | ||
350 | spin_unlock_bh(&mcdi->iface_lock); | ||
351 | |||
352 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); | ||
353 | |||
354 | if (mcdi->mode == MCDI_MODE_POLL) | ||
355 | rc = efx_mcdi_poll(efx); | ||
356 | else | ||
357 | rc = efx_mcdi_await_completion(efx); | ||
358 | |||
359 | if (rc != 0) { | ||
360 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | ||
361 | * and completing a request we've just cancelled, by ensuring | ||
362 | * that the seqno check therein fails. | ||
363 | */ | ||
364 | spin_lock_bh(&mcdi->iface_lock); | ||
365 | ++mcdi->seqno; | ||
366 | ++mcdi->credits; | ||
367 | spin_unlock_bh(&mcdi->iface_lock); | ||
368 | |||
369 | netif_err(efx, hw, efx->net_dev, | ||
370 | "MC command 0x%x inlen %d mode %d timed out\n", | ||
371 | cmd, (int)inlen, mcdi->mode); | ||
372 | } else { | ||
373 | size_t resplen; | ||
374 | |||
375 | /* At the very least we need a memory barrier here to ensure | ||
376 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against | ||
377 | * a spurious efx_mcdi_ev_cpl() running concurrently by | ||
378 | * acquiring the iface_lock. */ | ||
379 | spin_lock_bh(&mcdi->iface_lock); | ||
380 | rc = -mcdi->resprc; | ||
381 | resplen = mcdi->resplen; | ||
382 | spin_unlock_bh(&mcdi->iface_lock); | ||
383 | |||
384 | if (rc == 0) { | ||
385 | efx_mcdi_copyout(efx, outbuf, | ||
386 | min(outlen, mcdi->resplen + 3) & ~0x3); | ||
387 | if (outlen_actual != NULL) | ||
388 | *outlen_actual = resplen; | ||
389 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | ||
390 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | ||
391 | else if (rc == -EIO || rc == -EINTR) { | ||
392 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", | ||
393 | -rc); | ||
394 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
395 | } else | ||
396 | netif_dbg(efx, hw, efx->net_dev, | ||
397 | "MC command 0x%x inlen %d failed rc=%d\n", | ||
398 | cmd, (int)inlen, -rc); | ||
399 | } | ||
400 | |||
401 | efx_mcdi_release(mcdi); | ||
402 | return rc; | ||
403 | } | ||
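/* A typical caller follows the pattern used by the request helpers later
 * in this file: build the request with MCDI_SET_DWORD(), issue it with
 * efx_mcdi_rpc(), then check both rc and the response length, e.g.
 *
 *   u8 inbuf[MC_CMD_EXAMPLE_IN_LEN], outbuf[MC_CMD_EXAMPLE_OUT_LEN];
 *   size_t outlen;
 *
 *   MCDI_SET_DWORD(inbuf, EXAMPLE_IN_FIELD, value);
 *   rc = efx_mcdi_rpc(efx, MC_CMD_EXAMPLE, inbuf, sizeof(inbuf),
 *                     outbuf, sizeof(outbuf), &outlen);
 *   if (rc == 0 && outlen >= MC_CMD_EXAMPLE_OUT_LEN)
 *           result = MCDI_DWORD(outbuf, EXAMPLE_OUT_FIELD);
 *
 * (MC_CMD_EXAMPLE and its fields are placeholders, not a real MCDI command.)
 */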
404 | |||
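/* Completion-mode switching. The interface starts out polled (see
 * efx_mcdi_init()); efx_mcdi_mode_event() is used once event queues are
 * available, and efx_mcdi_mode_poll() is used when event delivery cannot be
 * relied upon, e.g. during startup (see efx_mcdi_ev_death() below).
 */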
405 | void efx_mcdi_mode_poll(struct efx_nic *efx) | ||
406 | { | ||
407 | struct efx_mcdi_iface *mcdi; | ||
408 | |||
409 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
410 | return; | ||
411 | |||
412 | mcdi = efx_mcdi(efx); | ||
413 | if (mcdi->mode == MCDI_MODE_POLL) | ||
414 | return; | ||
415 | |||
416 | /* We can switch from event completion to polled completion, because | ||
417 | * mcdi requests are always completed in shared memory. We do this by | ||
418 | * switching the mode to POLL and then completing the request. | ||
419 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). | ||
420 | * | ||
421 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), | ||
422 | * which efx_mcdi_complete() provides for us. | ||
423 | */ | ||
424 | mcdi->mode = MCDI_MODE_POLL; | ||
425 | |||
426 | efx_mcdi_complete(mcdi); | ||
427 | } | ||
428 | |||
429 | void efx_mcdi_mode_event(struct efx_nic *efx) | ||
430 | { | ||
431 | struct efx_mcdi_iface *mcdi; | ||
432 | |||
433 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
434 | return; | ||
435 | |||
436 | mcdi = efx_mcdi(efx); | ||
437 | |||
438 | if (mcdi->mode == MCDI_MODE_EVENTS) | ||
439 | return; | ||
440 | |||
441 | /* We can't switch from polled to event completion in the middle of a | ||
442 | * request, because the completion method is specified in the request. | ||
443 | * So acquire the interface to serialise the requestors. We don't need | ||
444 | * to acquire the iface_lock to change the mode here, but we do need a | ||
445 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which | ||
446 | * efx_mcdi_acquire() provides. | ||
447 | */ | ||
448 | efx_mcdi_acquire(mcdi); | ||
449 | mcdi->mode = MCDI_MODE_EVENTS; | ||
450 | efx_mcdi_release(mcdi); | ||
451 | } | ||
452 | |||
453 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | ||
454 | { | ||
455 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
456 | |||
457 | /* If there is an outstanding MCDI request, it has been terminated | ||
458 | * either by a BADASSERT or REBOOT event. If the mcdi interface is | ||
459 | * in polled mode, then do nothing because the MC reboot handler will | ||
460 | * set the header correctly. However, if the mcdi interface is waiting | ||
461 | * for a CMDDONE event it won't receive it [and since all MCDI events | ||
462 | * are sent to the same queue, we can't be racing with | ||
463 | * efx_mcdi_ev_cpl()] | ||
464 | * | ||
465 | * There's a race here with efx_mcdi_rpc(), because we might receive | ||
466 | * a REBOOT event *before* the request has been copied out. In polled | ||
467 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() | ||
468 | * is ignored. In event mode, this condition is just an edge-case of | ||
469 | * receiving a REBOOT event after posting the MCDI request. Did the mc | ||
470 | * reboot before or after the copyout? The best we can always do is | ||
471 | * just return failure. | ||
472 | */ | ||
473 | spin_lock(&mcdi->iface_lock); | ||
474 | if (efx_mcdi_complete(mcdi)) { | ||
475 | if (mcdi->mode == MCDI_MODE_EVENTS) { | ||
476 | mcdi->resprc = rc; | ||
477 | mcdi->resplen = 0; | ||
478 | ++mcdi->credits; | ||
479 | } | ||
480 | } else | ||
481 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
482 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
483 | |||
484 | spin_unlock(&mcdi->iface_lock); | ||
485 | } | ||
486 | |||
487 | static unsigned int efx_mcdi_event_link_speed[] = { | ||
488 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, | ||
489 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, | ||
490 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, | ||
491 | }; | ||
492 | |||
493 | |||
494 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) | ||
495 | { | ||
496 | u32 flags, fcntl, speed, lpa; | ||
497 | |||
498 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); | ||
499 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); | ||
500 | speed = efx_mcdi_event_link_speed[speed]; | ||
501 | |||
502 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); | ||
503 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); | ||
504 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); | ||
505 | |||
506 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), | ||
507 | * which is only run after flushing the event queues. Therefore, it | ||
508 | * is safe to modify the link state outside of the mac_lock here. | ||
509 | */ | ||
510 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); | ||
511 | |||
512 | efx_mcdi_phy_check_fcntl(efx, lpa); | ||
513 | |||
514 | efx_link_status_changed(efx); | ||
515 | } | ||
516 | |||
517 | static const char *sensor_names[] = { | ||
518 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", | ||
519 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", | ||
520 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", | ||
521 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", | ||
522 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", | ||
523 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", | ||
524 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", | ||
525 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", | ||
526 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", | ||
527 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", | ||
528 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", | ||
529 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", | ||
530 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" | ||
531 | }; | ||
532 | |||
533 | static const char *sensor_status_names[] = { | ||
534 | [MC_CMD_SENSOR_STATE_OK] = "OK", | ||
535 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", | ||
536 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", | ||
537 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", | ||
538 | }; | ||
539 | |||
540 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) | ||
541 | { | ||
542 | unsigned int monitor, state, value; | ||
543 | const char *name, *state_txt; | ||
544 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); | ||
545 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); | ||
546 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); | ||
547 | /* Deal gracefully with the board having more sensors than we | ||
548 | * know about, but do not expect new sensor states. */ | ||
549 | name = (monitor >= ARRAY_SIZE(sensor_names)) | ||
550 | ? "No sensor name available" : | ||
551 | sensor_names[monitor]; | ||
552 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); | ||
553 | state_txt = sensor_status_names[state]; | ||
554 | |||
555 | netif_err(efx, hw, efx->net_dev, | ||
556 | "Sensor %d (%s) reports condition '%s' for raw value %d\n", | ||
557 | monitor, name, state_txt, value); | ||
558 | } | ||
559 | |||
560 | /* Called from falcon_process_eventq for MCDI events */ | ||
561 | void efx_mcdi_process_event(struct efx_channel *channel, | ||
562 | efx_qword_t *event) | ||
563 | { | ||
564 | struct efx_nic *efx = channel->efx; | ||
565 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); | ||
566 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); | ||
567 | |||
568 | switch (code) { | ||
569 | case MCDI_EVENT_CODE_BADSSERT: | ||
570 | netif_err(efx, hw, efx->net_dev, | ||
571 | "MC watchdog or assertion failure at 0x%x\n", data); | ||
572 | efx_mcdi_ev_death(efx, EINTR); | ||
573 | break; | ||
574 | |||
575 | case MCDI_EVENT_CODE_PMNOTICE: | ||
576 | netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); | ||
577 | break; | ||
578 | |||
579 | case MCDI_EVENT_CODE_CMDDONE: | ||
580 | efx_mcdi_ev_cpl(efx, | ||
581 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), | ||
582 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), | ||
583 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); | ||
584 | break; | ||
585 | |||
586 | case MCDI_EVENT_CODE_LINKCHANGE: | ||
587 | efx_mcdi_process_link_change(efx, event); | ||
588 | break; | ||
589 | case MCDI_EVENT_CODE_SENSOREVT: | ||
590 | efx_mcdi_sensor_event(efx, event); | ||
591 | break; | ||
592 | case MCDI_EVENT_CODE_SCHEDERR: | ||
593 | netif_info(efx, hw, efx->net_dev, | ||
594 | "MC Scheduler error address=0x%x\n", data); | ||
595 | break; | ||
596 | case MCDI_EVENT_CODE_REBOOT: | ||
597 | netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); | ||
598 | efx_mcdi_ev_death(efx, EIO); | ||
599 | break; | ||
600 | case MCDI_EVENT_CODE_MAC_STATS_DMA: | ||
601 | /* MAC stats are gathered lazily. We can ignore this. */ | ||
602 | break; | ||
603 | |||
604 | default: | ||
605 | netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", | ||
606 | code); | ||
607 | } | ||
608 | } | ||
609 | |||
610 | /************************************************************************** | ||
611 | * | ||
612 | * Specific request functions | ||
613 | * | ||
614 | ************************************************************************** | ||
615 | */ | ||
616 | |||
617 | void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) | ||
618 | { | ||
619 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; | ||
620 | size_t outlength; | ||
621 | const __le16 *ver_words; | ||
622 | int rc; | ||
623 | |||
624 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); | ||
625 | |||
626 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, | ||
627 | outbuf, sizeof(outbuf), &outlength); | ||
628 | if (rc) | ||
629 | goto fail; | ||
630 | |||
631 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { | ||
632 | rc = -EIO; | ||
633 | goto fail; | ||
634 | } | ||
635 | |||
636 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); | ||
637 | snprintf(buf, len, "%u.%u.%u.%u", | ||
638 | le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), | ||
639 | le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); | ||
640 | return; | ||
641 | |||
642 | fail: | ||
643 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
644 | buf[0] = 0; | ||
645 | } | ||
646 | |||
647 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | ||
648 | bool *was_attached) | ||
649 | { | ||
650 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; | ||
651 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; | ||
652 | size_t outlen; | ||
653 | int rc; | ||
654 | |||
655 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, | ||
656 | driver_operating ? 1 : 0); | ||
657 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); | ||
658 | |||
659 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), | ||
660 | outbuf, sizeof(outbuf), &outlen); | ||
661 | if (rc) | ||
662 | goto fail; | ||
663 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { | ||
664 | rc = -EIO; | ||
665 | goto fail; | ||
666 | } | ||
667 | |||
668 | if (was_attached != NULL) | ||
669 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); | ||
670 | return 0; | ||
671 | |||
672 | fail: | ||
673 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
674 | return rc; | ||
675 | } | ||
676 | |||
677 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | ||
678 | u16 *fw_subtype_list) | ||
679 | { | ||
680 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; | ||
681 | size_t outlen; | ||
682 | int port_num = efx_port_num(efx); | ||
683 | int offset; | ||
684 | int rc; | ||
685 | |||
686 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); | ||
687 | |||
688 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, | ||
689 | outbuf, sizeof(outbuf), &outlen); | ||
690 | if (rc) | ||
691 | goto fail; | ||
692 | |||
693 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { | ||
694 | rc = -EIO; | ||
695 | goto fail; | ||
696 | } | ||
697 | |||
698 | offset = (port_num) | ||
699 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST | ||
700 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; | ||
701 | if (mac_address) | ||
702 | memcpy(mac_address, outbuf + offset, ETH_ALEN); | ||
703 | if (fw_subtype_list) | ||
704 | memcpy(fw_subtype_list, | ||
705 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, | ||
706 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); | ||
707 | |||
708 | return 0; | ||
709 | |||
710 | fail: | ||
711 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", | ||
712 | __func__, rc, (int)outlen); | ||
713 | |||
714 | return rc; | ||
715 | } | ||
716 | |||
717 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | ||
718 | { | ||
719 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; | ||
720 | u32 dest = 0; | ||
721 | int rc; | ||
722 | |||
723 | if (uart) | ||
724 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; | ||
725 | if (evq) | ||
726 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; | ||
727 | |||
728 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); | ||
729 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); | ||
730 | |||
731 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); | ||
732 | |||
733 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), | ||
734 | NULL, 0, NULL); | ||
735 | if (rc) | ||
736 | goto fail; | ||
737 | |||
738 | return 0; | ||
739 | |||
740 | fail: | ||
741 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
742 | return rc; | ||
743 | } | ||
744 | |||
745 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | ||
746 | { | ||
747 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; | ||
748 | size_t outlen; | ||
749 | int rc; | ||
750 | |||
751 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); | ||
752 | |||
753 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, | ||
754 | outbuf, sizeof(outbuf), &outlen); | ||
755 | if (rc) | ||
756 | goto fail; | ||
757 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { | ||
758 | rc = -EIO; | ||
759 | goto fail; | ||
760 | } | ||
761 | |||
762 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); | ||
763 | return 0; | ||
764 | |||
765 | fail: | ||
766 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
767 | __func__, rc); | ||
768 | return rc; | ||
769 | } | ||
770 | |||
771 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | ||
772 | size_t *size_out, size_t *erase_size_out, | ||
773 | bool *protected_out) | ||
774 | { | ||
775 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; | ||
776 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; | ||
777 | size_t outlen; | ||
778 | int rc; | ||
779 | |||
780 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); | ||
781 | |||
782 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), | ||
783 | outbuf, sizeof(outbuf), &outlen); | ||
784 | if (rc) | ||
785 | goto fail; | ||
786 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { | ||
787 | rc = -EIO; | ||
788 | goto fail; | ||
789 | } | ||
790 | |||
791 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); | ||
792 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); | ||
793 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & | ||
794 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); | ||
795 | return 0; | ||
796 | |||
797 | fail: | ||
798 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
799 | return rc; | ||
800 | } | ||
801 | |||
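/* NVRAM modifications follow a start/modify/finish sequence:
 * efx_mcdi_nvram_update_start(), then efx_mcdi_nvram_write() and/or
 * efx_mcdi_nvram_erase() as needed, then efx_mcdi_nvram_update_finish().
 */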
802 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | ||
803 | { | ||
804 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; | ||
805 | int rc; | ||
806 | |||
807 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | ||
808 | |||
809 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | ||
810 | |||
811 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | ||
812 | NULL, 0, NULL); | ||
813 | if (rc) | ||
814 | goto fail; | ||
815 | |||
816 | return 0; | ||
817 | |||
818 | fail: | ||
819 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
820 | return rc; | ||
821 | } | ||
822 | |||
823 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | ||
824 | loff_t offset, u8 *buffer, size_t length) | ||
825 | { | ||
826 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | ||
827 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
828 | size_t outlen; | ||
829 | int rc; | ||
830 | |||
831 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); | ||
832 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); | ||
833 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); | ||
834 | |||
835 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), | ||
836 | outbuf, sizeof(outbuf), &outlen); | ||
837 | if (rc) | ||
838 | goto fail; | ||
839 | |||
840 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); | ||
841 | return 0; | ||
842 | |||
843 | fail: | ||
844 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
845 | return rc; | ||
846 | } | ||
847 | |||
848 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | ||
849 | loff_t offset, const u8 *buffer, size_t length) | ||
850 | { | ||
851 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
852 | int rc; | ||
853 | |||
854 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | ||
855 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); | ||
856 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); | ||
857 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); | ||
858 | |||
859 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | ||
860 | |||
861 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, | ||
862 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | ||
863 | NULL, 0, NULL); | ||
864 | if (rc) | ||
865 | goto fail; | ||
866 | |||
867 | return 0; | ||
868 | |||
869 | fail: | ||
870 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
871 | return rc; | ||
872 | } | ||
873 | |||
874 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | ||
875 | loff_t offset, size_t length) | ||
876 | { | ||
877 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; | ||
878 | int rc; | ||
879 | |||
880 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | ||
881 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | ||
882 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | ||
883 | |||
884 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | ||
885 | |||
886 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | ||
887 | NULL, 0, NULL); | ||
888 | if (rc) | ||
889 | goto fail; | ||
890 | |||
891 | return 0; | ||
892 | |||
893 | fail: | ||
894 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
895 | return rc; | ||
896 | } | ||
897 | |||
898 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | ||
899 | { | ||
900 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; | ||
901 | int rc; | ||
902 | |||
903 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | ||
904 | |||
905 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | ||
906 | |||
907 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | ||
908 | NULL, 0, NULL); | ||
909 | if (rc) | ||
910 | goto fail; | ||
911 | |||
912 | return 0; | ||
913 | |||
914 | fail: | ||
915 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
916 | return rc; | ||
917 | } | ||
918 | |||
919 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) | ||
920 | { | ||
921 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; | ||
922 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; | ||
923 | int rc; | ||
924 | |||
925 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); | ||
926 | |||
927 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), | ||
928 | outbuf, sizeof(outbuf), NULL); | ||
929 | if (rc) | ||
930 | return rc; | ||
931 | |||
932 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { | ||
933 | case MC_CMD_NVRAM_TEST_PASS: | ||
934 | case MC_CMD_NVRAM_TEST_NOTSUPP: | ||
935 | return 0; | ||
936 | default: | ||
937 | return -EIO; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) | ||
942 | { | ||
943 | u32 nvram_types; | ||
944 | unsigned int type; | ||
945 | int rc; | ||
946 | |||
947 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | ||
948 | if (rc) | ||
949 | goto fail1; | ||
950 | |||
951 | type = 0; | ||
952 | while (nvram_types != 0) { | ||
953 | if (nvram_types & 1) { | ||
954 | rc = efx_mcdi_nvram_test(efx, type); | ||
955 | if (rc) | ||
956 | goto fail2; | ||
957 | } | ||
958 | type++; | ||
959 | nvram_types >>= 1; | ||
960 | } | ||
961 | |||
962 | return 0; | ||
963 | |||
964 | fail2: | ||
965 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", | ||
966 | __func__, type); | ||
967 | fail1: | ||
968 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
969 | return rc; | ||
970 | } | ||
971 | |||
972 | static int efx_mcdi_read_assertion(struct efx_nic *efx) | ||
973 | { | ||
974 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; | ||
975 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; | ||
976 | unsigned int flags, index, ofst; | ||
977 | const char *reason; | ||
978 | size_t outlen; | ||
979 | int retry; | ||
980 | int rc; | ||
981 | |||
982 | /* Attempt to read any stored assertion state before we reboot | ||
983 | * the mcfw out of the assertion handler. Retry twice, once | ||
984 | * because a boot-time assertion might cause this command to fail | ||
985 | * with EINTR. And once again because GET_ASSERTS can race with | ||
986 | * MC_CMD_REBOOT running on the other port. */ | ||
987 | retry = 2; | ||
988 | do { | ||
989 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); | ||
990 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, | ||
991 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, | ||
992 | outbuf, sizeof(outbuf), &outlen); | ||
993 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); | ||
994 | |||
995 | if (rc) | ||
996 | return rc; | ||
997 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) | ||
998 | return -EIO; | ||
999 | |||
1000 | /* Print out any recorded assertion state */ | ||
1001 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); | ||
1002 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) | ||
1003 | return 0; | ||
1004 | |||
1005 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) | ||
1006 | ? "system-level assertion" | ||
1007 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) | ||
1008 | ? "thread-level assertion" | ||
1009 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) | ||
1010 | ? "watchdog reset" | ||
1011 | : "unknown assertion"; | ||
1012 | netif_err(efx, hw, efx->net_dev, | ||
1013 | "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, | ||
1014 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), | ||
1015 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | ||
1016 | |||
1017 | /* Print out the registers */ | ||
1018 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; | ||
1019 | for (index = 1; index < 32; index++) { | ||
1020 | netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, | ||
1021 | MCDI_DWORD2(outbuf, ofst)); | ||
1022 | ofst += sizeof(efx_dword_t); | ||
1023 | } | ||
1024 | |||
1025 | return 0; | ||
1026 | } | ||
1027 | |||
1028 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) | ||
1029 | { | ||
1030 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1031 | |||
1032 | /* Atomically reboot the mcfw out of the assertion handler */ | ||
1033 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1034 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, | ||
1035 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); | ||
1036 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, | ||
1037 | NULL, 0, NULL); | ||
1038 | } | ||
1039 | |||
1040 | int efx_mcdi_handle_assertion(struct efx_nic *efx) | ||
1041 | { | ||
1042 | int rc; | ||
1043 | |||
1044 | rc = efx_mcdi_read_assertion(efx); | ||
1045 | if (rc) | ||
1046 | return rc; | ||
1047 | |||
1048 | efx_mcdi_exit_assertion(efx); | ||
1049 | |||
1050 | return 0; | ||
1051 | } | ||
1052 | |||
1053 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
1054 | { | ||
1055 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; | ||
1056 | int rc; | ||
1057 | |||
1058 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); | ||
1059 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); | ||
1060 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); | ||
1061 | |||
1062 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); | ||
1063 | |||
1064 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); | ||
1065 | |||
1066 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | ||
1067 | NULL, 0, NULL); | ||
1068 | if (rc) | ||
1069 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
1070 | __func__, rc); | ||
1071 | } | ||
1072 | |||
1073 | int efx_mcdi_reset_port(struct efx_nic *efx) | ||
1074 | { | ||
1075 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | ||
1076 | if (rc) | ||
1077 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | ||
1078 | __func__, rc); | ||
1079 | return rc; | ||
1080 | } | ||
1081 | |||
1082 | int efx_mcdi_reset_mc(struct efx_nic *efx) | ||
1083 | { | ||
1084 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1085 | int rc; | ||
1086 | |||
1087 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1088 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); | ||
1089 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), | ||
1090 | NULL, 0, NULL); | ||
1091 | /* White is black, and up is down: -EIO here means the reboot happened */ | ||
1092 | if (rc == -EIO) | ||
1093 | return 0; | ||
1094 | if (rc == 0) | ||
1095 | rc = -EIO; | ||
1096 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1097 | return rc; | ||
1098 | } | ||
1099 | |||
1100 | static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | ||
1101 | const u8 *mac, int *id_out) | ||
1102 | { | ||
1103 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; | ||
1104 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; | ||
1105 | size_t outlen; | ||
1106 | int rc; | ||
1107 | |||
1108 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); | ||
1109 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, | ||
1110 | MC_CMD_FILTER_MODE_SIMPLE); | ||
1111 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); | ||
1112 | |||
1113 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), | ||
1114 | outbuf, sizeof(outbuf), &outlen); | ||
1115 | if (rc) | ||
1116 | goto fail; | ||
1117 | |||
1118 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { | ||
1119 | rc = -EIO; | ||
1120 | goto fail; | ||
1121 | } | ||
1122 | |||
1123 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); | ||
1124 | |||
1125 | return 0; | ||
1126 | |||
1127 | fail: | ||
1128 | *id_out = -1; | ||
1129 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1130 | return rc; | ||
1131 | |||
1132 | } | ||
1133 | |||
1134 | |||
1135 | int | ||
1136 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) | ||
1137 | { | ||
1138 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); | ||
1139 | } | ||
1140 | |||
1141 | |||
1142 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | ||
1143 | { | ||
1144 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; | ||
1145 | size_t outlen; | ||
1146 | int rc; | ||
1147 | |||
1148 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, | ||
1149 | outbuf, sizeof(outbuf), &outlen); | ||
1150 | if (rc) | ||
1151 | goto fail; | ||
1152 | |||
1153 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { | ||
1154 | rc = -EIO; | ||
1155 | goto fail; | ||
1156 | } | ||
1157 | |||
1158 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); | ||
1159 | |||
1160 | return 0; | ||
1161 | |||
1162 | fail: | ||
1163 | *id_out = -1; | ||
1164 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1165 | return rc; | ||
1166 | } | ||
1167 | |||
1168 | |||
1169 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | ||
1170 | { | ||
1171 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; | ||
1172 | int rc; | ||
1173 | |||
1174 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | ||
1175 | |||
1176 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | ||
1177 | NULL, 0, NULL); | ||
1178 | if (rc) | ||
1179 | goto fail; | ||
1180 | |||
1181 | return 0; | ||
1182 | |||
1183 | fail: | ||
1184 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1185 | return rc; | ||
1186 | } | ||
1187 | |||
1188 | |||
1189 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | ||
1190 | { | ||
1191 | int rc; | ||
1192 | |||
1193 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | ||
1194 | if (rc) | ||
1195 | goto fail; | ||
1196 | |||
1197 | return 0; | ||
1198 | |||
1199 | fail: | ||
1200 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | ||
1201 | return rc; | ||
1202 | } | ||
1203 | |||