author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400
commit | ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree | 644b88f8a71896307d71438e9b3af49126ffb22b /drivers/net/sfc/mcdi.c
parent | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-masterarchived-private-master
Diffstat (limited to 'drivers/net/sfc/mcdi.c')
-rw-r--r-- | drivers/net/sfc/mcdi.c | 1173
1 file changed, 1173 insertions, 0 deletions
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
new file mode 100644
index 000000000000..c48669c77414
--- /dev/null
+++ b/drivers/net/sfc/mcdi.c
@@ -0,0 +1,1173 @@
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2008-2009 Solarflare Communications Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation, incorporated herein by reference. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include "net_driver.h" | ||
12 | #include "nic.h" | ||
13 | #include "io.h" | ||
14 | #include "regs.h" | ||
15 | #include "mcdi_pcol.h" | ||
16 | #include "phy.h" | ||
17 | |||
18 | /************************************************************************** | ||
19 | * | ||
20 | * Management-Controller-to-Driver Interface | ||
21 | * | ||
22 | ************************************************************************** | ||
23 | */ | ||
24 | |||
25 | /* Software-defined layout of the shared memory */ | ||
26 | #define CMD_NOTIFY_PORT0 0 | ||
27 | #define CMD_NOTIFY_PORT1 4 | ||
28 | #define CMD_PDU_PORT0 0x008 | ||
29 | #define CMD_PDU_PORT1 0x108 | ||
30 | #define REBOOT_FLAG_PORT0 0x3f8 | ||
31 | #define REBOOT_FLAG_PORT1 0x3fc | ||
32 | |||
33 | #define MCDI_RPC_TIMEOUT 10 /* seconds */ | ||
34 | |||
35 | #define MCDI_PDU(efx) \ | ||
36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) | ||
37 | #define MCDI_DOORBELL(efx) \ | ||
38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) | ||
39 | #define MCDI_REBOOT_FLAG(efx) \ | ||
40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) | ||
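| /* Note: the values above are byte offsets into the MC shared-memory window | ||
| * (each access below adds FR_CZ_MC_TREG_SMEM); the per-port copy is selected | ||
| * via efx_port_num() in these macros. */ | ||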
41 | |||
42 | #define SEQ_MASK \ | ||
43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) | ||
44 | |||
45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | ||
46 | { | ||
47 | struct siena_nic_data *nic_data; | ||
48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
49 | nic_data = efx->nic_data; | ||
50 | return &nic_data->mcdi; | ||
51 | } | ||
52 | |||
53 | void efx_mcdi_init(struct efx_nic *efx) | ||
54 | { | ||
55 | struct efx_mcdi_iface *mcdi; | ||
56 | |||
57 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
58 | return; | ||
59 | |||
60 | mcdi = efx_mcdi(efx); | ||
61 | init_waitqueue_head(&mcdi->wq); | ||
62 | spin_lock_init(&mcdi->iface_lock); | ||
63 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | ||
64 | mcdi->mode = MCDI_MODE_POLL; | ||
65 | |||
66 | (void) efx_mcdi_poll_reboot(efx); | ||
67 | } | ||
68 | |||
69 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | ||
70 | const u8 *inbuf, size_t inlen) | ||
71 | { | ||
72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); | ||
75 | unsigned int i; | ||
76 | efx_dword_t hdr; | ||
77 | u32 xflags, seqno; | ||
78 | |||
79 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
80 | BUG_ON(inlen & 3 || inlen >= 0x100); | ||
81 | |||
82 | seqno = mcdi->seqno & SEQ_MASK; | ||
83 | xflags = 0; | ||
84 | if (mcdi->mode == MCDI_MODE_EVENTS) | ||
85 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; | ||
86 | |||
87 | EFX_POPULATE_DWORD_6(hdr, | ||
88 | MCDI_HEADER_RESPONSE, 0, | ||
89 | MCDI_HEADER_RESYNC, 1, | ||
90 | MCDI_HEADER_CODE, cmd, | ||
91 | MCDI_HEADER_DATALEN, inlen, | ||
92 | MCDI_HEADER_SEQ, seqno, | ||
93 | MCDI_HEADER_XFLAGS, xflags); | ||
94 | |||
95 | efx_writed(efx, &hdr, pdu); | ||
96 | |||
97 | for (i = 0; i < inlen; i += 4) | ||
98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); | ||
99 | |||
100 | /* Ensure the header and payload are written out before ringing the doorbell */ | ||
101 | wmb(); | ||
102 | |||
103 | /* ring the doorbell with a distinctive value */ | ||
104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); | ||
105 | } | ||
106 | |||
107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | ||
108 | { | ||
109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
111 | int i; | ||
112 | |||
113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | ||
114 | BUG_ON(outlen & 3 || outlen >= 0x100); | ||
115 | |||
116 | for (i = 0; i < outlen; i += 4) | ||
117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); | ||
118 | } | ||
119 | |||
120 | static int efx_mcdi_poll(struct efx_nic *efx) | ||
121 | { | ||
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
123 | unsigned int time, finish; | ||
124 | unsigned int respseq, respcmd, error; | ||
125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | ||
126 | unsigned int rc, spins; | ||
127 | efx_dword_t reg; | ||
128 | |||
129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | ||
130 | rc = -efx_mcdi_poll_reboot(efx); | ||
131 | if (rc) | ||
132 | goto out; | ||
133 | |||
134 | /* Poll for completion. Poll quickly (once per microsecond) for the first | ||
135 | * jiffy, because MCDI responses are generally fast. After that, back off | ||
136 | * and poll roughly once per jiffy. | ||
137 | */ | ||
138 | spins = TICK_USEC; | ||
139 | finish = get_seconds() + MCDI_RPC_TIMEOUT; | ||
140 | |||
141 | while (1) { | ||
142 | if (spins != 0) { | ||
143 | --spins; | ||
144 | udelay(1); | ||
145 | } else { | ||
146 | schedule_timeout_uninterruptible(1); | ||
147 | } | ||
148 | |||
149 | time = get_seconds(); | ||
150 | |||
151 | rmb(); | ||
152 | efx_readd(efx, ®, pdu); | ||
153 | |||
154 | /* All 1's indicates that shared memory is in reset (and is | ||
155 | * not a valid header). Wait for it to come out of reset before | ||
156 | * completing the command */ | ||
157 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && | ||
158 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) | ||
159 | break; | ||
160 | |||
161 | if (time >= finish) | ||
162 | return -ETIMEDOUT; | ||
163 | } | ||
164 | |||
165 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); | ||
166 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); | ||
167 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); | ||
168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); | ||
169 | |||
170 | if (error && mcdi->resplen == 0) { | ||
171 | EFX_ERR(efx, "MC rebooted\n"); | ||
172 | rc = EIO; | ||
173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { | ||
174 | EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", | ||
175 | respseq, mcdi->seqno); | ||
176 | rc = EIO; | ||
177 | } else if (error) { | ||
178 | efx_readd(efx, ®, pdu + 4); | ||
179 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | ||
180 | #define TRANSLATE_ERROR(name) \ | ||
181 | case MC_CMD_ERR_ ## name: \ | ||
182 | rc = name; \ | ||
183 | break | ||
184 | TRANSLATE_ERROR(ENOENT); | ||
185 | TRANSLATE_ERROR(EINTR); | ||
186 | TRANSLATE_ERROR(EACCES); | ||
187 | TRANSLATE_ERROR(EBUSY); | ||
188 | TRANSLATE_ERROR(EINVAL); | ||
189 | TRANSLATE_ERROR(EDEADLK); | ||
190 | TRANSLATE_ERROR(ENOSYS); | ||
191 | TRANSLATE_ERROR(ETIME); | ||
192 | #undef TRANSLATE_ERROR | ||
193 | default: | ||
194 | rc = EIO; | ||
195 | break; | ||
196 | } | ||
197 | } else | ||
198 | rc = 0; | ||
199 | |||
200 | out: | ||
201 | mcdi->resprc = rc; | ||
202 | if (rc) | ||
203 | mcdi->resplen = 0; | ||
204 | |||
205 | /* Return rc=0 like wait_event_timeout() */ | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | /* Test and clear MC-rebooted flag for this port/function */ | ||
210 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | ||
211 | { | ||
212 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); | ||
213 | efx_dword_t reg; | ||
214 | uint32_t value; | ||
215 | |||
216 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
217 | return false; | ||
218 | |||
219 | efx_readd(efx, ®, addr); | ||
220 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | ||
221 | |||
222 | if (value == 0) | ||
223 | return 0; | ||
224 | |||
225 | EFX_ZERO_DWORD(reg); | ||
226 | efx_writed(efx, ®, addr); | ||
227 | |||
228 | if (value == MC_STATUS_DWORD_ASSERT) | ||
229 | return -EINTR; | ||
230 | else | ||
231 | return -EIO; | ||
232 | } | ||
233 | |||
234 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) | ||
235 | { | ||
236 | /* Wait until the interface becomes QUIESCENT and we win the race | ||
237 | * to mark it RUNNING. */ | ||
238 | wait_event(mcdi->wq, | ||
239 | atomic_cmpxchg(&mcdi->state, | ||
240 | MCDI_STATE_QUIESCENT, | ||
241 | MCDI_STATE_RUNNING) | ||
242 | == MCDI_STATE_QUIESCENT); | ||
243 | } | ||
244 | |||
245 | static int efx_mcdi_await_completion(struct efx_nic *efx) | ||
246 | { | ||
247 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
248 | |||
249 | if (wait_event_timeout( | ||
250 | mcdi->wq, | ||
251 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, | ||
252 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) | ||
253 | return -ETIMEDOUT; | ||
254 | |||
255 | /* Check if efx_mcdi_mode_poll() switched us back to polled completions. | ||
256 | * If so, poll for completions directly. If efx_mcdi_ev_cpl() | ||
257 | * completed the request first, then we'll just end up completing the | ||
258 | * request again, which is safe. | ||
259 | * | ||
260 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which | ||
261 | * wait_event_timeout() implicitly provides. | ||
262 | */ | ||
263 | if (mcdi->mode == MCDI_MODE_POLL) | ||
264 | return efx_mcdi_poll(efx); | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) | ||
270 | { | ||
271 | /* If the interface is RUNNING, then move to COMPLETED and wake any | ||
272 | * waiters. If the interface isn't in RUNNING then we've received a | ||
273 | * duplicate completion after we've already transitioned back to | ||
274 | * QUIESCENT. [A subsequent invocation would increment seqno, so would | ||
275 | * have failed the seqno check]. | ||
276 | */ | ||
277 | if (atomic_cmpxchg(&mcdi->state, | ||
278 | MCDI_STATE_RUNNING, | ||
279 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { | ||
280 | wake_up(&mcdi->wq); | ||
281 | return true; | ||
282 | } | ||
283 | |||
284 | return false; | ||
285 | } | ||
286 | |||
287 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) | ||
288 | { | ||
289 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | ||
290 | wake_up(&mcdi->wq); | ||
291 | } | ||
292 | |||
293 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, | ||
294 | unsigned int datalen, unsigned int errno) | ||
295 | { | ||
296 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
297 | bool wake = false; | ||
298 | |||
299 | spin_lock(&mcdi->iface_lock); | ||
300 | |||
301 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { | ||
302 | if (mcdi->credits) | ||
303 | /* The request has been cancelled */ | ||
304 | --mcdi->credits; | ||
305 | else | ||
306 | EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx " | ||
307 | "seq 0x%x\n", seqno, mcdi->seqno); | ||
308 | } else { | ||
309 | mcdi->resprc = errno; | ||
310 | mcdi->resplen = datalen; | ||
311 | |||
312 | wake = true; | ||
313 | } | ||
314 | |||
315 | spin_unlock(&mcdi->iface_lock); | ||
316 | |||
317 | if (wake) | ||
318 | efx_mcdi_complete(mcdi); | ||
319 | } | ||
320 | |||
321 | /* Issue the given command by writing the data into the shared memory PDU, | ||
322 | * ring the doorbell and wait for completion. Copy out the result. */ | ||
323 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | ||
324 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, | ||
325 | size_t *outlen_actual) | ||
326 | { | ||
327 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
328 | int rc; | ||
329 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | ||
330 | |||
331 | efx_mcdi_acquire(mcdi); | ||
332 | |||
333 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ | ||
334 | spin_lock_bh(&mcdi->iface_lock); | ||
335 | ++mcdi->seqno; | ||
336 | spin_unlock_bh(&mcdi->iface_lock); | ||
337 | |||
338 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); | ||
339 | |||
340 | if (mcdi->mode == MCDI_MODE_POLL) | ||
341 | rc = efx_mcdi_poll(efx); | ||
342 | else | ||
343 | rc = efx_mcdi_await_completion(efx); | ||
344 | |||
345 | if (rc != 0) { | ||
346 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | ||
347 | * and completing a request we've just cancelled, by ensuring | ||
348 | * that the seqno check therein fails. | ||
349 | */ | ||
350 | spin_lock_bh(&mcdi->iface_lock); | ||
351 | ++mcdi->seqno; | ||
352 | ++mcdi->credits; | ||
353 | spin_unlock_bh(&mcdi->iface_lock); | ||
354 | |||
355 | EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n", | ||
356 | cmd, (int)inlen, mcdi->mode); | ||
357 | } else { | ||
358 | size_t resplen; | ||
359 | |||
360 | /* At the very least we need a memory barrier here to ensure | ||
361 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against | ||
362 | * a spurious efx_mcdi_ev_cpl() running concurrently by | ||
363 | * acquiring the iface_lock. */ | ||
364 | spin_lock_bh(&mcdi->iface_lock); | ||
365 | rc = -mcdi->resprc; | ||
366 | resplen = mcdi->resplen; | ||
367 | spin_unlock_bh(&mcdi->iface_lock); | ||
368 | |||
369 | if (rc == 0) { | ||
370 | efx_mcdi_copyout(efx, outbuf, | ||
371 | min(outlen, mcdi->resplen + 3) & ~0x3); | ||
372 | if (outlen_actual != NULL) | ||
373 | *outlen_actual = resplen; | ||
374 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | ||
375 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | ||
376 | else if (rc == -EIO || rc == -EINTR) { | ||
377 | EFX_ERR(efx, "MC fatal error %d\n", -rc); | ||
378 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
379 | } else | ||
380 | EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n", | ||
381 | cmd, (int)inlen, -rc); | ||
382 | } | ||
383 | |||
384 | efx_mcdi_release(mcdi); | ||
385 | return rc; | ||
386 | } | ||
387 | |||
388 | void efx_mcdi_mode_poll(struct efx_nic *efx) | ||
389 | { | ||
390 | struct efx_mcdi_iface *mcdi; | ||
391 | |||
392 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
393 | return; | ||
394 | |||
395 | mcdi = efx_mcdi(efx); | ||
396 | if (mcdi->mode == MCDI_MODE_POLL) | ||
397 | return; | ||
398 | |||
399 | /* We can switch from event completion to polled completion, because | ||
400 | * MCDI requests are always completed in shared memory. We do this by | ||
401 | * switching the mode to polled and then completing the request. | ||
402 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). | ||
403 | * | ||
404 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), | ||
405 | * which efx_mcdi_complete() provides for us. | ||
406 | */ | ||
407 | mcdi->mode = MCDI_MODE_POLL; | ||
408 | |||
409 | efx_mcdi_complete(mcdi); | ||
410 | } | ||
411 | |||
412 | void efx_mcdi_mode_event(struct efx_nic *efx) | ||
413 | { | ||
414 | struct efx_mcdi_iface *mcdi; | ||
415 | |||
416 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | ||
417 | return; | ||
418 | |||
419 | mcdi = efx_mcdi(efx); | ||
420 | |||
421 | if (mcdi->mode == MCDI_MODE_EVENTS) | ||
422 | return; | ||
423 | |||
424 | /* We can't switch from polled to event completion in the middle of a | ||
425 | * request, because the completion method is specified in the request. | ||
426 | * So acquire the interface to serialise the requestors. We don't need | ||
427 | * to acquire the iface_lock to change the mode here, but we do need a | ||
428 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which | ||
429 | * efx_mcdi_acquire() provides. | ||
430 | */ | ||
431 | efx_mcdi_acquire(mcdi); | ||
432 | mcdi->mode = MCDI_MODE_EVENTS; | ||
433 | efx_mcdi_release(mcdi); | ||
434 | } | ||
435 | |||
436 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | ||
437 | { | ||
438 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | ||
439 | |||
440 | /* If there is an outstanding MCDI request, it has been terminated | ||
441 | * either by a BADASSERT or REBOOT event. If the mcdi interface is | ||
442 | * in polled mode, then do nothing because the MC reboot handler will | ||
443 | * set the header correctly. However, if the mcdi interface is waiting | ||
444 | * for a CMDDONE event it won't receive it [and since all MCDI events | ||
445 | * are sent to the same queue, we can't be racing with | ||
446 | * efx_mcdi_ev_cpl()] | ||
447 | * | ||
448 | * There's a race here with efx_mcdi_rpc(), because we might receive | ||
449 | * a REBOOT event *before* the request has been copied out. In polled | ||
450 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() | ||
451 | * is ignored. In event mode, this condition is just an edge case of | ||
452 | * receiving a REBOOT event after posting the MCDI request. Did the MC | ||
453 | * reboot before or after the copyout? The best we can always do is | ||
454 | * just return failure. | ||
455 | */ | ||
456 | spin_lock(&mcdi->iface_lock); | ||
457 | if (efx_mcdi_complete(mcdi)) { | ||
458 | if (mcdi->mode == MCDI_MODE_EVENTS) { | ||
459 | mcdi->resprc = rc; | ||
460 | mcdi->resplen = 0; | ||
461 | } | ||
462 | } else | ||
463 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
464 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
465 | |||
466 | spin_unlock(&mcdi->iface_lock); | ||
467 | } | ||
468 | |||
469 | static unsigned int efx_mcdi_event_link_speed[] = { | ||
470 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, | ||
471 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, | ||
472 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, | ||
473 | }; | ||
474 | |||
475 | |||
476 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) | ||
477 | { | ||
478 | u32 flags, fcntl, speed, lpa; | ||
479 | |||
480 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); | ||
481 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); | ||
482 | speed = efx_mcdi_event_link_speed[speed]; | ||
483 | |||
484 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); | ||
485 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); | ||
486 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); | ||
487 | |||
488 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), | ||
489 | * which is only run after flushing the event queues. Therefore, it | ||
490 | * is safe to modify the link state outside of the mac_lock here. | ||
491 | */ | ||
492 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); | ||
493 | |||
494 | efx_mcdi_phy_check_fcntl(efx, lpa); | ||
495 | |||
496 | efx_link_status_changed(efx); | ||
497 | } | ||
498 | |||
499 | static const char *sensor_names[] = { | ||
500 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", | ||
501 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", | ||
502 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", | ||
503 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", | ||
504 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", | ||
505 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", | ||
506 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", | ||
507 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", | ||
508 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", | ||
509 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", | ||
510 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", | ||
511 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", | ||
512 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" | ||
513 | }; | ||
514 | |||
515 | static const char *sensor_status_names[] = { | ||
516 | [MC_CMD_SENSOR_STATE_OK] = "OK", | ||
517 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", | ||
518 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", | ||
519 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", | ||
520 | }; | ||
521 | |||
522 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) | ||
523 | { | ||
524 | unsigned int monitor, state, value; | ||
525 | const char *name, *state_txt; | ||
526 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); | ||
527 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); | ||
528 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); | ||
529 | /* Deal gracefully with the board having more sensors than we | ||
530 | * know about, but do not expect new sensor states. */ | ||
531 | name = (monitor >= ARRAY_SIZE(sensor_names)) | ||
532 | ? "No sensor name available" : | ||
533 | sensor_names[monitor]; | ||
534 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); | ||
535 | state_txt = sensor_status_names[state]; | ||
536 | |||
537 | EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n", | ||
538 | monitor, name, state_txt, value); | ||
539 | } | ||
540 | |||
541 | /* Called from falcon_process_eventq for MCDI events */ | ||
542 | void efx_mcdi_process_event(struct efx_channel *channel, | ||
543 | efx_qword_t *event) | ||
544 | { | ||
545 | struct efx_nic *efx = channel->efx; | ||
546 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); | ||
547 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); | ||
548 | |||
549 | switch (code) { | ||
550 | case MCDI_EVENT_CODE_BADSSERT: | ||
551 | EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data); | ||
552 | efx_mcdi_ev_death(efx, EINTR); | ||
553 | break; | ||
554 | |||
555 | case MCDI_EVENT_CODE_PMNOTICE: | ||
556 | EFX_INFO(efx, "MCDI PM event.\n"); | ||
557 | break; | ||
558 | |||
559 | case MCDI_EVENT_CODE_CMDDONE: | ||
560 | efx_mcdi_ev_cpl(efx, | ||
561 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), | ||
562 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), | ||
563 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); | ||
564 | break; | ||
565 | |||
566 | case MCDI_EVENT_CODE_LINKCHANGE: | ||
567 | efx_mcdi_process_link_change(efx, event); | ||
568 | break; | ||
569 | case MCDI_EVENT_CODE_SENSOREVT: | ||
570 | efx_mcdi_sensor_event(efx, event); | ||
571 | break; | ||
572 | case MCDI_EVENT_CODE_SCHEDERR: | ||
573 | EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data); | ||
574 | break; | ||
575 | case MCDI_EVENT_CODE_REBOOT: | ||
576 | EFX_INFO(efx, "MC Reboot\n"); | ||
577 | efx_mcdi_ev_death(efx, EIO); | ||
578 | break; | ||
579 | case MCDI_EVENT_CODE_MAC_STATS_DMA: | ||
580 | /* MAC stats are gathered lazily. We can ignore this. */ | ||
581 | break; | ||
582 | |||
583 | default: | ||
584 | EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code); | ||
585 | } | ||
586 | } | ||
587 | |||
588 | /************************************************************************** | ||
589 | * | ||
590 | * Specific request functions | ||
591 | * | ||
592 | ************************************************************************** | ||
593 | */ | ||
594 | |||
595 | int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) | ||
596 | { | ||
597 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; | ||
598 | size_t outlength; | ||
599 | const __le16 *ver_words; | ||
600 | int rc; | ||
601 | |||
602 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); | ||
603 | |||
604 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, | ||
605 | outbuf, sizeof(outbuf), &outlength); | ||
606 | if (rc) | ||
607 | goto fail; | ||
608 | |||
609 | if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) { | ||
610 | *version = 0; | ||
611 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); | ||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { | ||
616 | rc = -EMSGSIZE; | ||
617 | goto fail; | ||
618 | } | ||
619 | |||
620 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); | ||
621 | *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | | ||
622 | ((u64)le16_to_cpu(ver_words[1]) << 32) | | ||
623 | ((u64)le16_to_cpu(ver_words[2]) << 16) | | ||
624 | le16_to_cpu(ver_words[3])); | ||
625 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); | ||
626 | |||
627 | return 0; | ||
628 | |||
629 | fail: | ||
630 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
631 | return rc; | ||
632 | } | ||
633 | |||
634 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | ||
635 | bool *was_attached) | ||
636 | { | ||
637 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; | ||
638 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; | ||
639 | size_t outlen; | ||
640 | int rc; | ||
641 | |||
642 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, | ||
643 | driver_operating ? 1 : 0); | ||
644 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); | ||
645 | |||
646 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), | ||
647 | outbuf, sizeof(outbuf), &outlen); | ||
648 | if (rc) | ||
649 | goto fail; | ||
650 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) | ||
651 | goto fail; | ||
652 | |||
653 | if (was_attached != NULL) | ||
654 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); | ||
655 | return 0; | ||
656 | |||
657 | fail: | ||
658 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
659 | return rc; | ||
660 | } | ||
661 | |||
662 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | ||
663 | u16 *fw_subtype_list) | ||
664 | { | ||
665 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; | ||
666 | size_t outlen; | ||
667 | int port_num = efx_port_num(efx); | ||
668 | int offset; | ||
669 | int rc; | ||
670 | |||
671 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); | ||
672 | |||
673 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, | ||
674 | outbuf, sizeof(outbuf), &outlen); | ||
675 | if (rc) | ||
676 | goto fail; | ||
677 | |||
678 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { | ||
679 | rc = -EMSGSIZE; | ||
680 | goto fail; | ||
681 | } | ||
682 | |||
683 | offset = (port_num) | ||
684 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST | ||
685 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; | ||
686 | if (mac_address) | ||
687 | memcpy(mac_address, outbuf + offset, ETH_ALEN); | ||
688 | if (fw_subtype_list) | ||
689 | memcpy(fw_subtype_list, | ||
690 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, | ||
691 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); | ||
692 | |||
693 | return 0; | ||
694 | |||
695 | fail: | ||
696 | EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen); | ||
697 | |||
698 | return rc; | ||
699 | } | ||
700 | |||
701 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | ||
702 | { | ||
703 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; | ||
704 | u32 dest = 0; | ||
705 | int rc; | ||
706 | |||
707 | if (uart) | ||
708 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; | ||
709 | if (evq) | ||
710 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; | ||
711 | |||
712 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); | ||
713 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); | ||
714 | |||
715 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); | ||
716 | |||
717 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), | ||
718 | NULL, 0, NULL); | ||
719 | if (rc) | ||
720 | goto fail; | ||
721 | |||
722 | return 0; | ||
723 | |||
724 | fail: | ||
725 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
726 | return rc; | ||
727 | } | ||
728 | |||
729 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | ||
730 | { | ||
731 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; | ||
732 | size_t outlen; | ||
733 | int rc; | ||
734 | |||
735 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); | ||
736 | |||
737 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, | ||
738 | outbuf, sizeof(outbuf), &outlen); | ||
739 | if (rc) | ||
740 | goto fail; | ||
741 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) | ||
742 | goto fail; | ||
743 | |||
744 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); | ||
745 | return 0; | ||
746 | |||
747 | fail: | ||
748 | EFX_ERR(efx, "%s: failed rc=%d\n", | ||
749 | __func__, rc); | ||
750 | return rc; | ||
751 | } | ||
752 | |||
753 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | ||
754 | size_t *size_out, size_t *erase_size_out, | ||
755 | bool *protected_out) | ||
756 | { | ||
757 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; | ||
758 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; | ||
759 | size_t outlen; | ||
760 | int rc; | ||
761 | |||
762 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); | ||
763 | |||
764 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), | ||
765 | outbuf, sizeof(outbuf), &outlen); | ||
766 | if (rc) | ||
767 | goto fail; | ||
768 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) | ||
769 | goto fail; | ||
770 | |||
771 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); | ||
772 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); | ||
773 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & | ||
774 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); | ||
775 | return 0; | ||
776 | |||
777 | fail: | ||
778 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
779 | return rc; | ||
780 | } | ||
781 | |||
782 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | ||
783 | { | ||
784 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; | ||
785 | int rc; | ||
786 | |||
787 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | ||
788 | |||
789 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | ||
790 | |||
791 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | ||
792 | NULL, 0, NULL); | ||
793 | if (rc) | ||
794 | goto fail; | ||
795 | |||
796 | return 0; | ||
797 | |||
798 | fail: | ||
799 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
800 | return rc; | ||
801 | } | ||
802 | |||
803 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | ||
804 | loff_t offset, u8 *buffer, size_t length) | ||
805 | { | ||
806 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | ||
807 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
808 | size_t outlen; | ||
809 | int rc; | ||
810 | |||
811 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); | ||
812 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); | ||
813 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); | ||
814 | |||
815 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), | ||
816 | outbuf, sizeof(outbuf), &outlen); | ||
817 | if (rc) | ||
818 | goto fail; | ||
819 | |||
820 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); | ||
821 | return 0; | ||
822 | |||
823 | fail: | ||
824 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
825 | return rc; | ||
826 | } | ||
827 | |||
828 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | ||
829 | loff_t offset, const u8 *buffer, size_t length) | ||
830 | { | ||
831 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | ||
832 | int rc; | ||
833 | |||
834 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | ||
835 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); | ||
836 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); | ||
837 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); | ||
838 | |||
839 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | ||
840 | |||
841 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, | ||
842 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | ||
843 | NULL, 0, NULL); | ||
844 | if (rc) | ||
845 | goto fail; | ||
846 | |||
847 | return 0; | ||
848 | |||
849 | fail: | ||
850 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
851 | return rc; | ||
852 | } | ||
853 | |||
854 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | ||
855 | loff_t offset, size_t length) | ||
856 | { | ||
857 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; | ||
858 | int rc; | ||
859 | |||
860 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | ||
861 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | ||
862 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | ||
863 | |||
864 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | ||
865 | |||
866 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | ||
867 | NULL, 0, NULL); | ||
868 | if (rc) | ||
869 | goto fail; | ||
870 | |||
871 | return 0; | ||
872 | |||
873 | fail: | ||
874 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
875 | return rc; | ||
876 | } | ||
877 | |||
878 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | ||
879 | { | ||
880 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; | ||
881 | int rc; | ||
882 | |||
883 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | ||
884 | |||
885 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | ||
886 | |||
887 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | ||
888 | NULL, 0, NULL); | ||
889 | if (rc) | ||
890 | goto fail; | ||
891 | |||
892 | return 0; | ||
893 | |||
894 | fail: | ||
895 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
896 | return rc; | ||
897 | } | ||
898 | |||
899 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) | ||
900 | { | ||
901 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; | ||
902 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; | ||
903 | int rc; | ||
904 | |||
905 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); | ||
906 | |||
907 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), | ||
908 | outbuf, sizeof(outbuf), NULL); | ||
909 | if (rc) | ||
910 | return rc; | ||
911 | |||
912 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { | ||
913 | case MC_CMD_NVRAM_TEST_PASS: | ||
914 | case MC_CMD_NVRAM_TEST_NOTSUPP: | ||
915 | return 0; | ||
916 | default: | ||
917 | return -EIO; | ||
918 | } | ||
919 | } | ||
920 | |||
921 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) | ||
922 | { | ||
923 | u32 nvram_types; | ||
924 | unsigned int type; | ||
925 | int rc; | ||
926 | |||
927 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | ||
928 | if (rc) | ||
929 | return rc; | ||
930 | |||
931 | type = 0; | ||
932 | while (nvram_types != 0) { | ||
933 | if (nvram_types & 1) { | ||
934 | rc = efx_mcdi_nvram_test(efx, type); | ||
935 | if (rc) | ||
936 | return rc; | ||
937 | } | ||
938 | type++; | ||
939 | nvram_types >>= 1; | ||
940 | } | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | static int efx_mcdi_read_assertion(struct efx_nic *efx) | ||
946 | { | ||
947 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; | ||
948 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; | ||
949 | unsigned int flags, index, ofst; | ||
950 | const char *reason; | ||
951 | size_t outlen; | ||
952 | int retry; | ||
953 | int rc; | ||
954 | |||
955 | /* Attempt to read any stored assertion state before we reboot | ||
956 | * the mcfw out of the assertion handler. Retry twice, once | ||
957 | * because a boot-time assertion might cause this command to fail | ||
958 | * with EINTR, and again because GET_ASSERTS can race with | ||
959 | * MC_CMD_REBOOT running on the other port. */ | ||
960 | retry = 2; | ||
961 | do { | ||
962 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); | ||
963 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, | ||
964 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, | ||
965 | outbuf, sizeof(outbuf), &outlen); | ||
966 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); | ||
967 | |||
968 | if (rc) | ||
969 | return rc; | ||
970 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) | ||
971 | return -EINVAL; | ||
972 | |||
973 | /* Print out any recorded assertion state */ | ||
974 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); | ||
975 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) | ||
976 | return 0; | ||
977 | |||
978 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) | ||
979 | ? "system-level assertion" | ||
980 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) | ||
981 | ? "thread-level assertion" | ||
982 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) | ||
983 | ? "watchdog reset" | ||
984 | : "unknown assertion"; | ||
985 | EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, | ||
986 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), | ||
987 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | ||
988 | |||
989 | /* Print out the registers */ | ||
990 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; | ||
991 | for (index = 1; index < 32; index++) { | ||
992 | EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, | ||
993 | MCDI_DWORD2(outbuf, ofst)); | ||
994 | ofst += sizeof(efx_dword_t); | ||
995 | } | ||
996 | |||
997 | return 0; | ||
998 | } | ||
999 | |||
1000 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) | ||
1001 | { | ||
1002 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1003 | |||
1004 | /* Atomically reboot the mcfw out of the assertion handler */ | ||
1005 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1006 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, | ||
1007 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); | ||
1008 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, | ||
1009 | NULL, 0, NULL); | ||
1010 | } | ||
1011 | |||
1012 | int efx_mcdi_handle_assertion(struct efx_nic *efx) | ||
1013 | { | ||
1014 | int rc; | ||
1015 | |||
1016 | rc = efx_mcdi_read_assertion(efx); | ||
1017 | if (rc) | ||
1018 | return rc; | ||
1019 | |||
1020 | efx_mcdi_exit_assertion(efx); | ||
1021 | |||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
1025 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | ||
1026 | { | ||
1027 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; | ||
1028 | int rc; | ||
1029 | |||
1030 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); | ||
1031 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); | ||
1032 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); | ||
1033 | |||
1034 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); | ||
1035 | |||
1036 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); | ||
1037 | |||
1038 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | ||
1039 | NULL, 0, NULL); | ||
1040 | if (rc) | ||
1041 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1042 | } | ||
1043 | |||
1044 | int efx_mcdi_reset_port(struct efx_nic *efx) | ||
1045 | { | ||
1046 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | ||
1047 | if (rc) | ||
1048 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1049 | return rc; | ||
1050 | } | ||
1051 | |||
1052 | int efx_mcdi_reset_mc(struct efx_nic *efx) | ||
1053 | { | ||
1054 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | ||
1055 | int rc; | ||
1056 | |||
1057 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | ||
1058 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); | ||
1059 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), | ||
1060 | NULL, 0, NULL); | ||
1061 | /* White is black, and up is down */ | ||
1062 | if (rc == -EIO) | ||
1063 | return 0; | ||
1064 | if (rc == 0) | ||
1065 | rc = -EIO; | ||
1066 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1067 | return rc; | ||
1068 | } | ||
1069 | |||
1070 | int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | ||
1071 | const u8 *mac, int *id_out) | ||
1072 | { | ||
1073 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; | ||
1074 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; | ||
1075 | size_t outlen; | ||
1076 | int rc; | ||
1077 | |||
1078 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); | ||
1079 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, | ||
1080 | MC_CMD_FILTER_MODE_SIMPLE); | ||
1081 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); | ||
1082 | |||
1083 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), | ||
1084 | outbuf, sizeof(outbuf), &outlen); | ||
1085 | if (rc) | ||
1086 | goto fail; | ||
1087 | |||
1088 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { | ||
1089 | rc = -EMSGSIZE; | ||
1090 | goto fail; | ||
1091 | } | ||
1092 | |||
1093 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); | ||
1094 | |||
1095 | return 0; | ||
1096 | |||
1097 | fail: | ||
1098 | *id_out = -1; | ||
1099 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1100 | return rc; | ||
1101 | |||
1102 | } | ||
1103 | |||
1104 | |||
1105 | int | ||
1106 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) | ||
1107 | { | ||
1108 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); | ||
1109 | } | ||
1110 | |||
1111 | |||
1112 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | ||
1113 | { | ||
1114 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; | ||
1115 | size_t outlen; | ||
1116 | int rc; | ||
1117 | |||
1118 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, | ||
1119 | outbuf, sizeof(outbuf), &outlen); | ||
1120 | if (rc) | ||
1121 | goto fail; | ||
1122 | |||
1123 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { | ||
1124 | rc = -EMSGSIZE; | ||
1125 | goto fail; | ||
1126 | } | ||
1127 | |||
1128 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); | ||
1129 | |||
1130 | return 0; | ||
1131 | |||
1132 | fail: | ||
1133 | *id_out = -1; | ||
1134 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1135 | return rc; | ||
1136 | } | ||
1137 | |||
1138 | |||
1139 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | ||
1140 | { | ||
1141 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; | ||
1142 | int rc; | ||
1143 | |||
1144 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | ||
1145 | |||
1146 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | ||
1147 | NULL, 0, NULL); | ||
1148 | if (rc) | ||
1149 | goto fail; | ||
1150 | |||
1151 | return 0; | ||
1152 | |||
1153 | fail: | ||
1154 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1155 | return rc; | ||
1156 | } | ||
1157 | |||
1158 | |||
1159 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | ||
1160 | { | ||
1161 | int rc; | ||
1162 | |||
1163 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | ||
1164 | if (rc) | ||
1165 | goto fail; | ||
1166 | |||
1167 | return 0; | ||
1168 | |||
1169 | fail: | ||
1170 | EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); | ||
1171 | return rc; | ||
1172 | } | ||
1173 | |||