Diffstat (limited to 'drivers/net/sfc/mcdi.c')
-rw-r--r--  drivers/net/sfc/mcdi.c  1112
1 file changed, 1112 insertions, 0 deletions
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
new file mode 100644
index 000000000000..683353b904c7
--- /dev/null
+++ b/drivers/net/sfc/mcdi.c
@@ -0,0 +1,1112 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
11#include "net_driver.h"
12#include "nic.h"
13#include "io.h"
14#include "regs.h"
15#include "mcdi_pcol.h"
16#include "phy.h"
17
18/**************************************************************************
19 *
20 * Management-Controller-to-Driver Interface
21 *
22 **************************************************************************
23 */
24
25/* Software-defined layout of the MCDI shared memory */
26#define CMD_NOTIFY_PORT0 0
27#define CMD_NOTIFY_PORT1 4
28#define CMD_PDU_PORT0 0x008
29#define CMD_PDU_PORT1 0x108
30#define REBOOT_FLAG_PORT0 0x3f8
31#define REBOOT_FLAG_PORT1 0x3fc
32
33#define MCDI_RPC_TIMEOUT 10 /* seconds */
34
35#define MCDI_PDU(efx) \
36 (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
37#define MCDI_DOORBELL(efx) \
38 (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
39#define MCDI_REBOOT_FLAG(efx) \
40 (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
41
42#define SEQ_MASK \
43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
44
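/* Look up the per-port MCDI state, which lives inside the Siena NIC data */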
45static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
46{
47 struct siena_nic_data *nic_data;
48 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
49 nic_data = efx->nic_data;
50 return &nic_data->mcdi;
51}
52
53void efx_mcdi_init(struct efx_nic *efx)
54{
55 struct efx_mcdi_iface *mcdi;
56
57 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
58 return;
59
60 mcdi = efx_mcdi(efx);
61 init_waitqueue_head(&mcdi->wq);
62 spin_lock_init(&mcdi->iface_lock);
63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
64 mcdi->mode = MCDI_MODE_POLL;
65
66 (void) efx_mcdi_poll_reboot(efx);
67}
68
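/* Write the MCDI request header and payload into the shared-memory PDU,
 * then ring the doorbell to notify the MC */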
69static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
70 const u8 *inbuf, size_t inlen)
71{
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
75 unsigned int i;
76 efx_dword_t hdr;
77 u32 xflags, seqno;
78
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
80 BUG_ON(inlen & 3 || inlen >= 0x100);
81
82 seqno = mcdi->seqno & SEQ_MASK;
83 xflags = 0;
84 if (mcdi->mode == MCDI_MODE_EVENTS)
85 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
86
87 EFX_POPULATE_DWORD_6(hdr,
88 MCDI_HEADER_RESPONSE, 0,
89 MCDI_HEADER_RESYNC, 1,
90 MCDI_HEADER_CODE, cmd,
91 MCDI_HEADER_DATALEN, inlen,
92 MCDI_HEADER_SEQ, seqno,
93 MCDI_HEADER_XFLAGS, xflags);
94
95 efx_writed(efx, &hdr, pdu);
96
97 for (i = 0; i < inlen; i += 4)
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
99
100 /* Ensure the header and payload are written out before the doorbell */
101 wmb();
102
103 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
105}
106
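/* Copy the response payload (the dwords following the header) out of the
 * shared-memory PDU */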
107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
108{
109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
111 int i;
112
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
114 BUG_ON(outlen & 3 || outlen >= 0x100);
115
116 for (i = 0; i < outlen; i += 4)
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
118}
119
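/* Busy-wait, then sleep, until the MC writes a response header or the
 * timeout expires; decode the result into mcdi->resprc and mcdi->resplen */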
120static int efx_mcdi_poll(struct efx_nic *efx)
121{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int time, finish;
124 unsigned int respseq, respcmd, error;
125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
126 unsigned int rc, spins;
127 efx_dword_t reg;
128
129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
130 rc = efx_mcdi_poll_reboot(efx);
131 if (rc)
132 goto out;
133
134 /* Poll for completion. Poll quickly (once per microsecond) for the
135 * first jiffy, because MCDI responses are generally fast. After that,
136 * back off and poll approximately once per jiffy.
137 */
138 spins = TICK_USEC;
139 finish = get_seconds() + MCDI_RPC_TIMEOUT;
140
141 while (1) {
142 if (spins != 0) {
143 --spins;
144 udelay(1);
145 } else
146 schedule();
147
148 time = get_seconds();
149
150 rmb();
151 efx_readd(efx, &reg, pdu);
152
153 /* All 1's indicates that shared memory is in reset (and is
154 * not a valid header). Wait for it to come out of reset before
155 * completing the command */
156 if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
157 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
158 break;
159
160 if (time >= finish)
161 return -ETIMEDOUT;
162 }
163
164 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
165 respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
166 respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
167 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
168
169 if (error && mcdi->resplen == 0) {
170 EFX_ERR(efx, "MC rebooted\n");
171 rc = EIO;
172 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
173 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
174 respseq, mcdi->seqno);
175 rc = EIO;
176 } else if (error) {
177 efx_readd(efx, &reg, pdu + 4);
178 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
179#define TRANSLATE_ERROR(name) \
180 case MC_CMD_ERR_ ## name: \
181 rc = name; \
182 break
183 TRANSLATE_ERROR(ENOENT);
184 TRANSLATE_ERROR(EINTR);
185 TRANSLATE_ERROR(EACCES);
186 TRANSLATE_ERROR(EBUSY);
187 TRANSLATE_ERROR(EINVAL);
188 TRANSLATE_ERROR(EDEADLK);
189 TRANSLATE_ERROR(ENOSYS);
190 TRANSLATE_ERROR(ETIME);
191#undef TRANSLATE_ERROR
192 default:
193 rc = EIO;
194 break;
195 }
196 } else
197 rc = 0;
198
199out:
200 mcdi->resprc = rc;
201 if (rc)
202 mcdi->resplen = 0;
203
204 /* Return rc=0 like wait_event_timeout() */
205 return 0;
206}
207
208/* Test and clear MC-rebooted flag for this port/function */
209int efx_mcdi_poll_reboot(struct efx_nic *efx)
210{
211 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
212 efx_dword_t reg;
213 uint32_t value;
214
215 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
216 return false;
217
218 efx_readd(efx, &reg, addr);
219 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
220
221 if (value == 0)
222 return 0;
223
224 EFX_ZERO_DWORD(reg);
225 efx_writed(efx, &reg, addr);
226
227 if (value == MC_STATUS_DWORD_ASSERT)
228 return -EINTR;
229 else
230 return -EIO;
231}
232
233static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
234{
235 /* Wait until the interface becomes QUIESCENT and we win the race
236 * to mark it RUNNING. */
237 wait_event(mcdi->wq,
238 atomic_cmpxchg(&mcdi->state,
239 MCDI_STATE_QUIESCENT,
240 MCDI_STATE_RUNNING)
241 == MCDI_STATE_QUIESCENT);
242}
243
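/* Wait for an event-driven completion, falling back to polling if the
 * interface was switched back to MCDI_MODE_POLL in the meantime */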
244static int efx_mcdi_await_completion(struct efx_nic *efx)
245{
246 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
247
248 if (wait_event_timeout(
249 mcdi->wq,
250 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
251 msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
252 return -ETIMEDOUT;
253
254 /* Check if efx_mcdi_mode_poll() switched us back to polled completions.
255 * If so, poll for the completion directly. If efx_mcdi_ev_cpl()
256 * completed the request first, then we'll just end up completing the
257 * request again, which is safe.
258 *
259 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
260 * wait_event_timeout() implicitly provides.
261 */
262 if (mcdi->mode == MCDI_MODE_POLL)
263 return efx_mcdi_poll(efx);
264
265 return 0;
266}
267
268static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
269{
270 /* If the interface is RUNNING, then move to COMPLETED and wake any
271 * waiters. If the interface isn't in RUNNING then we've received a
272 * duplicate completion after we've already transitioned back to
273 * QUIESCENT. [A subsequent invocation would increment seqno, so would
274 * have failed the seqno check].
275 */
276 if (atomic_cmpxchg(&mcdi->state,
277 MCDI_STATE_RUNNING,
278 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
279 wake_up(&mcdi->wq);
280 return true;
281 }
282
283 return false;
284}
285
286static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
287{
288 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
289 wake_up(&mcdi->wq);
290}
291
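/* Handle a CMDDONE event: match it to the outstanding request by sequence
 * number and record the result before waking the caller */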
292static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
293 unsigned int datalen, unsigned int errno)
294{
295 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
296 bool wake = false;
297
298 spin_lock(&mcdi->iface_lock);
299
300 if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
301 if (mcdi->credits)
302 /* The request has been cancelled */
303 --mcdi->credits;
304 else
305 EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
306 "seq 0x%x\n", seqno, mcdi->seqno);
307 } else {
308 mcdi->resprc = errno;
309 mcdi->resplen = datalen;
310
311 wake = true;
312 }
313
314 spin_unlock(&mcdi->iface_lock);
315
316 if (wake)
317 efx_mcdi_complete(mcdi);
318}
319
320/* Issue the given command by writing the data into the shared memory PDU,
321 * ringing the doorbell and waiting for completion. Copy out the result. */
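/* Illustrative call pattern (mirrors efx_mcdi_fwver() below): a request with
 * no payload and a fixed-size response buffer:
 *
 *	u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
 *	size_t outlen;
 *	int rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
 *			      outbuf, sizeof(outbuf), &outlen);
 */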
322int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
323 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
324 size_t *outlen_actual)
325{
326 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
327 int rc;
328 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
329
330 efx_mcdi_acquire(mcdi);
331
332 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
333 spin_lock_bh(&mcdi->iface_lock);
334 ++mcdi->seqno;
335 spin_unlock_bh(&mcdi->iface_lock);
336
337 efx_mcdi_copyin(efx, cmd, inbuf, inlen);
338
339 if (mcdi->mode == MCDI_MODE_POLL)
340 rc = efx_mcdi_poll(efx);
341 else
342 rc = efx_mcdi_await_completion(efx);
343
344 if (rc != 0) {
345 /* Close the race with efx_mcdi_ev_cpl() executing just too late
346 * and completing a request we've just cancelled, by ensuring
347 * that the seqno check therein fails.
348 */
349 spin_lock_bh(&mcdi->iface_lock);
350 ++mcdi->seqno;
351 ++mcdi->credits;
352 spin_unlock_bh(&mcdi->iface_lock);
353
354 EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
355 cmd, (int)inlen, mcdi->mode);
356 } else {
357 size_t resplen;
358
359 /* At the very least we need a memory barrier here to ensure
360 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
361 * a spurious efx_mcdi_ev_cpl() running concurrently by
362 * acquiring the iface_lock. */
363 spin_lock_bh(&mcdi->iface_lock);
364 rc = -mcdi->resprc;
365 resplen = mcdi->resplen;
366 spin_unlock_bh(&mcdi->iface_lock);
367
368 if (rc == 0) {
369 efx_mcdi_copyout(efx, outbuf,
370 min(outlen, mcdi->resplen + 3) & ~0x3);
371 if (outlen_actual != NULL)
372 *outlen_actual = resplen;
373 } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
374 ; /* Don't reset if MC_CMD_REBOOT returns EIO */
375 else if (rc == -EIO || rc == -EINTR) {
376 EFX_ERR(efx, "MC fatal error %d\n", -rc);
377 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
378 } else
379 EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
380 cmd, (int)inlen, -rc);
381 }
382
383 efx_mcdi_release(mcdi);
384 return rc;
385}
386
387void efx_mcdi_mode_poll(struct efx_nic *efx)
388{
389 struct efx_mcdi_iface *mcdi;
390
391 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
392 return;
393
394 mcdi = efx_mcdi(efx);
395 if (mcdi->mode == MCDI_MODE_POLL)
396 return;
397
398 /* We can switch from event completion to polled completion, because
399 * mcdi requests are always completed in shared memory. We do this by
400 * switching the mode to POLL and then completing the request.
401 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
402 *
403 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
404 * which efx_mcdi_complete() provides for us.
405 */
406 mcdi->mode = MCDI_MODE_POLL;
407
408 efx_mcdi_complete(mcdi);
409}
410
411void efx_mcdi_mode_event(struct efx_nic *efx)
412{
413 struct efx_mcdi_iface *mcdi;
414
415 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
416 return;
417
418 mcdi = efx_mcdi(efx);
419
420 if (mcdi->mode == MCDI_MODE_EVENTS)
421 return;
422
423 /* We can't switch from polled to event completion in the middle of a
424 * request, because the completion method is specified in the request.
425 * So acquire the interface to serialise the requestors. We don't need
426 * to acquire the iface_lock to change the mode here, but we do need a
427 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
428 * efx_mcdi_acquire() provides.
429 */
430 efx_mcdi_acquire(mcdi);
431 mcdi->mode = MCDI_MODE_EVENTS;
432 efx_mcdi_release(mcdi);
433}
434
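/* Handle an MC assertion or reboot notification by failing any outstanding
 * request, or scheduling a reset if nothing was waiting */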
435static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
436{
437 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
438
439 /* If there is an outstanding MCDI request, it has been terminated
440 * either by a BADASSERT or REBOOT event. If the mcdi interface is
441 * in polled mode, then do nothing because the MC reboot handler will
442 * set the header correctly. However, if the mcdi interface is waiting
443 * for a CMDDONE event it won't receive it [and since all MCDI events
444 * are sent to the same queue, we can't be racing with
445 * efx_mcdi_ev_cpl()]
446 *
447 * There's a race here with efx_mcdi_rpc(), because we might receive
448 * a REBOOT event *before* the request has been copied out. In polled
449 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
450 * is ignored. In event mode, this condition is just an edge-case of
451 * receiving a REBOOT event after posting the MCDI request. Did the mc
452 * reboot before or after the copyout? The best we can do in either
453 * case is to return failure.
454 */
455 spin_lock(&mcdi->iface_lock);
456 if (efx_mcdi_complete(mcdi)) {
457 if (mcdi->mode == MCDI_MODE_EVENTS) {
458 mcdi->resprc = rc;
459 mcdi->resplen = 0;
460 }
461 } else
462 /* Nobody was waiting for an MCDI request, so trigger a reset */
463 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
464
465 spin_unlock(&mcdi->iface_lock);
466}
467
468static unsigned int efx_mcdi_event_link_speed[] = {
469 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
470 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
471 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
472};
473
474
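/* Decode a LINKCHANGE event and propagate the new link state to the
 * driver core */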
475static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
476{
477 u32 flags, fcntl, speed, lpa;
478
479 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
480 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
481 speed = efx_mcdi_event_link_speed[speed];
482
483 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
484 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
485 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
486
487 /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
488 * which is only run after flushing the event queues. Therefore, it
489 * is safe to modify the link state outside of the mac_lock here.
490 */
491 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
492
493 efx_mcdi_phy_check_fcntl(efx, lpa);
494
495 efx_link_status_changed(efx);
496}
497
498static const char *sensor_names[] = {
499 [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
500 [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
501 [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
502 [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
503 [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
504 [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
505 [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
506 [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
507 [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
508 [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
509 [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
510 [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
511 [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
512};
513
514static const char *sensor_status_names[] = {
515 [MC_CMD_SENSOR_STATE_OK] = "OK",
516 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
517 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
518 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
519};
520
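/* Log a SENSOREVT event, translating the sensor index and state into
 * human-readable names where possible */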
521static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
522{
523 unsigned int monitor, state, value;
524 const char *name, *state_txt;
525 monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
526 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
527 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
528 /* Deal gracefully with the board having more sensors than we
529 * know about, but do not expect new sensor states. */
530 name = (monitor >= ARRAY_SIZE(sensor_names))
531 ? "No sensor name available" :
532 sensor_names[monitor];
533 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
534 state_txt = sensor_status_names[state];
535
536 EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
537 monitor, name, state_txt, value);
538}
539
540/* Called from falcon_process_eventq for MCDI events */
541void efx_mcdi_process_event(struct efx_channel *channel,
542 efx_qword_t *event)
543{
544 struct efx_nic *efx = channel->efx;
545 int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
546 u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
547
548 switch (code) {
549 case MCDI_EVENT_CODE_BADSSERT:
550 EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
551 efx_mcdi_ev_death(efx, EINTR);
552 break;
553
554 case MCDI_EVENT_CODE_PMNOTICE:
555 EFX_INFO(efx, "MCDI PM event.\n");
556 break;
557
558 case MCDI_EVENT_CODE_CMDDONE:
559 efx_mcdi_ev_cpl(efx,
560 MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
561 MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
562 MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
563 break;
564
565 case MCDI_EVENT_CODE_LINKCHANGE:
566 efx_mcdi_process_link_change(efx, event);
567 break;
568 case MCDI_EVENT_CODE_SENSOREVT:
569 efx_mcdi_sensor_event(efx, event);
570 break;
571 case MCDI_EVENT_CODE_SCHEDERR:
572 EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
573 break;
574 case MCDI_EVENT_CODE_REBOOT:
575 EFX_INFO(efx, "MC Reboot\n");
576 efx_mcdi_ev_death(efx, EIO);
577 break;
578 case MCDI_EVENT_CODE_MAC_STATS_DMA:
579 /* MAC stats are gathered lazily. We can ignore this. */
580 break;
581
582 default:
583 EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
584 }
585}
586
587/**************************************************************************
588 *
589 * Specific request functions
590 *
591 **************************************************************************
592 */
593
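/* Read the MC firmware version and build number via MC_CMD_GET_VERSION */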
594int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
595{
596 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
597 size_t outlength;
598 const __le16 *ver_words;
599 int rc;
600
601 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
602
603 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
604 outbuf, sizeof(outbuf), &outlength);
605 if (rc)
606 goto fail;
607
608 if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
609 *version = 0;
610 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
611 return 0;
612 }
613
614 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
615 rc = -EMSGSIZE;
616 goto fail;
617 }
618
619 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
620 *version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
621 ((u64)le16_to_cpu(ver_words[1]) << 32) |
622 ((u64)le16_to_cpu(ver_words[2]) << 16) |
623 le16_to_cpu(ver_words[3]));
624 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
625
626 return 0;
627
628fail:
629 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
630 return rc;
631}
632
633int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
634 bool *was_attached)
635{
636 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
637 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
638 size_t outlen;
639 int rc;
640
641 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
642 driver_operating ? 1 : 0);
643 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
644
645 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
646 outbuf, sizeof(outbuf), &outlen);
647 if (rc)
648 goto fail;
649 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
650 goto fail;
651
652 if (was_attached != NULL)
653 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
654 return 0;
655
656fail:
657 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
658 return rc;
659}
660
661int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
662 u16 *fw_subtype_list)
663{
664 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
665 size_t outlen;
666 int port_num = efx_port_num(efx);
667 int offset;
668 int rc;
669
670 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
671
672 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
673 outbuf, sizeof(outbuf), &outlen);
674 if (rc)
675 goto fail;
676
677 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
678 rc = -EMSGSIZE;
679 goto fail;
680 }
681
682 offset = (port_num)
683 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
684 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
685 if (mac_address)
686 memcpy(mac_address, outbuf + offset, ETH_ALEN);
687 if (fw_subtype_list)
688 memcpy(fw_subtype_list,
689 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
690 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
691
692 return 0;
693
694fail:
695 EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);
696
697 return rc;
698}
699
700int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
701{
702 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
703 u32 dest = 0;
704 int rc;
705
706 if (uart)
707 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
708 if (evq)
709 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
710
711 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
712 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
713
714 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
715
716 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
717 NULL, 0, NULL);
718 if (rc)
719 goto fail;
720
721 return 0;
722
723fail:
724 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
725 return rc;
726}
727
728int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
729{
730 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
731 size_t outlen;
732 int rc;
733
734 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
735
736 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
737 outbuf, sizeof(outbuf), &outlen);
738 if (rc)
739 goto fail;
740 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
741 goto fail;
742
743 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
744 return 0;
745
746fail:
747 EFX_ERR(efx, "%s: failed rc=%d\n",
748 __func__, rc);
749 return rc;
750}
751
752int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
753 size_t *size_out, size_t *erase_size_out,
754 bool *protected_out)
755{
756 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
757 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
758 size_t outlen;
759 int rc;
760
761 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
762
763 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
764 outbuf, sizeof(outbuf), &outlen);
765 if (rc)
766 goto fail;
767 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
768 goto fail;
769
770 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
771 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
772 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
773 (1 << MC_CMD_NVRAM_PROTECTED_LBN));
774 return 0;
775
776fail:
777 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
778 return rc;
779}
780
781int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
782{
783 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
784 int rc;
785
786 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
787
788 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
789
790 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
791 NULL, 0, NULL);
792 if (rc)
793 goto fail;
794
795 return 0;
796
797fail:
798 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
799 return rc;
800}
801
802int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
803 loff_t offset, u8 *buffer, size_t length)
804{
805 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
806 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
807 size_t outlen;
808 int rc;
809
810 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
811 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
812 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
813
814 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
815 outbuf, sizeof(outbuf), &outlen);
816 if (rc)
817 goto fail;
818
819 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
820 return 0;
821
822fail:
823 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
824 return rc;
825}
826
827int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
828 loff_t offset, const u8 *buffer, size_t length)
829{
830 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
831 int rc;
832
833 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
835 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
836 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
837
838 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
839
840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
841 NULL, 0, NULL);
842 if (rc)
843 goto fail;
844
845 return 0;
846
847fail:
848 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
849 return rc;
850}
851
852int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
853 loff_t offset, size_t length)
854{
855 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
856 int rc;
857
858 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
859 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
860 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
861
862 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
863
864 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
865 NULL, 0, NULL);
866 if (rc)
867 goto fail;
868
869 return 0;
870
871fail:
872 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
873 return rc;
874}
875
876int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
877{
878 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
879 int rc;
880
881 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
882
883 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
884
885 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
886 NULL, 0, NULL);
887 if (rc)
888 goto fail;
889
890 return 0;
891
892fail:
893 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
894 return rc;
895}
896
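/* Query the MC for any stored assertion state, log it, and reboot the MC
 * to clear it */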
897int efx_mcdi_handle_assertion(struct efx_nic *efx)
898{
899 union {
900 u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
901 u8 reboot[MC_CMD_REBOOT_IN_LEN];
902 } inbuf;
903 u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
904 unsigned int flags, index, ofst;
905 const char *reason;
906 size_t outlen;
907 int retry;
908 int rc;
909
910 /* Check if the MC is in the assertion handler, retrying twice: once
911 * because a boot-time assertion might cause this command to fail
912 * with EINTR, and again because GET_ASSERTS can race with
913 * MC_CMD_REBOOT running on the other port. */
914 retry = 2;
915 do {
916 MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
917 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
918 inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
919 assertion, sizeof(assertion), &outlen);
920 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
921
922 if (rc)
923 return rc;
924 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
925 return -EINVAL;
926
927 flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
928 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
929 return 0;
930
931 /* Reset the hardware atomically such that only one port will succeed.
932 * This command will succeed if a reboot is no longer required (because
933 * the other port did it first), but fail with EIO if the reboot occurs.
934 */
935 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
936 MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
937 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
938 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
939 NULL, 0, NULL);
940
941 /* Print out the assertion */
942 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
943 ? "system-level assertion"
944 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
945 ? "thread-level assertion"
946 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
947 ? "watchdog reset"
948 : "unknown assertion";
949 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
950 MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
951 MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
952
953 /* Print out the registers */
954 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
955 for (index = 1; index < 32; index++) {
956 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
957 MCDI_DWORD2(assertion, ofst));
958 ofst += sizeof(efx_dword_t);
959 }
960
961 return 0;
962}
963
964void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
965{
966 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
967 int rc;
968
969 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
970 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
971 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
972
973 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
974
975 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
976
977 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
978 NULL, 0, NULL);
979 if (rc)
980 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
981}
982
983int efx_mcdi_reset_port(struct efx_nic *efx)
984{
985 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
986 if (rc)
987 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
988 return rc;
989}
990
991int efx_mcdi_reset_mc(struct efx_nic *efx)
992{
993 u8 inbuf[MC_CMD_REBOOT_IN_LEN];
994 int rc;
995
996 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
997 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
998 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
999 NULL, 0, NULL);
1000 /* White is black, and up is down */
1001 if (rc == -EIO)
1002 return 0;
1003 if (rc == 0)
1004 rc = -EIO;
1005 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1006 return rc;
1007}
1008
1009int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1010 const u8 *mac, int *id_out)
1011{
1012 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
1013 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
1014 size_t outlen;
1015 int rc;
1016
1017 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1018 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1019 MC_CMD_FILTER_MODE_SIMPLE);
1020 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
1021
1022 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1023 outbuf, sizeof(outbuf), &outlen);
1024 if (rc)
1025 goto fail;
1026
1027 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1028 rc = -EMSGSIZE;
1029 goto fail;
1030 }
1031
1032 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1033
1034 return 0;
1035
1036fail:
1037 *id_out = -1;
1038 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1039 return rc;
1040
1041}
1042
1043
1044int
1045efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
1046{
1047 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1048}
1049
1050
1051int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1052{
1053 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
1054 size_t outlen;
1055 int rc;
1056
1057 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1058 outbuf, sizeof(outbuf), &outlen);
1059 if (rc)
1060 goto fail;
1061
1062 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1063 rc = -EMSGSIZE;
1064 goto fail;
1065 }
1066
1067 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
1068
1069 return 0;
1070
1071fail:
1072 *id_out = -1;
1073 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1074 return rc;
1075}
1076
1077
1078int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1079{
1080 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
1081 int rc;
1082
1083 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
1084
1085 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
1086 NULL, 0, NULL);
1087 if (rc)
1088 goto fail;
1089
1090 return 0;
1091
1092fail:
1093 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1094 return rc;
1095}
1096
1097
1098int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1099{
1100 int rc;
1101
1102 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
1103 if (rc)
1104 goto fail;
1105
1106 return 0;
1107
1108fail:
1109 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
1110 return rc;
1111}
1112