about summary refs log tree commit diff stats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorBen Hutchings <bhutchings@solarflare.com>2013-08-27 18:12:31 -0400
committerBen Hutchings <bhutchings@solarflare.com>2013-08-29 13:12:06 -0400
commitcade715ff18440dda53e59c10c606586c92be33e (patch)
tree5c88d245245ccf9f67f6d19b076bd280686e5346 /drivers/net/ethernet
parent251111d9a1bd9a26e25446d876156bf265858cb5 (diff)
sfc: Implement asynchronous MCDI requests
This will allow use of MCDI from the data path, in particular for accelerated RFS.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c289
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h29
3 files changed, 291 insertions, 30 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index db2f119d7fec..69150fa1459b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1368,6 +1368,9 @@ static void efx_soft_disable_interrupts(struct efx_nic *efx)
1368 if (!channel->type->keep_eventq) 1368 if (!channel->type->keep_eventq)
1369 efx_fini_eventq(channel); 1369 efx_fini_eventq(channel);
1370 } 1370 }
1371
1372 /* Flush the asynchronous MCDI request queue */
1373 efx_mcdi_flush_async(efx);
1371} 1374}
1372 1375
1373static void efx_enable_interrupts(struct efx_nic *efx) 1376static void efx_enable_interrupts(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index c616fb52d1d5..8150781b41eb 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -37,6 +37,18 @@
37#define SEQ_MASK \ 37#define SEQ_MASK \
38 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 38 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
39 39
40struct efx_mcdi_async_param {
41 struct list_head list;
42 unsigned int cmd;
43 size_t inlen;
44 size_t outlen;
45 efx_mcdi_async_completer *complete;
46 unsigned long cookie;
47 /* followed by request/response buffer */
48};
49
50static void efx_mcdi_timeout_async(unsigned long context);
51
40static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 52static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
41{ 53{
42 EFX_BUG_ON_PARANOID(!efx->mcdi); 54 EFX_BUG_ON_PARANOID(!efx->mcdi);
@@ -52,10 +64,15 @@ int efx_mcdi_init(struct efx_nic *efx)
52 return -ENOMEM; 64 return -ENOMEM;
53 65
54 mcdi = efx_mcdi(efx); 66 mcdi = efx_mcdi(efx);
67 mcdi->efx = efx;
55 init_waitqueue_head(&mcdi->wq); 68 init_waitqueue_head(&mcdi->wq);
56 spin_lock_init(&mcdi->iface_lock); 69 spin_lock_init(&mcdi->iface_lock);
57 mcdi->state = MCDI_STATE_QUIESCENT; 70 mcdi->state = MCDI_STATE_QUIESCENT;
58 mcdi->mode = MCDI_MODE_POLL; 71 mcdi->mode = MCDI_MODE_POLL;
72 spin_lock_init(&mcdi->async_lock);
73 INIT_LIST_HEAD(&mcdi->async_list);
74 setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
75 (unsigned long)mcdi);
59 76
60 (void) efx_mcdi_poll_reboot(efx); 77 (void) efx_mcdi_poll_reboot(efx);
61 mcdi->new_epoch = true; 78 mcdi->new_epoch = true;
@@ -253,13 +270,21 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
253 return efx->type->mcdi_poll_reboot(efx); 270 return efx->type->mcdi_poll_reboot(efx);
254} 271}
255 272
256static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) 273static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
274{
275 return cmpxchg(&mcdi->state,
276 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
277 MCDI_STATE_QUIESCENT;
278}
279
280static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
257{ 281{
258 /* Wait until the interface becomes QUIESCENT and we win the race 282 /* Wait until the interface becomes QUIESCENT and we win the race
259 * to mark it RUNNING. */ 283 * to mark it RUNNING_SYNC.
284 */
260 wait_event(mcdi->wq, 285 wait_event(mcdi->wq,
261 cmpxchg(&mcdi->state, 286 cmpxchg(&mcdi->state,
262 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING) == 287 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
263 MCDI_STATE_QUIESCENT); 288 MCDI_STATE_QUIESCENT);
264} 289}
265 290
@@ -285,16 +310,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
285 return 0; 310 return 0;
286} 311}
287 312
288static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) 313/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
314 * requester. Return whether this was done. Does not take any locks.
315 */
316static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
289{ 317{
290 /* If the interface is RUNNING, then move to COMPLETED and wake any 318 if (cmpxchg(&mcdi->state,
291 * waiters. If the interface isn't in RUNNING then we've received a 319 MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
292 * duplicate completion after we've already transitioned back to 320 MCDI_STATE_RUNNING_SYNC) {
293 * QUIESCENT. [A subsequent invocation would increment seqno, so would
294 * have failed the seqno check].
295 */
296 if (cmpxchg(&mcdi->state, MCDI_STATE_RUNNING, MCDI_STATE_COMPLETED) ==
297 MCDI_STATE_RUNNING) {
298 wake_up(&mcdi->wq); 321 wake_up(&mcdi->wq);
299 return true; 322 return true;
300 } 323 }
@@ -304,10 +327,91 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
304 327
305static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 328static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
306{ 329{
330 if (mcdi->mode == MCDI_MODE_EVENTS) {
331 struct efx_mcdi_async_param *async;
332 struct efx_nic *efx = mcdi->efx;
333
334 /* Process the asynchronous request queue */
335 spin_lock_bh(&mcdi->async_lock);
336 async = list_first_entry_or_null(
337 &mcdi->async_list, struct efx_mcdi_async_param, list);
338 if (async) {
339 mcdi->state = MCDI_STATE_RUNNING_ASYNC;
340 efx_mcdi_send_request(efx, async->cmd,
341 (const efx_dword_t *)(async + 1),
342 async->inlen);
343 mod_timer(&mcdi->async_timer,
344 jiffies + MCDI_RPC_TIMEOUT);
345 }
346 spin_unlock_bh(&mcdi->async_lock);
347
348 if (async)
349 return;
350 }
351
307 mcdi->state = MCDI_STATE_QUIESCENT; 352 mcdi->state = MCDI_STATE_QUIESCENT;
308 wake_up(&mcdi->wq); 353 wake_up(&mcdi->wq);
309} 354}
310 355
356/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
357 * asynchronous completion function, and release the interface.
358 * Return whether this was done. Must be called in bh-disabled
359 * context. Will take iface_lock and async_lock.
360 */
361static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
362{
363 struct efx_nic *efx = mcdi->efx;
364 struct efx_mcdi_async_param *async;
365 size_t hdr_len, data_len;
366 efx_dword_t *outbuf;
367 int rc;
368
369 if (cmpxchg(&mcdi->state,
370 MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
371 MCDI_STATE_RUNNING_ASYNC)
372 return false;
373
374 spin_lock(&mcdi->iface_lock);
375 if (timeout) {
376 /* Ensure that if the completion event arrives later,
377 * the seqno check in efx_mcdi_ev_cpl() will fail
378 */
379 ++mcdi->seqno;
380 ++mcdi->credits;
381 rc = -ETIMEDOUT;
382 hdr_len = 0;
383 data_len = 0;
384 } else {
385 rc = mcdi->resprc;
386 hdr_len = mcdi->resp_hdr_len;
387 data_len = mcdi->resp_data_len;
388 }
389 spin_unlock(&mcdi->iface_lock);
390
391 /* Stop the timer. In case the timer function is running, we
392 * must wait for it to return so that there is no possibility
393 * of it aborting the next request.
394 */
395 if (!timeout)
396 del_timer_sync(&mcdi->async_timer);
397
398 spin_lock(&mcdi->async_lock);
399 async = list_first_entry(&mcdi->async_list,
400 struct efx_mcdi_async_param, list);
401 list_del(&async->list);
402 spin_unlock(&mcdi->async_lock);
403
404 outbuf = (efx_dword_t *)(async + 1);
405 efx->type->mcdi_read_response(efx, outbuf, hdr_len,
406 min(async->outlen, data_len));
407 async->complete(efx, async->cookie, rc, outbuf, data_len);
408 kfree(async);
409
410 efx_mcdi_release(mcdi);
411
412 return true;
413}
414
311static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 415static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
312 unsigned int datalen, unsigned int mcdi_err) 416 unsigned int datalen, unsigned int mcdi_err)
313{ 417{
@@ -339,8 +443,24 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
339 443
340 spin_unlock(&mcdi->iface_lock); 444 spin_unlock(&mcdi->iface_lock);
341 445
342 if (wake) 446 if (wake) {
343 efx_mcdi_complete(mcdi); 447 if (!efx_mcdi_complete_async(mcdi, false))
448 (void) efx_mcdi_complete_sync(mcdi);
449
450 /* If the interface isn't RUNNING_ASYNC or
451 * RUNNING_SYNC then we've received a duplicate
452 * completion after we've already transitioned back to
453 * QUIESCENT. [A subsequent invocation would increment
454 * seqno, so would have failed the seqno check].
455 */
456 }
457}
458
459static void efx_mcdi_timeout_async(unsigned long context)
460{
461 struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
462
463 efx_mcdi_complete_async(mcdi, true);
344} 464}
345 465
346static int 466static int
@@ -383,11 +503,80 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
383 if (rc) 503 if (rc)
384 return rc; 504 return rc;
385 505
386 efx_mcdi_acquire(mcdi); 506 efx_mcdi_acquire_sync(mcdi);
387 efx_mcdi_send_request(efx, cmd, inbuf, inlen); 507 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
388 return 0; 508 return 0;
389} 509}
390 510
511/**
512 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
513 * @efx: NIC through which to issue the command
514 * @cmd: Command type number
515 * @inbuf: Command parameters
516 * @inlen: Length of command parameters, in bytes
517 * @outlen: Length to allocate for response buffer, in bytes
518 * @complete: Function to be called on completion or cancellation.
519 * @cookie: Arbitrary value to be passed to @complete.
520 *
521 * This function does not sleep and therefore may be called in atomic
522 * context. It will fail if event queues are disabled or if MCDI
523 * event completions have been disabled due to an error.
524 *
525 * If it succeeds, the @complete function will be called exactly once
526 * in atomic context, when one of the following occurs:
527 * (a) the completion event is received (in NAPI context)
528 * (b) event queues are disabled (in the process that disables them)
529 * (c) the request times-out (in timer context)
530 */
531int
532efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
533 const efx_dword_t *inbuf, size_t inlen, size_t outlen,
534 efx_mcdi_async_completer *complete, unsigned long cookie)
535{
536 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
537 struct efx_mcdi_async_param *async;
538 int rc;
539
540 rc = efx_mcdi_check_supported(efx, cmd, inlen);
541 if (rc)
542 return rc;
543
544 async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
545 GFP_ATOMIC);
546 if (!async)
547 return -ENOMEM;
548
549 async->cmd = cmd;
550 async->inlen = inlen;
551 async->outlen = outlen;
552 async->complete = complete;
553 async->cookie = cookie;
554 memcpy(async + 1, inbuf, inlen);
555
556 spin_lock_bh(&mcdi->async_lock);
557
558 if (mcdi->mode == MCDI_MODE_EVENTS) {
559 list_add_tail(&async->list, &mcdi->async_list);
560
561 /* If this is at the front of the queue, try to start it
562 * immediately
563 */
564 if (mcdi->async_list.next == &async->list &&
565 efx_mcdi_acquire_async(mcdi)) {
566 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
567 mod_timer(&mcdi->async_timer,
568 jiffies + MCDI_RPC_TIMEOUT);
569 }
570 } else {
571 kfree(async);
572 rc = -ENETDOWN;
573 }
574
575 spin_unlock_bh(&mcdi->async_lock);
576
577 return rc;
578}
579
391int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 580int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
392 efx_dword_t *outbuf, size_t outlen, 581 efx_dword_t *outbuf, size_t outlen,
393 size_t *outlen_actual) 582 size_t *outlen_actual)
@@ -455,6 +644,10 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
455 return rc; 644 return rc;
456} 645}
457 646
647/* Switch to polled MCDI completions. This can be called in various
648 * error conditions with various locks held, so it must be lockless.
649 * Caller is responsible for flushing asynchronous requests later.
650 */
458void efx_mcdi_mode_poll(struct efx_nic *efx) 651void efx_mcdi_mode_poll(struct efx_nic *efx)
459{ 652{
460 struct efx_mcdi_iface *mcdi; 653 struct efx_mcdi_iface *mcdi;
@@ -472,11 +665,50 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
472 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 665 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
473 * 666 *
474 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 667 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
475 * which efx_mcdi_complete() provides for us. 668 * which efx_mcdi_complete_sync() provides for us.
476 */ 669 */
477 mcdi->mode = MCDI_MODE_POLL; 670 mcdi->mode = MCDI_MODE_POLL;
478 671
479 efx_mcdi_complete(mcdi); 672 efx_mcdi_complete_sync(mcdi);
673}
674
675/* Flush any running or queued asynchronous requests, after event processing
676 * is stopped
677 */
678void efx_mcdi_flush_async(struct efx_nic *efx)
679{
680 struct efx_mcdi_async_param *async, *next;
681 struct efx_mcdi_iface *mcdi;
682
683 if (!efx->mcdi)
684 return;
685
686 mcdi = efx_mcdi(efx);
687
688 /* We must be in polling mode so no more requests can be queued */
689 BUG_ON(mcdi->mode != MCDI_MODE_POLL);
690
691 del_timer_sync(&mcdi->async_timer);
692
693 /* If a request is still running, make sure we give the MC
694 * time to complete it so that the response won't overwrite our
695 * next request.
696 */
697 if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
698 efx_mcdi_poll(efx);
699 mcdi->state = MCDI_STATE_QUIESCENT;
700 }
701
702 /* Nothing else will access the async list now, so it is safe
703 * to walk it without holding async_lock. If we hold it while
704 * calling a completer then lockdep may warn that we have
705 * acquired locks in the wrong order.
706 */
707 list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
708 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
709 list_del(&async->list);
710 kfree(async);
711 }
480} 712}
481 713
482void efx_mcdi_mode_event(struct efx_nic *efx) 714void efx_mcdi_mode_event(struct efx_nic *efx)
@@ -498,7 +730,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
498 * write memory barrier ensure that efx_mcdi_rpc() sees it, which 730 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
499 * efx_mcdi_acquire() provides. 731 * efx_mcdi_acquire() provides.
500 */ 732 */
501 efx_mcdi_acquire(mcdi); 733 efx_mcdi_acquire_sync(mcdi);
502 mcdi->mode = MCDI_MODE_EVENTS; 734 mcdi->mode = MCDI_MODE_EVENTS;
503 efx_mcdi_release(mcdi); 735 efx_mcdi_release(mcdi);
504} 736}
@@ -515,16 +747,21 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
515 * are sent to the same queue, we can't be racing with 747 * are sent to the same queue, we can't be racing with
516 * efx_mcdi_ev_cpl()] 748 * efx_mcdi_ev_cpl()]
517 * 749 *
518 * There's a race here with efx_mcdi_rpc(), because we might receive 750 * If there is an outstanding asynchronous request, we can't
519 * a REBOOT event *before* the request has been copied out. In polled 751 * complete it now (efx_mcdi_complete() would deadlock). The
520 * mode (during startup) this is irrelevant, because efx_mcdi_complete() 752 * reset process will take care of this.
521 * is ignored. In event mode, this condition is just an edge-case of 753 *
522 * receiving a REBOOT event after posting the MCDI request. Did the mc 754 * There's a race here with efx_mcdi_send_request(), because
523 * reboot before or after the copyout? The best we can do always is 755 * we might receive a REBOOT event *before* the request has
524 * just return failure. 756 * been copied out. In polled mode (during startup) this is
757 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
758 * event mode, this condition is just an edge-case of
759 * receiving a REBOOT event after posting the MCDI
760 * request. Did the mc reboot before or after the copyout? The
761 * best we can do always is just return failure.
525 */ 762 */
526 spin_lock(&mcdi->iface_lock); 763 spin_lock(&mcdi->iface_lock);
527 if (efx_mcdi_complete(mcdi)) { 764 if (efx_mcdi_complete_sync(mcdi)) {
528 if (mcdi->mode == MCDI_MODE_EVENTS) { 765 if (mcdi->mode == MCDI_MODE_EVENTS) {
529 mcdi->resprc = rc; 766 mcdi->resprc = rc;
530 mcdi->resp_hdr_len = 0; 767 mcdi->resp_hdr_len = 0;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 6c0363a2abdf..e37cf1d6ed46 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -14,15 +14,17 @@
14 * enum efx_mcdi_state - MCDI request handling state 14 * enum efx_mcdi_state - MCDI request handling state
15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the 15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
16 * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING 16 * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
17 * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that 17 * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
18 * moved into this state is allowed to move out of it. 18 * Only the thread that moved into this state is allowed to move out of it.
19 * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
19 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread 20 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
20 * has not yet consumed the result. For all other threads, equivalent to 21 * has not yet consumed the result. For all other threads, equivalent to
21 * %MCDI_STATE_RUNNING. 22 * %MCDI_STATE_RUNNING.
22 */ 23 */
23enum efx_mcdi_state { 24enum efx_mcdi_state {
24 MCDI_STATE_QUIESCENT, 25 MCDI_STATE_QUIESCENT,
25 MCDI_STATE_RUNNING, 26 MCDI_STATE_RUNNING_SYNC,
27 MCDI_STATE_RUNNING_ASYNC,
26 MCDI_STATE_COMPLETED, 28 MCDI_STATE_COMPLETED,
27}; 29};
28 30
@@ -33,19 +35,25 @@ enum efx_mcdi_mode {
33 35
34/** 36/**
35 * struct efx_mcdi_iface - MCDI protocol context 37 * struct efx_mcdi_iface - MCDI protocol context
38 * @efx: The associated NIC.
36 * @state: Request handling state. Waited for by @wq. 39 * @state: Request handling state. Waited for by @wq.
37 * @mode: Poll for mcdi completion, or wait for an mcdi_event. 40 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
38 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING 41 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
39 * @new_epoch: Indicates start of day or start of MC reboot recovery 42 * @new_epoch: Indicates start of day or start of MC reboot recovery
40 * @iface_lock: Serialises access to all the following fields 43 * @iface_lock: Serialises access to @seqno, @credits and response metadata
41 * @seqno: The next sequence number to use for mcdi requests. 44 * @seqno: The next sequence number to use for mcdi requests.
42 * @credits: Number of spurious MCDI completion events allowed before we 45 * @credits: Number of spurious MCDI completion events allowed before we
43 * trigger a fatal error 46 * trigger a fatal error
44 * @resprc: Response error/success code (Linux numbering) 47 * @resprc: Response error/success code (Linux numbering)
45 * @resp_hdr_len: Response header length 48 * @resp_hdr_len: Response header length
46 * @resp_data_len: Response data (SDU or error) length 49 * @resp_data_len: Response data (SDU or error) length
50 * @async_lock: Serialises access to @async_list while event processing is
51 * enabled
52 * @async_list: Queue of asynchronous requests
53 * @async_timer: Timer for asynchronous request timeout
47 */ 54 */
48struct efx_mcdi_iface { 55struct efx_mcdi_iface {
56 struct efx_nic *efx;
49 enum efx_mcdi_state state; 57 enum efx_mcdi_state state;
50 enum efx_mcdi_mode mode; 58 enum efx_mcdi_mode mode;
51 wait_queue_head_t wq; 59 wait_queue_head_t wq;
@@ -56,6 +64,9 @@ struct efx_mcdi_iface {
56 int resprc; 64 int resprc;
57 size_t resp_hdr_len; 65 size_t resp_hdr_len;
58 size_t resp_data_len; 66 size_t resp_data_len;
67 spinlock_t async_lock;
68 struct list_head async_list;
69 struct timer_list async_timer;
59}; 70};
60 71
61struct efx_mcdi_mon { 72struct efx_mcdi_mon {
@@ -111,10 +122,20 @@ extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
111 efx_dword_t *outbuf, size_t outlen, 122 efx_dword_t *outbuf, size_t outlen,
112 size_t *outlen_actual); 123 size_t *outlen_actual);
113 124
125typedef void efx_mcdi_async_completer(struct efx_nic *efx,
126 unsigned long cookie, int rc,
127 efx_dword_t *outbuf,
128 size_t outlen_actual);
129extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
130 const efx_dword_t *inbuf, size_t inlen,
131 size_t outlen,
132 efx_mcdi_async_completer *complete,
133 unsigned long cookie);
114 134
115extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 135extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
116extern void efx_mcdi_mode_poll(struct efx_nic *efx); 136extern void efx_mcdi_mode_poll(struct efx_nic *efx);
117extern void efx_mcdi_mode_event(struct efx_nic *efx); 137extern void efx_mcdi_mode_event(struct efx_nic *efx);
138extern void efx_mcdi_flush_async(struct efx_nic *efx);
118 139
119extern void efx_mcdi_process_event(struct efx_channel *channel, 140extern void efx_mcdi_process_event(struct efx_channel *channel,
120 efx_qword_t *event); 141 efx_qword_t *event);