author    Ben Hutchings <bhutchings@solarflare.com>    2012-02-07 19:11:20 -0500
committer Ben Hutchings <bhutchings@solarflare.com>    2012-02-15 19:24:46 -0500
commit    9f2cb71c2b0ce33c472856c0feec2883fa0d9cd1 (patch)
tree      e60e2fc6a18ec5cfaf079e9855246a9282742667 /drivers/net/ethernet/sfc/nic.c
parent    2ae75dac301b0f255f79cd84ac70c619e55d7694 (diff)
sfc: Leave interrupts and event queues enabled whenever we can
When SR-IOV is enabled we may receive FLR (Function-Level Reset) events, associated queue flush events and requests from VF drivers at any time. Therefore we need to keep event queues and interrupts enabled whenever possible.

Currently we stop interrupt-driven event processing before flushing RX and TX queues; efx_nic_flush_queues() then polls event queues for flush events and discards any others it finds. Change it to work with the regular event handling functions.

Currently efx_start_channel() fills RX queues synchronously when a device is brought up. This could now race with NAPI, so change it to send fill events.

This was almost entirely written by Steve Hodgson, formerly shodgson@solarflare.com.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
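As an illustrative aside (not part of the patch): the core of the new flow is that queue flushes complete asynchronously via drain events, and efx_nic_flush_queues() simply sleeps on efx->flush_wq with a timeout until the count of outstanding drains reaches zero. A minimal user-space analogue of that handshake, with the pthread scaffolding assumed purely for illustration:

/* Illustrative user-space analogue (not driver code) of the flush handshake:
 * event processing decrements the count of outstanding drains and wakes the
 * flusher, which sleeps with a bounded timeout. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flush_wq = PTHREAD_COND_INITIALIZER; /* ~ efx->flush_wq */
static int drain_pending = 2;           /* e.g. one TX and one RX drain event */

static void *event_processing(void *arg)
{
	(void)arg;
	for (int i = 0; i < 2; i++) {
		pthread_mutex_lock(&lock);
		drain_pending--;                /* ~ efx_handle_drain_event() */
		pthread_cond_signal(&flush_wq); /* ~ wake_up(&efx->flush_wq) */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;                   /* 5 s budget, as in the patch */

	pthread_create(&tid, NULL, event_processing, NULL);

	pthread_mutex_lock(&lock);
	while (drain_pending > 0 &&             /* ~ wait_event_timeout() */
	       pthread_cond_timedwait(&flush_wq, &lock, &deadline) == 0)
		;
	printf("drain_pending at exit: %d\n", drain_pending);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}

In the driver itself the same roles are played by atomic counters, wait_event_timeout() and wake_up(), as shown in the hunks below.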
Diffstat (limited to 'drivers/net/ethernet/sfc/nic.c')
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 349
 1 file changed, 178 insertions(+), 171 deletions(-)
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 2b33afd4276d..03e4125d6b96 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -49,17 +49,14 @@
 #define EFX_INT_ERROR_EXPIRE 3600
 #define EFX_MAX_INT_ERRORS 5

-/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
- */
-#define EFX_FLUSH_INTERVAL 10
-#define EFX_FLUSH_POLL_COUNT 100
-
 /* Depth of RX flush request fifo */
 #define EFX_RX_FLUSH_COUNT 4

 /* Driver generated events */
 #define _EFX_CHANNEL_MAGIC_TEST 0x000101
 #define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104

 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
@@ -69,6 +66,12 @@
 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
 	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
 			   efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+			   efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+			   (_tx_queue)->queue)

 /**************************************************************************
  *
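As a worked example of the driver-generated "magic" event encoding defined above (a stand-alone sketch; the macro values are copied from the patch, while the 0xff data mask and the user-space harness are assumptions for illustration):

#include <assert.h>
#include <stdio.h>

#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103

#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)

int main(void)
{
	/* RX drain magic for RX queue index 3 */
	unsigned magic = _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, 3);

	/* The event handler recovers the code (and, by masking, the index) */
	assert(_EFX_CHANNEL_MAGIC_CODE(magic) == _EFX_CHANNEL_MAGIC_RX_DRAIN);
	assert((magic & 0xff) == 3);

	printf("magic = 0x%06x\n", magic);	/* prints 0x010303 */
	return 0;
}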
@@ -432,8 +435,6 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t reg;

-	tx_queue->flushed = FLUSH_NONE;
-
 	/* Pin TX descriptor ring */
 	efx_init_special_buffer(efx, &tx_queue->txd);

@@ -490,9 +491,6 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_flush_descq;

-	tx_queue->flushed = FLUSH_PENDING;
-
-	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
 			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -504,9 +502,6 @@ void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_desc_ptr;

-	/* The queue should have been flushed */
-	WARN_ON(tx_queue->flushed != FLUSH_DONE);
-
 	/* Remove TX descriptor ring from card */
 	EFX_ZERO_OWORD(tx_desc_ptr);
 	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -597,8 +592,6 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
 		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

-	rx_queue->flushed = FLUSH_NONE;
-
 	/* Pin RX descriptor ring */
 	efx_init_special_buffer(efx, &rx_queue->rxd);

@@ -627,9 +620,6 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	efx_oword_t rx_flush_descq;

-	rx_queue->flushed = FLUSH_PENDING;
-
-	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(rx_flush_descq,
 			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
 			     FRF_AZ_RX_FLUSH_DESCQ,
@@ -642,9 +632,6 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;

-	/* The queue should already have been flushed */
-	WARN_ON(rx_queue->flushed != FLUSH_DONE);
-
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
 	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
@@ -662,6 +649,89 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
 
 /**************************************************************************
  *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* efx_nic_flush_queues() must be woken up when all flushes are completed,
+ * or more RX flushes can be kicked off.
+ */
+static bool efx_flush_wake(struct efx_nic *efx)
+{
+	/* Ensure that all updates are visible to efx_nic_flush_queues() */
+	smp_mb();
+
+	return (atomic_read(&efx->drain_pending) == 0 ||
+		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+		 && atomic_read(&efx->rxq_flush_pending) > 0));
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+int efx_nic_flush_queues(struct efx_nic *efx)
+{
+	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+	struct efx_channel *channel;
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int rc = 0;
+
+	efx->type->prepare_flush(efx);
+
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			atomic_inc(&efx->drain_pending);
+			efx_flush_tx_queue(tx_queue);
+		}
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
+			atomic_inc(&efx->drain_pending);
+			rx_queue->flush_pending = true;
+			atomic_inc(&efx->rxq_flush_pending);
+		}
+	}
+
+	while (timeout && atomic_read(&efx->drain_pending) > 0) {
+		/* The hardware supports four concurrent rx flushes, each of
+		 * which may need to be retried if there is an outstanding
+		 * descriptor fetch
+		 */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				if (atomic_read(&efx->rxq_flush_outstanding) >=
+				    EFX_RX_FLUSH_COUNT)
+					break;
+
+				if (rx_queue->flush_pending) {
+					rx_queue->flush_pending = false;
+					atomic_dec(&efx->rxq_flush_pending);
+					atomic_inc(&efx->rxq_flush_outstanding);
+					efx_flush_rx_queue(rx_queue);
+				}
+			}
+		}
+
+		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
+					     timeout);
+	}
+
+	if (atomic_read(&efx->drain_pending)) {
+		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
+			  atomic_read(&efx->rxq_flush_outstanding),
+			  atomic_read(&efx->rxq_flush_pending));
+		rc = -ETIMEDOUT;
+
+		atomic_set(&efx->drain_pending, 0);
+		atomic_set(&efx->rxq_flush_pending, 0);
+		atomic_set(&efx->rxq_flush_outstanding, 0);
+	}
+
+	return rc;
+}
+
+/**************************************************************************
+ *
  * Event queue processing
  * Event queues are processed by per-channel tasklets.
  *
@@ -722,6 +792,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	struct efx_nic *efx = channel->efx;
 	int tx_packets = 0;

+	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+		return 0;
+
 	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
 		/* Transmit completion */
 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
@@ -863,6 +936,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	bool rx_ev_pkt_ok;
 	u16 flags;
 	struct efx_rx_queue *rx_queue;
+	struct efx_nic *efx = channel->efx;
+
+	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+		return;

 	/* Basic packet information */
 	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -909,6 +986,72 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
 }

+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+	struct efx_tx_queue *tx_queue;
+	int qid;
+
+	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+					    qid % EFX_TXQ_TYPES);
+
+		efx_magic_event(tx_queue->channel,
+				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+	}
+}
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+	struct efx_channel *channel;
+	struct efx_rx_queue *rx_queue;
+	int qid;
+	bool failed;
+
+	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+	if (qid >= efx->n_channels)
+		return;
+	channel = efx_get_channel(efx, qid);
+	if (!efx_channel_has_rx_queue(channel))
+		return;
+	rx_queue = efx_channel_get_rx_queue(channel);
+
+	if (failed) {
+		netif_info(efx, hw, efx->net_dev,
+			   "RXQ %d flush retry\n", qid);
+		rx_queue->flush_pending = true;
+		atomic_inc(&efx->rxq_flush_pending);
+	} else {
+		efx_magic_event(efx_rx_queue_channel(rx_queue),
+				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+	}
+	atomic_dec(&efx->rxq_flush_outstanding);
+	if (efx_flush_wake(efx))
+		wake_up(&efx->flush_wq);
+}
+
+static void
+efx_handle_drain_event(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+
+	WARN_ON(atomic_read(&efx->drain_pending) == 0);
+	atomic_dec(&efx->drain_pending);
+	if (efx_flush_wake(efx))
+		wake_up(&efx->flush_wq);
+}
+
 static void
 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 {
@@ -916,21 +1059,28 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 	struct efx_rx_queue *rx_queue =
 		efx_channel_has_rx_queue(channel) ?
 		efx_channel_get_rx_queue(channel) : NULL;
-	unsigned magic;
+	unsigned magic, code;

 	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+	code = _EFX_CHANNEL_MAGIC_CODE(magic);

-	if (magic == EFX_CHANNEL_MAGIC_TEST(channel))
-		; /* ignore */
-	else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue))
+	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+		/* ignore */
+	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
 		 * queue. Refill it here */
 		efx_fast_push_rx_descriptors(rx_queue);
-	else
+	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+		rx_queue->enabled = false;
+		efx_handle_drain_event(channel);
+	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+		efx_handle_drain_event(channel);
+	} else {
 		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
 			  "generated event "EFX_QWORD_FMT"\n",
 			  channel->channel, EFX_QWORD_VAL(*event));
+	}
 }

 static void
@@ -947,10 +1097,12 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
 		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
 			   channel->channel, ev_sub_data);
+		efx_handle_tx_flush_done(efx, event);
 		break;
 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
 		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
 			   channel->channel, ev_sub_data);
+		efx_handle_rx_flush_done(efx, event);
 		break;
 	case FSE_AZ_EVQ_INIT_DONE_EV:
 		netif_dbg(efx, hw, efx->net_dev,
@@ -1162,143 +1314,6 @@ void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
 
 /**************************************************************************
  *
- * Flush handling
- *
- **************************************************************************/
-
-
-static void efx_poll_flush_events(struct efx_nic *efx)
-{
-	struct efx_channel *channel = efx_get_channel(efx, 0);
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
-	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
-
-	do {
-		efx_qword_t *event = efx_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		bool ev_failed;
-
-		if (!efx_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event,
-					      FSF_AZ_DRIVER_EV_SUBCODE);
-		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
-		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
-			ev_queue = EFX_QWORD_FIELD(*event,
-						   FSF_AZ_DRIVER_EV_SUBDATA);
-			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
-				tx_queue = efx_get_tx_queue(
-					efx, ev_queue / EFX_TXQ_TYPES,
-					ev_queue % EFX_TXQ_TYPES);
-				tx_queue->flushed = FLUSH_DONE;
-			}
-		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
-			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
-			ev_queue = EFX_QWORD_FIELD(
-				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
-			ev_failed = EFX_QWORD_FIELD(
-				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
-			if (ev_queue < efx->n_rx_channels) {
-				rx_queue = efx_get_rx_queue(efx, ev_queue);
-				rx_queue->flushed =
-					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
-			}
-		}
-
-		/* We're about to destroy the queue anyway, so
-		 * it's ok to throw away every non-flush event */
-		EFX_SET_QWORD(*event);
-
-		++read_ptr;
-	} while (read_ptr != end_ptr);
-
-	channel->eventq_read_ptr = read_ptr;
-}
-
-/* Handle tx and rx flushes at the same time, since they run in
- * parallel in the hardware and there's no reason for us to
- * serialise them */
-int efx_nic_flush_queues(struct efx_nic *efx)
-{
-	struct efx_channel *channel;
-	struct efx_rx_queue *rx_queue;
-	struct efx_tx_queue *tx_queue;
-	int i, tx_pending, rx_pending;
-
-	/* If necessary prepare the hardware for flushing */
-	efx->type->prepare_flush(efx);
-
-	/* Flush all tx queues in parallel */
-	efx_for_each_channel(channel, efx) {
-		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->initialised)
-				efx_flush_tx_queue(tx_queue);
-		}
-	}
-
-	/* The hardware supports four concurrent rx flushes, each of which may
-	 * need to be retried if there is an outstanding descriptor fetch */
-	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
-		rx_pending = tx_pending = 0;
-		efx_for_each_channel(channel, efx) {
-			efx_for_each_channel_rx_queue(rx_queue, channel) {
-				if (rx_queue->flushed == FLUSH_PENDING)
-					++rx_pending;
-			}
-		}
-		efx_for_each_channel(channel, efx) {
-			efx_for_each_channel_rx_queue(rx_queue, channel) {
-				if (rx_pending == EFX_RX_FLUSH_COUNT)
-					break;
-				if (rx_queue->flushed == FLUSH_FAILED ||
-				    rx_queue->flushed == FLUSH_NONE) {
-					efx_flush_rx_queue(rx_queue);
-					++rx_pending;
-				}
-			}
-			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
-				if (tx_queue->initialised &&
-				    tx_queue->flushed != FLUSH_DONE)
-					++tx_pending;
-			}
-		}
-
-		if (rx_pending == 0 && tx_pending == 0)
-			return 0;
-
-		msleep(EFX_FLUSH_INTERVAL);
-		efx_poll_flush_events(efx);
-	}
-
-	/* Mark the queues as all flushed. We're going to return failure
-	 * leading to a reset, or fake up success anyway */
-	efx_for_each_channel(channel, efx) {
-		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->initialised &&
-			    tx_queue->flushed != FLUSH_DONE)
-				netif_err(efx, hw, efx->net_dev,
-					  "tx queue %d flush command timed out\n",
-					  tx_queue->queue);
-			tx_queue->flushed = FLUSH_DONE;
-		}
-		efx_for_each_channel_rx_queue(rx_queue, channel) {
-			if (rx_queue->flushed != FLUSH_DONE)
-				netif_err(efx, hw, efx->net_dev,
-					  "rx queue %d flush command timed out\n",
-					  efx_rx_queue_index(rx_queue));
-			rx_queue->flushed = FLUSH_DONE;
-		}
-	}
-
-	return -ETIMEDOUT;
-}
-
-/**************************************************************************
- *
  * Hardware interrupts
  * The hardware interrupt handler does very little work; all the event
  * queue processing is carried out by per-channel tasklets.
@@ -1320,18 +1335,10 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
 
 void efx_nic_enable_interrupts(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-
 	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
 	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

-	/* Enable interrupts */
 	efx_nic_interrupts(efx, true, false);
-
-	/* Force processing of all the channels to get the EVQ RPTRs up to
-	   date */
-	efx_for_each_channel(channel, efx)
-		efx_schedule_channel(channel);
 }

 void efx_nic_disable_interrupts(struct efx_nic *efx)