Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c        |  15
-rw-r--r--  drivers/net/sfc/falcon.c     | 247
-rw-r--r--  drivers/net/sfc/falcon.h     |   1
-rw-r--r--  drivers/net/sfc/net_driver.h |   4
4 files changed, 141 insertions(+), 126 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f65e313c2be2..06ea71c7e34e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -445,10 +445,17 @@ static void efx_fini_channels(struct efx_nic *efx)
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	int rc;
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 	BUG_ON(efx->port_enabled);
 
+	rc = falcon_flush_queues(efx);
+	if (rc)
+		EFX_ERR(efx, "failed to flush queues\n");
+	else
+		EFX_LOG(efx, "successfully flushed all queues\n");
+
 	efx_for_each_channel(channel, efx) {
 		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
 
@@ -456,13 +463,6 @@ static void efx_fini_channels(struct efx_nic *efx)
 			efx_fini_rx_queue(rx_queue);
 		efx_for_each_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
-	}
-
-	/* Do the event queues last so that we can handle flush events
-	 * for all DMA queues. */
-	efx_for_each_channel(channel, efx) {
-		EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
-
 		efx_fini_eventq(channel);
 	}
 }
@@ -1092,7 +1092,6 @@ static void efx_stop_all(struct efx_nic *efx)
 
 	/* Isolate the MAC from the TX and RX engines, so that queue
 	 * flushes will complete in a timely fashion. */
-	falcon_deconfigure_mac_wrapper(efx);
 	falcon_drain_tx_fifo(efx);
 
 	/* Stop the kernel transmit interface late, so the watchdog
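
Taken together, the two hunks above make efx_fini_channels() flush every DMA queue in one pass, while the event queues are still live to receive the flush-done events, and then tear down each channel's RX, TX and event queues in a single loop. Below is a minimal sketch of the resulting function, reconstructed from the post-patch side of the diff; the loop headers that fall outside the hunk context are assumed:

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Flush all DMA queues up front, before any event queue is
	 * torn down, so flush completion events can still arrive */
	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}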
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index ad192f82c5e6..70c681ecdabe 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 /* Max number of internal errors. After this resets will not be performed */
 #define FALCON_MAX_INT_ERRORS 4
 
-/* Maximum period that we wait for flush events. If the flush event
- * doesn't arrive in this period of time then we check if the queue
- * was disabled anyway. */
-#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
+/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
+ */
+#define FALCON_FLUSH_INTERVAL 10
+#define FALCON_FLUSH_POLL_COUNT 100
 
 /**************************************************************************
  *
@@ -452,6 +452,8 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
 
+	tx_queue->flushed = false;
+
 	/* Pin TX descriptor ring */
 	falcon_init_special_buffer(efx, &tx_queue->txd);
 
@@ -492,60 +494,16 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	}
 }
 
-static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
+static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct efx_channel *channel = &efx->channel[0];
 	efx_oword_t tx_flush_descq;
-	unsigned int read_ptr, i;
 
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     TX_FLUSH_DESCQ_CMD, 1,
 			     TX_FLUSH_DESCQ, tx_queue->queue);
 	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
-	msleep(FALCON_FLUSH_TIMEOUT);
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	/* Look for a flush completed event */
-	read_ptr = channel->eventq_read_ptr;
-	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
-		if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
-		    (ev_queue == tx_queue->queue)) {
-			EFX_LOG(efx, "tx queue %d flush command successful\n",
-				tx_queue->queue);
-			return 0;
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	}
-
-	if (EFX_WORKAROUND_11557(efx)) {
-		efx_oword_t reg;
-		bool enabled;
-
-		falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
-				  tx_queue->queue);
-		enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
-		if (!enabled) {
-			EFX_LOG(efx, "tx queue %d disabled without a "
-				"flush event seen\n", tx_queue->queue);
-			return 0;
-		}
-	}
-
-	EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
-	return -ETIMEDOUT;
 }
 
 void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -553,9 +511,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_desc_ptr;
 
-	/* Stop the hardware using the queue */
-	if (falcon_flush_tx_queue(tx_queue))
-		EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
+	/* The queue should have been flushed */
+	WARN_ON(!tx_queue->flushed);
 
 	/* Remove TX descriptor ring from card */
 	EFX_ZERO_OWORD(tx_desc_ptr);
@@ -643,6 +600,8 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 		rx_queue->queue, rx_queue->rxd.index,
 		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
+	rx_queue->flushed = false;
+
 	/* Pin RX descriptor ring */
 	falcon_init_special_buffer(efx, &rx_queue->rxd);
 
@@ -663,11 +622,9 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 			   rx_queue->queue);
 }
 
-static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
+static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	struct efx_channel *channel = &efx->channel[0];
-	unsigned int read_ptr, i;
 	efx_oword_t rx_flush_descq;
 
 	/* Post a flush command */
@@ -675,76 +632,15 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 			     RX_FLUSH_DESCQ_CMD, 1,
 			     RX_FLUSH_DESCQ, rx_queue->queue);
 	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
-	msleep(FALCON_FLUSH_TIMEOUT);
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	/* Look for a flush completed event */
-	read_ptr = channel->eventq_read_ptr;
-	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		bool ev_failed;
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
-		ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
-
-		if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
-		    (ev_queue == rx_queue->queue)) {
-			if (ev_failed) {
-				EFX_INFO(efx, "rx queue %d flush command "
-					 "failed\n", rx_queue->queue);
-				return -EAGAIN;
-			} else {
-				EFX_LOG(efx, "rx queue %d flush command "
-					"successful\n", rx_queue->queue);
-				return 0;
-			}
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	}
-
-	if (EFX_WORKAROUND_11557(efx)) {
-		efx_oword_t reg;
-		bool enabled;
-
-		falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
-				  rx_queue->queue);
-		enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
-		if (!enabled) {
-			EFX_LOG(efx, "rx queue %d disabled without a "
-				"flush event seen\n", rx_queue->queue);
-			return 0;
-		}
-	}
-
-	EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
-	return -ETIMEDOUT;
 }
 
 void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 {
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
-	int i, rc;
 
-	/* Try and flush the rx queue. This may need to be repeated */
-	for (i = 0; i < 5; i++) {
-		rc = falcon_flush_rx_queue(rx_queue);
-		if (rc == -EAGAIN)
-			continue;
-		break;
-	}
-	if (rc) {
-		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
-		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
-	}
+	/* The queue should already have been flushed */
+	WARN_ON(!rx_queue->flushed);
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -1255,6 +1151,121 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
 	falcon_generate_event(channel, &test_event);
 }
 
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+
+static void falcon_poll_flush_events(struct efx_nic *efx)
+{
+	struct efx_channel *channel = &efx->channel[0];
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	unsigned int read_ptr, i;
+
+	read_ptr = channel->eventq_read_ptr;
+	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+		efx_qword_t *event = falcon_event(channel, read_ptr);
+		int ev_code, ev_sub_code, ev_queue;
+		bool ev_failed;
+		if (!falcon_event_present(event))
+			break;
+
+		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
+		if (ev_code != DRIVER_EV_DECODE)
+			continue;
+
+		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+		switch (ev_sub_code) {
+		case TX_DESCQ_FLS_DONE_EV_DECODE:
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   DRIVER_EV_TX_DESCQ_ID);
+			if (ev_queue < EFX_TX_QUEUE_COUNT) {
+				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue->flushed = true;
+			}
+			break;
+		case RX_DESCQ_FLS_DONE_EV_DECODE:
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   DRIVER_EV_RX_DESCQ_ID);
+			ev_failed = EFX_QWORD_FIELD(*event,
+						    DRIVER_EV_RX_FLUSH_FAIL);
+			if (ev_queue < efx->n_rx_queues) {
+				rx_queue = efx->rx_queue + ev_queue;
+
+				/* retry the rx flush */
+				if (ev_failed)
+					falcon_flush_rx_queue(rx_queue);
+				else
+					rx_queue->flushed = true;
+			}
+			break;
+		}
+
+		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+	}
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int falcon_flush_queues(struct efx_nic *efx)
+{
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int i;
+	bool outstanding;
+
+	/* Issue flush requests */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		tx_queue->flushed = false;
+		falcon_flush_tx_queue(tx_queue);
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		rx_queue->flushed = false;
+		falcon_flush_rx_queue(rx_queue);
+	}
+
+	/* Poll the evq looking for flush completions. Since we're not pushing
+	 * any more rx or tx descriptors at this point, we're in no danger of
+	 * overflowing the evq whilst we wait */
+	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
+		msleep(FALCON_FLUSH_INTERVAL);
+		falcon_poll_flush_events(efx);
+
+		/* Check if every queue has been successfully flushed */
+		outstanding = false;
+		efx_for_each_tx_queue(tx_queue, efx)
+			outstanding |= !tx_queue->flushed;
+		efx_for_each_rx_queue(rx_queue, efx)
+			outstanding |= !rx_queue->flushed;
+		if (!outstanding)
+			return 0;
+	}
+
+	/* Mark the queues as all flushed. We're going to return failure
+	 * leading to a reset, or fake up success anyway. "flushed" now
+	 * indicates that we tried to flush. */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		if (!tx_queue->flushed)
+			EFX_ERR(efx, "tx queue %d flush command timed out\n",
+				tx_queue->queue);
+		tx_queue->flushed = true;
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		if (!rx_queue->flushed)
+			EFX_ERR(efx, "rx queue %d flush command timed out\n",
+				rx_queue->queue);
+		rx_queue->flushed = true;
+	}
+
+	if (EFX_WORKAROUND_7803(efx))
+		return 0;
+
+	return -ETIMEDOUT;
+}
 
 /**************************************************************************
  *
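
Two behavioural details of the new flush path are easy to miss in the hunks above. First, a failed RX flush is now re-posted immediately from falcon_poll_flush_events() instead of going round the old five-attempt -EAGAIN loop in falcon_fini_rx(). Second, the timeout budget changes shape: falcon_flush_queues() sleeps FALCON_FLUSH_INTERVAL milliseconds before each of up to FALCON_FLUSH_POLL_COUNT polls, shared across all queues. As an illustration only (this comment block is not part of the patch):

/* Worst-case wait in falcon_flush_queues(), shared by all queues:
 *
 *   FALCON_FLUSH_POLL_COUNT * FALCON_FLUSH_INTERVAL
 *     = 100 polls * 10 ms
 *     = 1000 ms
 *
 * The old code slept a flat FALCON_FLUSH_TIMEOUT (10 ms) once per
 * queue and then scanned the event queue a single time, so a slow
 * but successful flush could be misreported as a timeout. */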
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 30d61e48ac60..be025ba7a6c6 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -86,6 +86,7 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
 extern int falcon_probe_nic(struct efx_nic *efx);
 extern int falcon_probe_resources(struct efx_nic *efx);
 extern int falcon_init_nic(struct efx_nic *efx);
+extern int falcon_flush_queues(struct efx_nic *efx);
 extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
 extern void falcon_remove_resources(struct efx_nic *efx);
 extern void falcon_remove_nic(struct efx_nic *efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index be3d2ba3b74e..868ad1e703f1 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -160,6 +160,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
+ * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
  * @stopped: Stopped count.
@@ -192,6 +193,7 @@ struct efx_tx_queue {
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
+	bool flushed;
 
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
@@ -260,6 +262,7 @@ struct efx_rx_buffer {
  *	the remaining space in the allocation.
  * @buf_dma_addr: Page's DMA address.
  * @buf_data: Page's host address.
+ * @flushed: Used when handling queue flushing
  */
 struct efx_rx_queue {
 	struct efx_nic *efx;
@@ -285,6 +288,7 @@ struct efx_rx_queue {
 	struct page *buf_page;
 	dma_addr_t buf_dma_addr;
 	char *buf_data;
+	bool flushed;
 };
 
 /**
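
The net_driver.h hunks add the bool flushed flag that the falcon.c changes key off. Its lifecycle, summarised from the hunks above as a comment sketch (not code from the patch):

/* Lifecycle of the per-queue "flushed" flag:
 *
 *   falcon_init_tx() / falcon_init_rx():
 *       queue->flushed = false;          queue ready for traffic
 *   falcon_flush_queues():
 *       posts a flush command per queue, then polls channel 0's
 *       event queue; a flush-done event sets queue->flushed = true
 *       (a failed RX flush is simply re-posted)
 *   on timeout:
 *       logs an error but still sets flushed = true, so from here
 *       on the flag means "a flush was attempted"
 *   falcon_fini_tx() / falcon_fini_rx():
 *       WARN_ON(!queue->flushed);        teardown sanity check
 */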