Diffstat (limited to 'drivers/net/sfc/falcon.c')
-rw-r--r--	drivers/net/sfc/falcon.c | 260
1 file changed, 138 insertions(+), 122 deletions(-)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index e0c0b23f94ef..31ed1f49de00 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 /* Max number of internal errors. After this resets will not be performed */
 #define FALCON_MAX_INT_ERRORS 4
 
-/* Maximum period that we wait for flush events. If the flush event
- * doesn't arrive in this period of time then we check if the queue
- * was disabled anyway. */
-#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
+/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
+ */
+#define FALCON_FLUSH_INTERVAL 10
+#define FALCON_FLUSH_POLL_COUNT 100
 
 /**************************************************************************
  *
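For reference, the replacement constants bound the total time spent waiting for flushes: the driver sleeps FALCON_FLUSH_INTERVAL ms per poll and gives up after FALCON_FLUSH_POLL_COUNT polls, so the worst case is

	max wait = FALCON_FLUSH_INTERVAL * FALCON_FLUSH_POLL_COUNT
	         = 10 ms * 100
	         = 1000 ms

versus the old scheme's single 10 ms msleep(FALCON_FLUSH_TIMEOUT) per flush attempt.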
@@ -452,6 +452,8 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
 
+	tx_queue->flushed = false;
+
 	/* Pin TX descriptor ring */
 	falcon_init_special_buffer(efx, &tx_queue->txd);
 
@@ -492,60 +494,16 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
 	}
 }
 
-static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
+static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct efx_channel *channel = &efx->channel[0];
 	efx_oword_t tx_flush_descq;
-	unsigned int read_ptr, i;
 
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     TX_FLUSH_DESCQ_CMD, 1,
 			     TX_FLUSH_DESCQ, tx_queue->queue);
 	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
-	msleep(FALCON_FLUSH_TIMEOUT);
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	/* Look for a flush completed event */
-	read_ptr = channel->eventq_read_ptr;
-	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
-		if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
-		    (ev_queue == tx_queue->queue)) {
-			EFX_LOG(efx, "tx queue %d flush command successful\n",
-				tx_queue->queue);
-			return 0;
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	}
-
-	if (EFX_WORKAROUND_11557(efx)) {
-		efx_oword_t reg;
-		bool enabled;
-
-		falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
-				  tx_queue->queue);
-		enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
-		if (!enabled) {
-			EFX_LOG(efx, "tx queue %d disabled without a "
-				"flush event seen\n", tx_queue->queue);
-			return 0;
-		}
-	}
-
-	EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
-	return -ETIMEDOUT;
 }
 
 void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -553,9 +511,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_desc_ptr;
 
-	/* Stop the hardware using the queue */
-	if (falcon_flush_tx_queue(tx_queue))
-		EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
+	/* The queue should have been flushed */
+	WARN_ON(!tx_queue->flushed);
 
 	/* Remove TX descriptor ring from card */
 	EFX_ZERO_OWORD(tx_desc_ptr);
@@ -643,6 +600,8 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 		rx_queue->queue, rx_queue->rxd.index,
 		rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
+	rx_queue->flushed = false;
+
 	/* Pin RX descriptor ring */
 	falcon_init_special_buffer(efx, &rx_queue->rxd);
 
@@ -663,11 +622,9 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
 		rx_queue->queue);
 }
 
-static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
+static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	struct efx_channel *channel = &efx->channel[0];
-	unsigned int read_ptr, i;
 	efx_oword_t rx_flush_descq;
 
 	/* Post a flush command */
@@ -675,76 +632,15 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
 			     RX_FLUSH_DESCQ_CMD, 1,
 			     RX_FLUSH_DESCQ, rx_queue->queue);
 	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
-	msleep(FALCON_FLUSH_TIMEOUT);
-
-	if (EFX_WORKAROUND_7803(efx))
-		return 0;
-
-	/* Look for a flush completed event */
-	read_ptr = channel->eventq_read_ptr;
-	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
-		efx_qword_t *event = falcon_event(channel, read_ptr);
-		int ev_code, ev_sub_code, ev_queue;
-		bool ev_failed;
-		if (!falcon_event_present(event))
-			break;
-
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
-		ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
-
-		if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
-		    (ev_queue == rx_queue->queue)) {
-			if (ev_failed) {
-				EFX_INFO(efx, "rx queue %d flush command "
-					 "failed\n", rx_queue->queue);
-				return -EAGAIN;
-			} else {
-				EFX_LOG(efx, "rx queue %d flush command "
-					"successful\n", rx_queue->queue);
-				return 0;
-			}
-		}
-
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
-	}
-
-	if (EFX_WORKAROUND_11557(efx)) {
-		efx_oword_t reg;
-		bool enabled;
-
-		falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
-				  rx_queue->queue);
-		enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
-		if (!enabled) {
-			EFX_LOG(efx, "rx queue %d disabled without a "
-				"flush event seen\n", rx_queue->queue);
-			return 0;
-		}
-	}
-
-	EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
-	return -ETIMEDOUT;
 }
 
 void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 {
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
-	int i, rc;
 
-	/* Try and flush the rx queue. This may need to be repeated */
-	for (i = 0; i < 5; i++) {
-		rc = falcon_flush_rx_queue(rx_queue);
-		if (rc == -EAGAIN)
-			continue;
-		break;
-	}
-	if (rc) {
-		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
-		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
-	}
+	/* The queue should already have been flushed */
+	WARN_ON(!rx_queue->flushed);
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -1007,7 +903,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 		is_phy_event = true;
 
 	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
-	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
+	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
 		is_phy_event = true;
 
 	if (is_phy_event) {
@@ -1255,6 +1151,121 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
 	falcon_generate_event(channel, &test_event);
 }
 
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+
+static void falcon_poll_flush_events(struct efx_nic *efx)
+{
+	struct efx_channel *channel = &efx->channel[0];
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	unsigned int read_ptr, i;
+
+	read_ptr = channel->eventq_read_ptr;
+	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+		efx_qword_t *event = falcon_event(channel, read_ptr);
+		int ev_code, ev_sub_code, ev_queue;
+		bool ev_failed;
+		if (!falcon_event_present(event))
+			break;
+
+		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
+		if (ev_code != DRIVER_EV_DECODE)
+			continue;
+
+		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+		switch (ev_sub_code) {
+		case TX_DESCQ_FLS_DONE_EV_DECODE:
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   DRIVER_EV_TX_DESCQ_ID);
+			if (ev_queue < EFX_TX_QUEUE_COUNT) {
+				tx_queue = efx->tx_queue + ev_queue;
+				tx_queue->flushed = true;
+			}
+			break;
+		case RX_DESCQ_FLS_DONE_EV_DECODE:
+			ev_queue = EFX_QWORD_FIELD(*event,
+						   DRIVER_EV_RX_DESCQ_ID);
+			ev_failed = EFX_QWORD_FIELD(*event,
+						    DRIVER_EV_RX_FLUSH_FAIL);
+			if (ev_queue < efx->n_rx_queues) {
+				rx_queue = efx->rx_queue + ev_queue;
+
+				/* retry the rx flush */
+				if (ev_failed)
+					falcon_flush_rx_queue(rx_queue);
+				else
+					rx_queue->flushed = true;
+			}
+			break;
+		}
+
+		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+	}
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int falcon_flush_queues(struct efx_nic *efx)
+{
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	int i;
+	bool outstanding;
+
+	/* Issue flush requests */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		tx_queue->flushed = false;
+		falcon_flush_tx_queue(tx_queue);
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		rx_queue->flushed = false;
+		falcon_flush_rx_queue(rx_queue);
+	}
+
+	/* Poll the evq looking for flush completions. Since we're not pushing
+	 * any more rx or tx descriptors at this point, we're in no danger of
+	 * overflowing the evq whilst we wait */
+	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
+		msleep(FALCON_FLUSH_INTERVAL);
+		falcon_poll_flush_events(efx);
+
+		/* Check if every queue has been successfully flushed */
+		outstanding = false;
+		efx_for_each_tx_queue(tx_queue, efx)
+			outstanding |= !tx_queue->flushed;
+		efx_for_each_rx_queue(rx_queue, efx)
+			outstanding |= !rx_queue->flushed;
+		if (!outstanding)
+			return 0;
+	}
+
+	/* Mark the queues as all flushed. We're going to return failure
+	 * leading to a reset, or fake up success anyway. "flushed" now
+	 * indicates that we tried to flush. */
+	efx_for_each_tx_queue(tx_queue, efx) {
+		if (!tx_queue->flushed)
+			EFX_ERR(efx, "tx queue %d flush command timed out\n",
+				tx_queue->queue);
+		tx_queue->flushed = true;
+	}
+	efx_for_each_rx_queue(rx_queue, efx) {
+		if (!rx_queue->flushed)
+			EFX_ERR(efx, "rx queue %d flush command timed out\n",
+				rx_queue->queue);
+		rx_queue->flushed = true;
+	}
+
+	if (EFX_WORKAROUND_7803(efx))
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
 
 /**************************************************************************
  *
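To see how the pieces are meant to fit together: flushing now happens once, for all queues in parallel, via falcon_flush_queues(), and falcon_fini_tx()/falcon_fini_rx() merely assert that it already ran. A minimal sketch of the assumed caller ordering follows; the function name efx_teardown_queues_sketch() and its placement are hypothetical, not part of this patch:

	static void efx_teardown_queues_sketch(struct efx_nic *efx)
	{
		struct efx_tx_queue *tx_queue;
		struct efx_rx_queue *rx_queue;

		/* One combined flush request + completion poll for all queues */
		if (falcon_flush_queues(efx))
			EFX_ERR(efx, "failed to flush queues\n");

		/* Each fini now only checks the per-queue "flushed" flag */
		efx_for_each_tx_queue(tx_queue, efx)
			falcon_fini_tx(tx_queue);
		efx_for_each_rx_queue(rx_queue, efx)
			falcon_fini_rx(rx_queue);
	}

Note that falcon_flush_queues() marks every queue "flushed" even on timeout, so the fini routines do not trip their WARN_ON() in that path; the -ETIMEDOUT return is what steers the caller towards a reset.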
@@ -1363,10 +1374,11 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
 	}
 
-	/* Disable DMA bus mastering on both devices */
+	/* Disable both devices */
 	pci_disable_device(efx->pci_dev);
 	if (FALCON_IS_DUAL_FUNC(efx))
 		pci_disable_device(nic_data->pci_dev2);
+	falcon_disable_interrupts(efx);
 
 	if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
 		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
@@ -1593,7 +1605,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 **************************************************************************
 */
 
-#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
 
 /* Wait for SPI command completion */
 static int falcon_spi_wait(struct efx_nic *efx)
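The cast added to FALCON_SPI_MAX_LEN is most likely there to satisfy the kernel's type-checked min() macro (my reading; the patch itself gives no rationale): sizeof() yields size_t, min() refuses mismatched argument types, and the SPI transfer lengths in this driver are plain unsigned. A minimal illustration, with hypothetical variable names:

	unsigned int len = 12;	/* e.g. a caller-supplied SPI transfer length */
	/* min(len, sizeof(efx_oword_t)) would mix unsigned int with size_t
	 * and trip min()'s type check; with the cast both sides match: */
	unsigned int block_len = min(len, FALCON_SPI_MAX_LEN);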
@@ -1942,8 +1954,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 
 	/* Wait for transfer to complete */
 	for (i = 0; i < 400; i++) {
-		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
+		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
+			rmb(); /* Ensure the stats are valid. */
 			return 0;
+		}
 		udelay(10);
 	}
 
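The added rmb() closes an ordering hole that the patch leaves implicit: the NIC DMAs the statistics into host memory and then writes the FALCON_STATS_DONE marker, so the CPU must not let its reads of the stats buffer be reordered before its read of the marker. The generic reader-side pattern, sketched with a hypothetical consume_stats() helper:

	/* Device-side order: write stats buffer, then write done marker. */
	while (*(volatile u32 *)dma_done != FALCON_STATS_DONE)
		udelay(10);		/* spin until the device sets the marker */
	rmb();				/* marker read must precede data reads */
	consume_stats(efx);		/* hypothetical: now safe to read the stats */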
@@ -2758,6 +2772,8 @@ int falcon_probe_nic(struct efx_nic *efx)
 
 	/* Allocate storage for hardware specific data */
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+	if (!nic_data)
+		return -ENOMEM;
 	efx->nic_data = nic_data;
 
 	/* Determine number of ports etc. */