Diffstat (limited to 'drivers/net/sfc/falcon.c')
-rw-r--r-- | drivers/net/sfc/falcon.c | 767
1 file changed, 550 insertions, 217 deletions
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9138ee5b7b7b..e0c0b23f94ef 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -242,7 +242,7 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = { | |||
242 | * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing | 242 | * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing |
243 | * it to be used for event queues, descriptor rings etc. | 243 | * it to be used for event queues, descriptor rings etc. |
244 | */ | 244 | */ |
245 | static int | 245 | static void |
246 | falcon_init_special_buffer(struct efx_nic *efx, | 246 | falcon_init_special_buffer(struct efx_nic *efx, |
247 | struct efx_special_buffer *buffer) | 247 | struct efx_special_buffer *buffer) |
248 | { | 248 | { |
@@ -266,8 +266,6 @@ falcon_init_special_buffer(struct efx_nic *efx, | |||
266 | BUF_OWNER_ID_FBUF, 0); | 266 | BUF_OWNER_ID_FBUF, 0); |
267 | falcon_write_sram(efx, &buf_desc, index); | 267 | falcon_write_sram(efx, &buf_desc, index); |
268 | } | 268 | } |
269 | |||
270 | return 0; | ||
271 | } | 269 | } |
272 | 270 | ||
273 | /* Unmaps a buffer from Falcon and clears the buffer table entries */ | 271 | /* Unmaps a buffer from Falcon and clears the buffer table entries */ |
@@ -449,16 +447,13 @@ int falcon_probe_tx(struct efx_tx_queue *tx_queue) | |||
449 | sizeof(efx_qword_t)); | 447 | sizeof(efx_qword_t)); |
450 | } | 448 | } |
451 | 449 | ||
452 | int falcon_init_tx(struct efx_tx_queue *tx_queue) | 450 | void falcon_init_tx(struct efx_tx_queue *tx_queue) |
453 | { | 451 | { |
454 | efx_oword_t tx_desc_ptr; | 452 | efx_oword_t tx_desc_ptr; |
455 | struct efx_nic *efx = tx_queue->efx; | 453 | struct efx_nic *efx = tx_queue->efx; |
456 | int rc; | ||
457 | 454 | ||
458 | /* Pin TX descriptor ring */ | 455 | /* Pin TX descriptor ring */ |
459 | rc = falcon_init_special_buffer(efx, &tx_queue->txd); | 456 | falcon_init_special_buffer(efx, &tx_queue->txd); |
460 | if (rc) | ||
461 | return rc; | ||
462 | 457 | ||
463 | /* Push TX descriptor ring to card */ | 458 | /* Push TX descriptor ring to card */ |
464 | EFX_POPULATE_OWORD_10(tx_desc_ptr, | 459 | EFX_POPULATE_OWORD_10(tx_desc_ptr, |
@@ -466,7 +461,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
466 | TX_ISCSI_DDIG_EN, 0, | 461 | TX_ISCSI_DDIG_EN, 0, |
467 | TX_ISCSI_HDIG_EN, 0, | 462 | TX_ISCSI_HDIG_EN, 0, |
468 | TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | 463 | TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, |
469 | TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum, | 464 | TX_DESCQ_EVQ_ID, tx_queue->channel->channel, |
470 | TX_DESCQ_OWNER_ID, 0, | 465 | TX_DESCQ_OWNER_ID, 0, |
471 | TX_DESCQ_LABEL, tx_queue->queue, | 466 | TX_DESCQ_LABEL, tx_queue->queue, |
472 | TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, | 467 | TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, |
@@ -474,9 +469,9 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
474 | TX_NON_IP_DROP_DIS_B0, 1); | 469 | TX_NON_IP_DROP_DIS_B0, 1); |
475 | 470 | ||
476 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 471 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
477 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); | 472 | int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; |
478 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); | 473 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum); |
479 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); | 474 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum); |
480 | } | 475 | } |
481 | 476 | ||
482 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 477 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
@@ -485,17 +480,16 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
485 | if (falcon_rev(efx) < FALCON_REV_B0) { | 480 | if (falcon_rev(efx) < FALCON_REV_B0) { |
486 | efx_oword_t reg; | 481 | efx_oword_t reg; |
487 | 482 | ||
488 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ | 483 | /* Only 128 bits in this register */ |
484 | BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); | ||
489 | 485 | ||
490 | falcon_read(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | 486 | falcon_read(efx, ®, TX_CHKSM_CFG_REG_KER_A1); |
491 | if (efx->net_dev->features & NETIF_F_IP_CSUM) | 487 | if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) |
492 | clear_bit_le(tx_queue->queue, (void *)®); | 488 | clear_bit_le(tx_queue->queue, (void *)®); |
493 | else | 489 | else |
494 | set_bit_le(tx_queue->queue, (void *)®); | 490 | set_bit_le(tx_queue->queue, (void *)®); |
495 | falcon_write(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | 491 | falcon_write(efx, ®, TX_CHKSM_CFG_REG_KER_A1); |
496 | } | 492 | } |
497 | |||
498 | return 0; | ||
499 | } | 493 | } |
500 | 494 | ||
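The B0 checksum change above stops consulting net_dev->features and instead keys hardware checksum offload off the queue identity: one TX queue per port carries checksummed traffic (EFX_TX_QUEUE_OFFLOAD_CSUM, used above) and another carries the rest. The sketch below illustrates how a transmit path could then pick a queue per skb; the helper name, the EFX_TX_QUEUE_NO_CSUM constant and the efx->tx_queue[] layout are assumptions about the companion efx changes, not taken from this diff.

/* Illustrative sketch only; not part of this patch. */
static inline struct efx_tx_queue *
efx_pick_tx_queue(struct efx_nic *efx, const struct sk_buff *skb)
{
        /* Checksummed skbs go to the queue whose hardware checksum
         * generation was left enabled in falcon_init_tx(); everything
         * else goes to the queue where it was disabled. */
        unsigned int queue = (skb->ip_summed == CHECKSUM_PARTIAL) ?
                EFX_TX_QUEUE_OFFLOAD_CSUM : EFX_TX_QUEUE_NO_CSUM;

        return &efx->tx_queue[queue];
}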
501 | static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) | 495 | static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) |
@@ -538,7 +532,7 @@ static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) | |||
538 | 532 | ||
539 | if (EFX_WORKAROUND_11557(efx)) { | 533 | if (EFX_WORKAROUND_11557(efx)) { |
540 | efx_oword_t reg; | 534 | efx_oword_t reg; |
541 | int enabled; | 535 | bool enabled; |
542 | 536 | ||
543 | falcon_read_table(efx, ®, efx->type->txd_ptr_tbl_base, | 537 | falcon_read_table(efx, ®, efx->type->txd_ptr_tbl_base, |
544 | tx_queue->queue); | 538 | tx_queue->queue); |
@@ -638,29 +632,26 @@ int falcon_probe_rx(struct efx_rx_queue *rx_queue) | |||
638 | sizeof(efx_qword_t)); | 632 | sizeof(efx_qword_t)); |
639 | } | 633 | } |
640 | 634 | ||
641 | int falcon_init_rx(struct efx_rx_queue *rx_queue) | 635 | void falcon_init_rx(struct efx_rx_queue *rx_queue) |
642 | { | 636 | { |
643 | efx_oword_t rx_desc_ptr; | 637 | efx_oword_t rx_desc_ptr; |
644 | struct efx_nic *efx = rx_queue->efx; | 638 | struct efx_nic *efx = rx_queue->efx; |
645 | int rc; | 639 | bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0; |
646 | int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; | 640 | bool iscsi_digest_en = is_b0; |
647 | int iscsi_digest_en = is_b0; | ||
648 | 641 | ||
649 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | 642 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", |
650 | rx_queue->queue, rx_queue->rxd.index, | 643 | rx_queue->queue, rx_queue->rxd.index, |
651 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | 644 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); |
652 | 645 | ||
653 | /* Pin RX descriptor ring */ | 646 | /* Pin RX descriptor ring */ |
654 | rc = falcon_init_special_buffer(efx, &rx_queue->rxd); | 647 | falcon_init_special_buffer(efx, &rx_queue->rxd); |
655 | if (rc) | ||
656 | return rc; | ||
657 | 648 | ||
658 | /* Push RX descriptor ring to card */ | 649 | /* Push RX descriptor ring to card */ |
659 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | 650 | EFX_POPULATE_OWORD_10(rx_desc_ptr, |
660 | RX_ISCSI_DDIG_EN, iscsi_digest_en, | 651 | RX_ISCSI_DDIG_EN, iscsi_digest_en, |
661 | RX_ISCSI_HDIG_EN, iscsi_digest_en, | 652 | RX_ISCSI_HDIG_EN, iscsi_digest_en, |
662 | RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | 653 | RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, |
663 | RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum, | 654 | RX_DESCQ_EVQ_ID, rx_queue->channel->channel, |
664 | RX_DESCQ_OWNER_ID, 0, | 655 | RX_DESCQ_OWNER_ID, 0, |
665 | RX_DESCQ_LABEL, rx_queue->queue, | 656 | RX_DESCQ_LABEL, rx_queue->queue, |
666 | RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, | 657 | RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, |
@@ -670,7 +661,6 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue) | |||
670 | RX_DESCQ_EN, 1); | 661 | RX_DESCQ_EN, 1); |
671 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 662 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
672 | rx_queue->queue); | 663 | rx_queue->queue); |
673 | return 0; | ||
674 | } | 664 | } |
675 | 665 | ||
676 | static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) | 666 | static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) |
@@ -694,7 +684,8 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) | |||
694 | read_ptr = channel->eventq_read_ptr; | 684 | read_ptr = channel->eventq_read_ptr; |
695 | for (i = 0; i < FALCON_EVQ_SIZE; ++i) { | 685 | for (i = 0; i < FALCON_EVQ_SIZE; ++i) { |
696 | efx_qword_t *event = falcon_event(channel, read_ptr); | 686 | efx_qword_t *event = falcon_event(channel, read_ptr); |
697 | int ev_code, ev_sub_code, ev_queue, ev_failed; | 687 | int ev_code, ev_sub_code, ev_queue; |
688 | bool ev_failed; | ||
698 | if (!falcon_event_present(event)) | 689 | if (!falcon_event_present(event)) |
699 | break; | 690 | break; |
700 | 691 | ||
@@ -721,7 +712,7 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) | |||
721 | 712 | ||
722 | if (EFX_WORKAROUND_11557(efx)) { | 713 | if (EFX_WORKAROUND_11557(efx)) { |
723 | efx_oword_t reg; | 714 | efx_oword_t reg; |
724 | int enabled; | 715 | bool enabled; |
725 | 716 | ||
726 | falcon_read_table(efx, ®, efx->type->rxd_ptr_tbl_base, | 717 | falcon_read_table(efx, ®, efx->type->rxd_ptr_tbl_base, |
727 | rx_queue->queue); | 718 | rx_queue->queue); |
@@ -793,7 +784,7 @@ void falcon_eventq_read_ack(struct efx_channel *channel) | |||
793 | 784 | ||
794 | EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); | 785 | EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); |
795 | falcon_writel_table(efx, ®, efx->type->evq_rptr_tbl_base, | 786 | falcon_writel_table(efx, ®, efx->type->evq_rptr_tbl_base, |
796 | channel->evqnum); | 787 | channel->channel); |
797 | } | 788 | } |
798 | 789 | ||
799 | /* Use HW to insert a SW defined event */ | 790 | /* Use HW to insert a SW defined event */ |
@@ -802,7 +793,7 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event) | |||
802 | efx_oword_t drv_ev_reg; | 793 | efx_oword_t drv_ev_reg; |
803 | 794 | ||
804 | EFX_POPULATE_OWORD_2(drv_ev_reg, | 795 | EFX_POPULATE_OWORD_2(drv_ev_reg, |
805 | DRV_EV_QID, channel->evqnum, | 796 | DRV_EV_QID, channel->channel, |
806 | DRV_EV_DATA, | 797 | DRV_EV_DATA, |
807 | EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); | 798 | EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); |
808 | falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); | 799 | falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); |
@@ -813,8 +804,8 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event) | |||
813 | * Falcon batches TX completion events; the message we receive is of | 804 | * Falcon batches TX completion events; the message we receive is of |
814 | * the form "complete all TX events up to this index". | 805 | * the form "complete all TX events up to this index". |
815 | */ | 806 | */ |
816 | static inline void falcon_handle_tx_event(struct efx_channel *channel, | 807 | static void falcon_handle_tx_event(struct efx_channel *channel, |
817 | efx_qword_t *event) | 808 | efx_qword_t *event) |
818 | { | 809 | { |
819 | unsigned int tx_ev_desc_ptr; | 810 | unsigned int tx_ev_desc_ptr; |
820 | unsigned int tx_ev_q_label; | 811 | unsigned int tx_ev_q_label; |
@@ -847,39 +838,19 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel, | |||
847 | } | 838 | } |
848 | } | 839 | } |
849 | 840 | ||
850 | /* Check received packet's destination MAC address. */ | ||
851 | static int check_dest_mac(struct efx_rx_queue *rx_queue, | ||
852 | const efx_qword_t *event) | ||
853 | { | ||
854 | struct efx_rx_buffer *rx_buf; | ||
855 | struct efx_nic *efx = rx_queue->efx; | ||
856 | int rx_ev_desc_ptr; | ||
857 | struct ethhdr *eh; | ||
858 | |||
859 | if (efx->promiscuous) | ||
860 | return 1; | ||
861 | |||
862 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); | ||
863 | rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr); | ||
864 | eh = (struct ethhdr *)rx_buf->data; | ||
865 | if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN)) | ||
866 | return 0; | ||
867 | return 1; | ||
868 | } | ||
869 | |||
870 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | 841 | /* Detect errors included in the rx_evt_pkt_ok bit. */ |
871 | static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | 842 | static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, |
872 | const efx_qword_t *event, | 843 | const efx_qword_t *event, |
873 | unsigned *rx_ev_pkt_ok, | 844 | bool *rx_ev_pkt_ok, |
874 | int *discard, int byte_count) | 845 | bool *discard) |
875 | { | 846 | { |
876 | struct efx_nic *efx = rx_queue->efx; | 847 | struct efx_nic *efx = rx_queue->efx; |
877 | unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | 848 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; |
878 | unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | 849 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; |
879 | unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | 850 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; |
880 | unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm; | 851 | bool rx_ev_other_err, rx_ev_pause_frm; |
881 | unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; | 852 | bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; |
882 | int snap, non_ip; | 853 | unsigned rx_ev_pkt_type; |
883 | 854 | ||
884 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | 855 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); |
885 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | 856 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); |
@@ -903,41 +874,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
903 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | 874 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | |
904 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | 875 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); |
905 | 876 | ||
906 | snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) || | ||
907 | (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE); | ||
908 | non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE); | ||
909 | |||
910 | /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the | ||
911 | * length field of an LLC frame, which sets TOBE_DISC. We could set | ||
912 | * PASS_LEN_ERR, but we want the MAC to filter out short frames (to | ||
913 | * protect the RX block). | ||
914 | * | ||
915 | * bug5475 - LLC/SNAP: Falcon identifies SNAP packets. | ||
916 | * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag. | ||
917 | * LLC can't encapsulate IP, so by definition | ||
918 | * these packets are NON_IP. | ||
919 | * | ||
920 | * Unicast mismatch will also cause TOBE_DISC, so the driver needs | ||
921 | * to check this. | ||
922 | */ | ||
923 | if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) { | ||
924 | /* If all the other flags are zero then we can state the | ||
925 | * entire packet is ok, which will flag to the kernel not | ||
926 | * to recalculate checksums. | ||
927 | */ | ||
928 | if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm)) | ||
929 | *rx_ev_pkt_ok = 1; | ||
930 | |||
931 | rx_ev_tobe_disc = 0; | ||
932 | |||
933 | /* TOBE_DISC is set for unicast mismatch. But given that | ||
934 | * we can't trust TOBE_DISC here, we must validate the dest | ||
935 | * MAC address ourselves. | ||
936 | */ | ||
937 | if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event)) | ||
938 | rx_ev_tobe_disc = 1; | ||
939 | } | ||
940 | |||
941 | /* Count errors that are not in MAC stats. */ | 877 | /* Count errors that are not in MAC stats. */ |
942 | if (rx_ev_frm_trunc) | 878 | if (rx_ev_frm_trunc) |
943 | ++rx_queue->channel->n_rx_frm_trunc; | 879 | ++rx_queue->channel->n_rx_frm_trunc; |
@@ -961,7 +897,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
961 | #ifdef EFX_ENABLE_DEBUG | 897 | #ifdef EFX_ENABLE_DEBUG |
962 | if (rx_ev_other_err) { | 898 | if (rx_ev_other_err) { |
963 | EFX_INFO_RL(efx, " RX queue %d unexpected RX event " | 899 | EFX_INFO_RL(efx, " RX queue %d unexpected RX event " |
964 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n", | 900 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", |
965 | rx_queue->queue, EFX_QWORD_VAL(*event), | 901 | rx_queue->queue, EFX_QWORD_VAL(*event), |
966 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | 902 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", |
967 | rx_ev_ip_hdr_chksum_err ? | 903 | rx_ev_ip_hdr_chksum_err ? |
@@ -972,8 +908,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
972 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | 908 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", |
973 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | 909 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", |
974 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | 910 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", |
975 | rx_ev_pause_frm ? " [PAUSE]" : "", | 911 | rx_ev_pause_frm ? " [PAUSE]" : ""); |
976 | snap ? " [SNAP/LLC]" : ""); | ||
977 | } | 912 | } |
978 | #endif | 913 | #endif |
979 | 914 | ||
@@ -1006,13 +941,13 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue, | |||
1006 | * Also "is multicast" and "matches multicast filter" flags can be used to | 941 | * Also "is multicast" and "matches multicast filter" flags can be used to |
1007 | * discard non-matching multicast packets. | 942 | * discard non-matching multicast packets. |
1008 | */ | 943 | */ |
1009 | static inline int falcon_handle_rx_event(struct efx_channel *channel, | 944 | static void falcon_handle_rx_event(struct efx_channel *channel, |
1010 | const efx_qword_t *event) | 945 | const efx_qword_t *event) |
1011 | { | 946 | { |
1012 | unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt; | 947 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; |
1013 | unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt; | 948 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; |
1014 | unsigned expected_ptr; | 949 | unsigned expected_ptr; |
1015 | int discard = 0, checksummed; | 950 | bool rx_ev_pkt_ok, discard = false, checksummed; |
1016 | struct efx_rx_queue *rx_queue; | 951 | struct efx_rx_queue *rx_queue; |
1017 | struct efx_nic *efx = channel->efx; | 952 | struct efx_nic *efx = channel->efx; |
1018 | 953 | ||
@@ -1022,16 +957,14 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel, | |||
1022 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | 957 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); |
1023 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); | 958 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); |
1024 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); | 959 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); |
960 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel); | ||
1025 | 961 | ||
1026 | rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL); | 962 | rx_queue = &efx->rx_queue[channel->channel]; |
1027 | rx_queue = &efx->rx_queue[rx_ev_q_label]; | ||
1028 | 963 | ||
1029 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); | 964 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); |
1030 | expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; | 965 | expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; |
1031 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) { | 966 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) |
1032 | falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | 967 | falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); |
1033 | return rx_ev_q_label; | ||
1034 | } | ||
1035 | 968 | ||
1036 | if (likely(rx_ev_pkt_ok)) { | 969 | if (likely(rx_ev_pkt_ok)) { |
1037 | /* If packet is marked as OK and packet type is TCP/IPv4 or | 970 | /* If packet is marked as OK and packet type is TCP/IPv4 or |
@@ -1040,8 +973,8 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel, | |||
1040 | checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); | 973 | checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); |
1041 | } else { | 974 | } else { |
1042 | falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, | 975 | falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, |
1043 | &discard, rx_ev_byte_cnt); | 976 | &discard); |
1044 | checksummed = 0; | 977 | checksummed = false; |
1045 | } | 978 | } |
1046 | 979 | ||
1047 | /* Detect multicast packets that didn't match the filter */ | 980 | /* Detect multicast packets that didn't match the filter */ |
@@ -1051,14 +984,12 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel, | |||
1051 | EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); | 984 | EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); |
1052 | 985 | ||
1053 | if (unlikely(!rx_ev_mcast_hash_match)) | 986 | if (unlikely(!rx_ev_mcast_hash_match)) |
1054 | discard = 1; | 987 | discard = true; |
1055 | } | 988 | } |
1056 | 989 | ||
1057 | /* Handle received packet */ | 990 | /* Handle received packet */ |
1058 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, | 991 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, |
1059 | checksummed, discard); | 992 | checksummed, discard); |
1060 | |||
1061 | return rx_ev_q_label; | ||
1062 | } | 993 | } |
1063 | 994 | ||
1064 | /* Global events are basically PHY events */ | 995 | /* Global events are basically PHY events */ |
@@ -1066,23 +997,23 @@ static void falcon_handle_global_event(struct efx_channel *channel, | |||
1066 | efx_qword_t *event) | 997 | efx_qword_t *event) |
1067 | { | 998 | { |
1068 | struct efx_nic *efx = channel->efx; | 999 | struct efx_nic *efx = channel->efx; |
1069 | int is_phy_event = 0, handled = 0; | 1000 | bool is_phy_event = false, handled = false; |
1070 | 1001 | ||
1071 | /* Check for interrupt on either port. Some boards have a | 1002 | /* Check for interrupt on either port. Some boards have a |
1072 | * single PHY wired to the interrupt line for port 1. */ | 1003 | * single PHY wired to the interrupt line for port 1. */ |
1073 | if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || | 1004 | if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || |
1074 | EFX_QWORD_FIELD(*event, G_PHY1_INTR) || | 1005 | EFX_QWORD_FIELD(*event, G_PHY1_INTR) || |
1075 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) | 1006 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) |
1076 | is_phy_event = 1; | 1007 | is_phy_event = true; |
1077 | 1008 | ||
1078 | if ((falcon_rev(efx) >= FALCON_REV_B0) && | 1009 | if ((falcon_rev(efx) >= FALCON_REV_B0) && |
1079 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) | 1010 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) |
1080 | is_phy_event = 1; | 1011 | is_phy_event = true; |
1081 | 1012 | ||
1082 | if (is_phy_event) { | 1013 | if (is_phy_event) { |
1083 | efx->phy_op->clear_interrupt(efx); | 1014 | efx->phy_op->clear_interrupt(efx); |
1084 | queue_work(efx->workqueue, &efx->reconfigure_work); | 1015 | queue_work(efx->workqueue, &efx->reconfigure_work); |
1085 | handled = 1; | 1016 | handled = true; |
1086 | } | 1017 | } |
1087 | 1018 | ||
1088 | if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { | 1019 | if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { |
@@ -1092,7 +1023,7 @@ static void falcon_handle_global_event(struct efx_channel *channel, | |||
1092 | atomic_inc(&efx->rx_reset); | 1023 | atomic_inc(&efx->rx_reset); |
1093 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | 1024 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? |
1094 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | 1025 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); |
1095 | handled = 1; | 1026 | handled = true; |
1096 | } | 1027 | } |
1097 | 1028 | ||
1098 | if (!handled) | 1029 | if (!handled) |
@@ -1163,13 +1094,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel, | |||
1163 | } | 1094 | } |
1164 | } | 1095 | } |
1165 | 1096 | ||
1166 | int falcon_process_eventq(struct efx_channel *channel, int *rx_quota) | 1097 | int falcon_process_eventq(struct efx_channel *channel, int rx_quota) |
1167 | { | 1098 | { |
1168 | unsigned int read_ptr; | 1099 | unsigned int read_ptr; |
1169 | efx_qword_t event, *p_event; | 1100 | efx_qword_t event, *p_event; |
1170 | int ev_code; | 1101 | int ev_code; |
1171 | int rxq; | 1102 | int rx_packets = 0; |
1172 | int rxdmaqs = 0; | ||
1173 | 1103 | ||
1174 | read_ptr = channel->eventq_read_ptr; | 1104 | read_ptr = channel->eventq_read_ptr; |
1175 | 1105 | ||
@@ -1191,9 +1121,8 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota) | |||
1191 | 1121 | ||
1192 | switch (ev_code) { | 1122 | switch (ev_code) { |
1193 | case RX_IP_EV_DECODE: | 1123 | case RX_IP_EV_DECODE: |
1194 | rxq = falcon_handle_rx_event(channel, &event); | 1124 | falcon_handle_rx_event(channel, &event); |
1195 | rxdmaqs |= (1 << rxq); | 1125 | ++rx_packets; |
1196 | (*rx_quota)--; | ||
1197 | break; | 1126 | break; |
1198 | case TX_IP_EV_DECODE: | 1127 | case TX_IP_EV_DECODE: |
1199 | falcon_handle_tx_event(channel, &event); | 1128 | falcon_handle_tx_event(channel, &event); |
@@ -1220,10 +1149,10 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota) | |||
1220 | /* Increment read pointer */ | 1149 | /* Increment read pointer */ |
1221 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; | 1150 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; |
1222 | 1151 | ||
1223 | } while (*rx_quota); | 1152 | } while (rx_packets < rx_quota); |
1224 | 1153 | ||
1225 | channel->eventq_read_ptr = read_ptr; | 1154 | channel->eventq_read_ptr = read_ptr; |
1226 | return rxdmaqs; | 1155 | return rx_packets; |
1227 | } | 1156 | } |
1228 | 1157 | ||
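falcon_process_eventq() now takes its RX quota by value and returns the number of RX packets it handled, instead of decrementing the quota through a pointer and returning a bitmask of RX DMA queues. A minimal sketch of how a NAPI poll routine might consume the new return value follows; the napi_str member name and the completion steps are assumptions about the surrounding driver, not part of this diff.

/* Illustrative sketch only; error handling and NAPI completion details
 * are omitted. */
static int efx_poll_sketch(struct napi_struct *napi, int budget)
{
        struct efx_channel *channel =
                container_of(napi, struct efx_channel, napi_str);
        int rx_packets = falcon_process_eventq(channel, budget);

        if (rx_packets < budget) {
                /* Event queue drained within the quota: acknowledge the
                 * events read so the interrupt can be re-armed. */
                falcon_eventq_read_ack(channel);
        }
        return rx_packets;
}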
1229 | void falcon_set_int_moderation(struct efx_channel *channel) | 1158 | void falcon_set_int_moderation(struct efx_channel *channel) |
@@ -1251,7 +1180,7 @@ void falcon_set_int_moderation(struct efx_channel *channel) | |||
1251 | TIMER_VAL, 0); | 1180 | TIMER_VAL, 0); |
1252 | } | 1181 | } |
1253 | falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, | 1182 | falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, |
1254 | channel->evqnum); | 1183 | channel->channel); |
1255 | 1184 | ||
1256 | } | 1185 | } |
1257 | 1186 | ||
@@ -1265,20 +1194,17 @@ int falcon_probe_eventq(struct efx_channel *channel) | |||
1265 | return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); | 1194 | return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); |
1266 | } | 1195 | } |
1267 | 1196 | ||
1268 | int falcon_init_eventq(struct efx_channel *channel) | 1197 | void falcon_init_eventq(struct efx_channel *channel) |
1269 | { | 1198 | { |
1270 | efx_oword_t evq_ptr; | 1199 | efx_oword_t evq_ptr; |
1271 | struct efx_nic *efx = channel->efx; | 1200 | struct efx_nic *efx = channel->efx; |
1272 | int rc; | ||
1273 | 1201 | ||
1274 | EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", | 1202 | EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", |
1275 | channel->channel, channel->eventq.index, | 1203 | channel->channel, channel->eventq.index, |
1276 | channel->eventq.index + channel->eventq.entries - 1); | 1204 | channel->eventq.index + channel->eventq.entries - 1); |
1277 | 1205 | ||
1278 | /* Pin event queue buffer */ | 1206 | /* Pin event queue buffer */ |
1279 | rc = falcon_init_special_buffer(efx, &channel->eventq); | 1207 | falcon_init_special_buffer(efx, &channel->eventq); |
1280 | if (rc) | ||
1281 | return rc; | ||
1282 | 1208 | ||
1283 | /* Fill event queue with all ones (i.e. empty events) */ | 1209 | /* Fill event queue with all ones (i.e. empty events) */ |
1284 | memset(channel->eventq.addr, 0xff, channel->eventq.len); | 1210 | memset(channel->eventq.addr, 0xff, channel->eventq.len); |
@@ -1289,11 +1215,9 @@ int falcon_init_eventq(struct efx_channel *channel) | |||
1289 | EVQ_SIZE, FALCON_EVQ_ORDER, | 1215 | EVQ_SIZE, FALCON_EVQ_ORDER, |
1290 | EVQ_BUF_BASE_ID, channel->eventq.index); | 1216 | EVQ_BUF_BASE_ID, channel->eventq.index); |
1291 | falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, | 1217 | falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, |
1292 | channel->evqnum); | 1218 | channel->channel); |
1293 | 1219 | ||
1294 | falcon_set_int_moderation(channel); | 1220 | falcon_set_int_moderation(channel); |
1295 | |||
1296 | return 0; | ||
1297 | } | 1221 | } |
1298 | 1222 | ||
1299 | void falcon_fini_eventq(struct efx_channel *channel) | 1223 | void falcon_fini_eventq(struct efx_channel *channel) |
@@ -1304,7 +1228,7 @@ void falcon_fini_eventq(struct efx_channel *channel) | |||
1304 | /* Remove event queue from card */ | 1228 | /* Remove event queue from card */ |
1305 | EFX_ZERO_OWORD(eventq_ptr); | 1229 | EFX_ZERO_OWORD(eventq_ptr); |
1306 | falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, | 1230 | falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, |
1307 | channel->evqnum); | 1231 | channel->channel); |
1308 | 1232 | ||
1309 | /* Unpin event queue */ | 1233 | /* Unpin event queue */ |
1310 | falcon_fini_special_buffer(efx, &channel->eventq); | 1234 | falcon_fini_special_buffer(efx, &channel->eventq); |
@@ -1371,7 +1295,7 @@ void falcon_enable_interrupts(struct efx_nic *efx) | |||
1371 | 1295 | ||
1372 | /* Force processing of all the channels to get the EVQ RPTRs up to | 1296 | /* Force processing of all the channels to get the EVQ RPTRs up to |
1373 | date */ | 1297 | date */ |
1374 | efx_for_each_channel_with_interrupt(channel, efx) | 1298 | efx_for_each_channel(channel, efx) |
1375 | efx_schedule_channel(channel); | 1299 | efx_schedule_channel(channel); |
1376 | } | 1300 | } |
1377 | 1301 | ||
@@ -1589,7 +1513,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx) | |||
1589 | offset < RX_RSS_INDIR_TBL_B0 + 0x800; | 1513 | offset < RX_RSS_INDIR_TBL_B0 + 0x800; |
1590 | offset += 0x10) { | 1514 | offset += 0x10) { |
1591 | EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, | 1515 | EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, |
1592 | i % efx->rss_queues); | 1516 | i % efx->n_rx_queues); |
1593 | falcon_writel(efx, &dword, offset); | 1517 | falcon_writel(efx, &dword, offset); |
1594 | i++; | 1518 | i++; |
1595 | } | 1519 | } |
@@ -1621,7 +1545,7 @@ int falcon_init_interrupt(struct efx_nic *efx) | |||
1621 | } | 1545 | } |
1622 | 1546 | ||
1623 | /* Hook MSI or MSI-X interrupt */ | 1547 | /* Hook MSI or MSI-X interrupt */ |
1624 | efx_for_each_channel_with_interrupt(channel, efx) { | 1548 | efx_for_each_channel(channel, efx) { |
1625 | rc = request_irq(channel->irq, falcon_msi_interrupt, | 1549 | rc = request_irq(channel->irq, falcon_msi_interrupt, |
1626 | IRQF_PROBE_SHARED, /* Not shared */ | 1550 | IRQF_PROBE_SHARED, /* Not shared */ |
1627 | efx->name, channel); | 1551 | efx->name, channel); |
@@ -1634,7 +1558,7 @@ int falcon_init_interrupt(struct efx_nic *efx) | |||
1634 | return 0; | 1558 | return 0; |
1635 | 1559 | ||
1636 | fail2: | 1560 | fail2: |
1637 | efx_for_each_channel_with_interrupt(channel, efx) | 1561 | efx_for_each_channel(channel, efx) |
1638 | free_irq(channel->irq, channel); | 1562 | free_irq(channel->irq, channel); |
1639 | fail1: | 1563 | fail1: |
1640 | return rc; | 1564 | return rc; |
@@ -1646,7 +1570,7 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1646 | efx_oword_t reg; | 1570 | efx_oword_t reg; |
1647 | 1571 | ||
1648 | /* Disable MSI/MSI-X interrupts */ | 1572 | /* Disable MSI/MSI-X interrupts */ |
1649 | efx_for_each_channel_with_interrupt(channel, efx) { | 1573 | efx_for_each_channel(channel, efx) { |
1650 | if (channel->irq) | 1574 | if (channel->irq) |
1651 | free_irq(channel->irq, channel); | 1575 | free_irq(channel->irq, channel); |
1652 | } | 1576 | } |
@@ -1674,64 +1598,195 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1674 | /* Wait for SPI command completion */ | 1598 | /* Wait for SPI command completion */ |
1675 | static int falcon_spi_wait(struct efx_nic *efx) | 1599 | static int falcon_spi_wait(struct efx_nic *efx) |
1676 | { | 1600 | { |
1601 | unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10); | ||
1677 | efx_oword_t reg; | 1602 | efx_oword_t reg; |
1678 | int cmd_en, timer_active; | 1603 | bool cmd_en, timer_active; |
1679 | int count; | ||
1680 | 1604 | ||
1681 | count = 0; | 1605 | for (;;) { |
1682 | do { | ||
1683 | falcon_read(efx, ®, EE_SPI_HCMD_REG_KER); | 1606 | falcon_read(efx, ®, EE_SPI_HCMD_REG_KER); |
1684 | cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN); | 1607 | cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN); |
1685 | timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE); | 1608 | timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE); |
1686 | if (!cmd_en && !timer_active) | 1609 | if (!cmd_en && !timer_active) |
1687 | return 0; | 1610 | return 0; |
1688 | udelay(10); | 1611 | if (time_after_eq(jiffies, timeout)) { |
1689 | } while (++count < 10000); /* wait upto 100msec */ | 1612 | EFX_ERR(efx, "timed out waiting for SPI\n"); |
1690 | EFX_ERR(efx, "timed out waiting for SPI\n"); | 1613 | return -ETIMEDOUT; |
1691 | return -ETIMEDOUT; | 1614 | } |
1615 | cpu_relax(); | ||
1616 | } | ||
1692 | } | 1617 | } |
1693 | 1618 | ||
1694 | static int | 1619 | static int falcon_spi_cmd(const struct efx_spi_device *spi, |
1695 | falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command, | 1620 | unsigned int command, int address, |
1696 | unsigned int address, unsigned int addr_len, | 1621 | const void *in, void *out, unsigned int len) |
1697 | void *data, unsigned int len) | ||
1698 | { | 1622 | { |
1623 | struct efx_nic *efx = spi->efx; | ||
1624 | bool addressed = (address >= 0); | ||
1625 | bool reading = (out != NULL); | ||
1699 | efx_oword_t reg; | 1626 | efx_oword_t reg; |
1700 | int rc; | 1627 | int rc; |
1701 | 1628 | ||
1702 | BUG_ON(len > FALCON_SPI_MAX_LEN); | 1629 | /* Input validation */ |
1630 | if (len > FALCON_SPI_MAX_LEN) | ||
1631 | return -EINVAL; | ||
1703 | 1632 | ||
1704 | /* Check SPI not currently being accessed */ | 1633 | /* Check SPI not currently being accessed */ |
1705 | rc = falcon_spi_wait(efx); | 1634 | rc = falcon_spi_wait(efx); |
1706 | if (rc) | 1635 | if (rc) |
1707 | return rc; | 1636 | return rc; |
1708 | 1637 | ||
1709 | /* Program address register */ | 1638 | /* Program address register, if we have an address */ |
1710 | EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); | 1639 | if (addressed) { |
1711 | falcon_write(efx, ®, EE_SPI_HADR_REG_KER); | 1640 | EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); |
1641 | falcon_write(efx, ®, EE_SPI_HADR_REG_KER); | ||
1642 | } | ||
1643 | |||
1644 | /* Program data register, if we have data */ | ||
1645 | if (in != NULL) { | ||
1646 | memcpy(®, in, len); | ||
1647 | falcon_write(efx, ®, EE_SPI_HDATA_REG_KER); | ||
1648 | } | ||
1712 | 1649 | ||
1713 | /* Issue read command */ | 1650 | /* Issue read/write command */ |
1714 | EFX_POPULATE_OWORD_7(reg, | 1651 | EFX_POPULATE_OWORD_7(reg, |
1715 | EE_SPI_HCMD_CMD_EN, 1, | 1652 | EE_SPI_HCMD_CMD_EN, 1, |
1716 | EE_SPI_HCMD_SF_SEL, device_id, | 1653 | EE_SPI_HCMD_SF_SEL, spi->device_id, |
1717 | EE_SPI_HCMD_DABCNT, len, | 1654 | EE_SPI_HCMD_DABCNT, len, |
1718 | EE_SPI_HCMD_READ, EE_SPI_READ, | 1655 | EE_SPI_HCMD_READ, reading, |
1719 | EE_SPI_HCMD_DUBCNT, 0, | 1656 | EE_SPI_HCMD_DUBCNT, 0, |
1720 | EE_SPI_HCMD_ADBCNT, addr_len, | 1657 | EE_SPI_HCMD_ADBCNT, |
1658 | (addressed ? spi->addr_len : 0), | ||
1721 | EE_SPI_HCMD_ENC, command); | 1659 | EE_SPI_HCMD_ENC, command); |
1722 | falcon_write(efx, ®, EE_SPI_HCMD_REG_KER); | 1660 | falcon_write(efx, ®, EE_SPI_HCMD_REG_KER); |
1723 | 1661 | ||
1724 | /* Wait for read to complete */ | 1662 | /* Wait for read/write to complete */ |
1725 | rc = falcon_spi_wait(efx); | 1663 | rc = falcon_spi_wait(efx); |
1726 | if (rc) | 1664 | if (rc) |
1727 | return rc; | 1665 | return rc; |
1728 | 1666 | ||
1729 | /* Read data */ | 1667 | /* Read data */ |
1730 | falcon_read(efx, ®, EE_SPI_HDATA_REG_KER); | 1668 | if (out != NULL) { |
1731 | memcpy(data, ®, len); | 1669 | falcon_read(efx, ®, EE_SPI_HDATA_REG_KER); |
1670 | memcpy(out, ®, len); | ||
1671 | } | ||
1672 | |||
1732 | return 0; | 1673 | return 0; |
1733 | } | 1674 | } |
1734 | 1675 | ||
1676 | static unsigned int | ||
1677 | falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start) | ||
1678 | { | ||
1679 | return min(FALCON_SPI_MAX_LEN, | ||
1680 | (spi->block_size - (start & (spi->block_size - 1)))); | ||
1681 | } | ||
1682 | |||
1683 | static inline u8 | ||
1684 | efx_spi_munge_command(const struct efx_spi_device *spi, | ||
1685 | const u8 command, const unsigned int address) | ||
1686 | { | ||
1687 | return command | (((address >> 8) & spi->munge_address) << 3); | ||
1688 | } | ||
1689 | |||
1690 | |||
1691 | static int falcon_spi_fast_wait(const struct efx_spi_device *spi) | ||
1692 | { | ||
1693 | u8 status; | ||
1694 | int i, rc; | ||
1695 | |||
1696 | /* Wait up to 1000us for flash/EEPROM to finish a fast operation. */ | ||
1697 | for (i = 0; i < 50; i++) { | ||
1698 | udelay(20); | ||
1699 | |||
1700 | rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, | ||
1701 | &status, sizeof(status)); | ||
1702 | if (rc) | ||
1703 | return rc; | ||
1704 | if (!(status & SPI_STATUS_NRDY)) | ||
1705 | return 0; | ||
1706 | } | ||
1707 | EFX_ERR(spi->efx, | ||
1708 | "timed out waiting for device %d last status=0x%02x\n", | ||
1709 | spi->device_id, status); | ||
1710 | return -ETIMEDOUT; | ||
1711 | } | ||
1712 | |||
1713 | int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, | ||
1714 | size_t len, size_t *retlen, u8 *buffer) | ||
1715 | { | ||
1716 | unsigned int command, block_len, pos = 0; | ||
1717 | int rc = 0; | ||
1718 | |||
1719 | while (pos < len) { | ||
1720 | block_len = min((unsigned int)len - pos, | ||
1721 | FALCON_SPI_MAX_LEN); | ||
1722 | |||
1723 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); | ||
1724 | rc = falcon_spi_cmd(spi, command, start + pos, NULL, | ||
1725 | buffer + pos, block_len); | ||
1726 | if (rc) | ||
1727 | break; | ||
1728 | pos += block_len; | ||
1729 | |||
1730 | /* Avoid locking up the system */ | ||
1731 | cond_resched(); | ||
1732 | if (signal_pending(current)) { | ||
1733 | rc = -EINTR; | ||
1734 | break; | ||
1735 | } | ||
1736 | } | ||
1737 | |||
1738 | if (retlen) | ||
1739 | *retlen = pos; | ||
1740 | return rc; | ||
1741 | } | ||
1742 | |||
1743 | int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, | ||
1744 | size_t len, size_t *retlen, const u8 *buffer) | ||
1745 | { | ||
1746 | u8 verify_buffer[FALCON_SPI_MAX_LEN]; | ||
1747 | unsigned int command, block_len, pos = 0; | ||
1748 | int rc = 0; | ||
1749 | |||
1750 | while (pos < len) { | ||
1751 | rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0); | ||
1752 | if (rc) | ||
1753 | break; | ||
1754 | |||
1755 | block_len = min((unsigned int)len - pos, | ||
1756 | falcon_spi_write_limit(spi, start + pos)); | ||
1757 | command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); | ||
1758 | rc = falcon_spi_cmd(spi, command, start + pos, | ||
1759 | buffer + pos, NULL, block_len); | ||
1760 | if (rc) | ||
1761 | break; | ||
1762 | |||
1763 | rc = falcon_spi_fast_wait(spi); | ||
1764 | if (rc) | ||
1765 | break; | ||
1766 | |||
1767 | command = efx_spi_munge_command(spi, SPI_READ, start + pos); | ||
1768 | rc = falcon_spi_cmd(spi, command, start + pos, | ||
1769 | NULL, verify_buffer, block_len); | ||
1770 | if (memcmp(verify_buffer, buffer + pos, block_len)) { | ||
1771 | rc = -EIO; | ||
1772 | break; | ||
1773 | } | ||
1774 | |||
1775 | pos += block_len; | ||
1776 | |||
1777 | /* Avoid locking up the system */ | ||
1778 | cond_resched(); | ||
1779 | if (signal_pending(current)) { | ||
1780 | rc = -EINTR; | ||
1781 | break; | ||
1782 | } | ||
1783 | } | ||
1784 | |||
1785 | if (retlen) | ||
1786 | *retlen = pos; | ||
1787 | return rc; | ||
1788 | } | ||
1789 | |||
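The rewritten SPI layer funnels everything through falcon_spi_cmd(): a negative address suppresses the address cycle, a NULL in pointer suppresses the write-data phase, and a NULL out pointer suppresses the read-data phase, so one helper serves plain commands (SPI_WREN), register reads (SPI_RDSR in falcon_spi_fast_wait()) and block transfers alike. A short usage sketch, illustrative only since falcon_spi_cmd() is static to this file and the offsets are arbitrary:

/* Illustrative sketch only; not part of this patch. */
static int falcon_spi_usage_sketch(const struct efx_spi_device *spi, u8 *buf)
{
        size_t retlen;
        u8 status;
        int rc;

        /* Command-only transfer with one byte of read data: no address
         * cycle (address < 0) and no write data (in == NULL). */
        rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, &status, sizeof(status));
        if (rc)
                return rc;

        /* Chunked read: falcon_spi_read() splits the request into
         * FALCON_SPI_MAX_LEN-sized commands and munges the command byte
         * for 9-bit-addressed EEPROMs. */
        return falcon_spi_read(spi, 0x100, 256, &retlen, buf);
}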
1735 | /************************************************************************** | 1790 | /************************************************************************** |
1736 | * | 1791 | * |
1737 | * MAC wrapper | 1792 | * MAC wrapper |
@@ -1812,7 +1867,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1812 | { | 1867 | { |
1813 | efx_oword_t reg; | 1868 | efx_oword_t reg; |
1814 | int link_speed; | 1869 | int link_speed; |
1815 | unsigned int tx_fc; | 1870 | bool tx_fc; |
1816 | 1871 | ||
1817 | if (efx->link_options & GM_LPA_10000) | 1872 | if (efx->link_options & GM_LPA_10000) |
1818 | link_speed = 0x3; | 1873 | link_speed = 0x3; |
@@ -1847,7 +1902,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1847 | /* Transmission of pause frames when RX crosses the threshold is | 1902 | /* Transmission of pause frames when RX crosses the threshold is |
1848 | * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. | 1903 | * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. |
1849 | * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ | 1904 | * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ |
1850 | tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0; | 1905 | tx_fc = !!(efx->flow_control & EFX_FC_TX); |
1851 | falcon_read(efx, ®, RX_CFG_REG_KER); | 1906 | falcon_read(efx, ®, RX_CFG_REG_KER); |
1852 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); | 1907 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); |
1853 | 1908 | ||
@@ -1951,7 +2006,7 @@ static int falcon_gmii_wait(struct efx_nic *efx) | |||
1951 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | 2006 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, |
1952 | int addr, int value) | 2007 | int addr, int value) |
1953 | { | 2008 | { |
1954 | struct efx_nic *efx = net_dev->priv; | 2009 | struct efx_nic *efx = netdev_priv(net_dev); |
1955 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; | 2010 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; |
1956 | efx_oword_t reg; | 2011 | efx_oword_t reg; |
1957 | 2012 | ||
@@ -2019,7 +2074,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | |||
2019 | * could be read, -1 will be returned. */ | 2074 | * could be read, -1 will be returned. */ |
2020 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) | 2075 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) |
2021 | { | 2076 | { |
2022 | struct efx_nic *efx = net_dev->priv; | 2077 | struct efx_nic *efx = netdev_priv(net_dev); |
2023 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; | 2078 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; |
2024 | efx_oword_t reg; | 2079 | efx_oword_t reg; |
2025 | int value = -1; | 2080 | int value = -1; |
@@ -2120,7 +2175,7 @@ int falcon_probe_port(struct efx_nic *efx) | |||
2120 | return rc; | 2175 | return rc; |
2121 | 2176 | ||
2122 | /* Set up GMII structure for PHY */ | 2177 | /* Set up GMII structure for PHY */ |
2123 | efx->mii.supports_gmii = 1; | 2178 | efx->mii.supports_gmii = true; |
2124 | falcon_init_mdio(&efx->mii); | 2179 | falcon_init_mdio(&efx->mii); |
2125 | 2180 | ||
2126 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ | 2181 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ |
@@ -2168,6 +2223,170 @@ void falcon_set_multicast_hash(struct efx_nic *efx) | |||
2168 | falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); | 2223 | falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); |
2169 | } | 2224 | } |
2170 | 2225 | ||
2226 | |||
2227 | /************************************************************************** | ||
2228 | * | ||
2229 | * Falcon test code | ||
2230 | * | ||
2231 | **************************************************************************/ | ||
2232 | |||
2233 | int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) | ||
2234 | { | ||
2235 | struct falcon_nvconfig *nvconfig; | ||
2236 | struct efx_spi_device *spi; | ||
2237 | void *region; | ||
2238 | int rc, magic_num, struct_ver; | ||
2239 | __le16 *word, *limit; | ||
2240 | u32 csum; | ||
2241 | |||
2242 | region = kmalloc(NVCONFIG_END, GFP_KERNEL); | ||
2243 | if (!region) | ||
2244 | return -ENOMEM; | ||
2245 | nvconfig = region + NVCONFIG_OFFSET; | ||
2246 | |||
2247 | spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom; | ||
2248 | rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region); | ||
2249 | if (rc) { | ||
2250 | EFX_ERR(efx, "Failed to read %s\n", | ||
2251 | efx->spi_flash ? "flash" : "EEPROM"); | ||
2252 | rc = -EIO; | ||
2253 | goto out; | ||
2254 | } | ||
2255 | |||
2256 | magic_num = le16_to_cpu(nvconfig->board_magic_num); | ||
2257 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); | ||
2258 | |||
2259 | rc = -EINVAL; | ||
2260 | if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { | ||
2261 | EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); | ||
2262 | goto out; | ||
2263 | } | ||
2264 | if (struct_ver < 2) { | ||
2265 | EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver); | ||
2266 | goto out; | ||
2267 | } else if (struct_ver < 4) { | ||
2268 | word = &nvconfig->board_magic_num; | ||
2269 | limit = (__le16 *) (nvconfig + 1); | ||
2270 | } else { | ||
2271 | word = region; | ||
2272 | limit = region + NVCONFIG_END; | ||
2273 | } | ||
2274 | for (csum = 0; word < limit; ++word) | ||
2275 | csum += le16_to_cpu(*word); | ||
2276 | |||
2277 | if (~csum & 0xffff) { | ||
2278 | EFX_ERR(efx, "NVRAM has incorrect checksum\n"); | ||
2279 | goto out; | ||
2280 | } | ||
2281 | |||
2282 | rc = 0; | ||
2283 | if (nvconfig_out) | ||
2284 | memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig)); | ||
2285 | |||
2286 | out: | ||
2287 | kfree(region); | ||
2288 | return rc; | ||
2289 | } | ||
2290 | |||
2291 | /* Registers tested in the falcon register test */ | ||
2292 | static struct { | ||
2293 | unsigned address; | ||
2294 | efx_oword_t mask; | ||
2295 | } efx_test_registers[] = { | ||
2296 | { ADR_REGION_REG_KER, | ||
2297 | EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, | ||
2298 | { RX_CFG_REG_KER, | ||
2299 | EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, | ||
2300 | { TX_CFG_REG_KER, | ||
2301 | EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, | ||
2302 | { TX_CFG2_REG_KER, | ||
2303 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, | ||
2304 | { MAC0_CTRL_REG_KER, | ||
2305 | EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, | ||
2306 | { SRM_TX_DC_CFG_REG_KER, | ||
2307 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, | ||
2308 | { RX_DC_CFG_REG_KER, | ||
2309 | EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, | ||
2310 | { RX_DC_PF_WM_REG_KER, | ||
2311 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, | ||
2312 | { DP_CTRL_REG, | ||
2313 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, | ||
2314 | { XM_GLB_CFG_REG, | ||
2315 | EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, | ||
2316 | { XM_TX_CFG_REG, | ||
2317 | EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, | ||
2318 | { XM_RX_CFG_REG, | ||
2319 | EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, | ||
2320 | { XM_RX_PARAM_REG, | ||
2321 | EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, | ||
2322 | { XM_FC_REG, | ||
2323 | EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, | ||
2324 | { XM_ADR_LO_REG, | ||
2325 | EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, | ||
2326 | { XX_SD_CTL_REG, | ||
2327 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, | ||
2328 | }; | ||
2329 | |||
2330 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | ||
2331 | const efx_oword_t *mask) | ||
2332 | { | ||
2333 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | ||
2334 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | ||
2335 | } | ||
2336 | |||
2337 | int falcon_test_registers(struct efx_nic *efx) | ||
2338 | { | ||
2339 | unsigned address = 0, i, j; | ||
2340 | efx_oword_t mask, imask, original, reg, buf; | ||
2341 | |||
2342 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ | ||
2343 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | ||
2344 | |||
2345 | for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) { | ||
2346 | address = efx_test_registers[i].address; | ||
2347 | mask = imask = efx_test_registers[i].mask; | ||
2348 | EFX_INVERT_OWORD(imask); | ||
2349 | |||
2350 | falcon_read(efx, &original, address); | ||
2351 | |||
2352 | /* bit sweep on and off */ | ||
2353 | for (j = 0; j < 128; j++) { | ||
2354 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | ||
2355 | continue; | ||
2356 | |||
2357 | /* Test this testable bit can be set in isolation */ | ||
2358 | EFX_AND_OWORD(reg, original, mask); | ||
2359 | EFX_SET_OWORD32(reg, j, j, 1); | ||
2360 | |||
2361 | falcon_write(efx, ®, address); | ||
2362 | falcon_read(efx, &buf, address); | ||
2363 | |||
2364 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
2365 | goto fail; | ||
2366 | |||
2367 | /* Test this testable bit can be cleared in isolation */ | ||
2368 | EFX_OR_OWORD(reg, original, mask); | ||
2369 | EFX_SET_OWORD32(reg, j, j, 0); | ||
2370 | |||
2371 | falcon_write(efx, ®, address); | ||
2372 | falcon_read(efx, &buf, address); | ||
2373 | |||
2374 | if (efx_masked_compare_oword(®, &buf, &mask)) | ||
2375 | goto fail; | ||
2376 | } | ||
2377 | |||
2378 | falcon_write(efx, &original, address); | ||
2379 | } | ||
2380 | |||
2381 | return 0; | ||
2382 | |||
2383 | fail: | ||
2384 | EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | ||
2385 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | ||
2386 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | ||
2387 | return -EIO; | ||
2388 | } | ||
2389 | |||
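For reference, the register test above sweeps only the bits covered by each entry's mask, writing every testable bit set and then cleared in isolation and comparing the readback under that same mask, so read-only or self-clearing bits cannot produce false failures. A comment-style worked example, assuming the mask semantics shown above:

/* Worked example, not part of the patch: ADR_REGION_REG_KER has mask
 * EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF), so bits
 * 0-16 of each 32-bit word are swept. For each such bit j the test writes
 * (original & mask) with bit j forced to 1 and then to 0, and a fault is
 * reported only when
 *        ((wrote ^ read) & mask) != 0
 * i.e. the written and read values differ in a bit the mask declares
 * writable.
 */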
2171 | /************************************************************************** | 2390 | /************************************************************************** |
2172 | * | 2391 | * |
2173 | * Device reset | 2392 | * Device reset |
@@ -2305,68 +2524,103 @@ static int falcon_reset_sram(struct efx_nic *efx) | |||
2305 | return -ETIMEDOUT; | 2524 | return -ETIMEDOUT; |
2306 | } | 2525 | } |
2307 | 2526 | ||
2527 | static int falcon_spi_device_init(struct efx_nic *efx, | ||
2528 | struct efx_spi_device **spi_device_ret, | ||
2529 | unsigned int device_id, u32 device_type) | ||
2530 | { | ||
2531 | struct efx_spi_device *spi_device; | ||
2532 | |||
2533 | if (device_type != 0) { | ||
2534 | spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL); | ||
2535 | if (!spi_device) | ||
2536 | return -ENOMEM; | ||
2537 | spi_device->device_id = device_id; | ||
2538 | spi_device->size = | ||
2539 | 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); | ||
2540 | spi_device->addr_len = | ||
2541 | SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); | ||
2542 | spi_device->munge_address = (spi_device->size == 1 << 9 && | ||
2543 | spi_device->addr_len == 1); | ||
2544 | spi_device->block_size = | ||
2545 | 1 << SPI_DEV_TYPE_FIELD(device_type, | ||
2546 | SPI_DEV_TYPE_BLOCK_SIZE); | ||
2547 | |||
2548 | spi_device->efx = efx; | ||
2549 | } else { | ||
2550 | spi_device = NULL; | ||
2551 | } | ||
2552 | |||
2553 | kfree(*spi_device_ret); | ||
2554 | *spi_device_ret = spi_device; | ||
2555 | return 0; | ||
2556 | } | ||
2557 | |||
2558 | |||
2559 | static void falcon_remove_spi_devices(struct efx_nic *efx) | ||
2560 | { | ||
2561 | kfree(efx->spi_eeprom); | ||
2562 | efx->spi_eeprom = NULL; | ||
2563 | kfree(efx->spi_flash); | ||
2564 | efx->spi_flash = NULL; | ||
2565 | } | ||
2566 | |||
2308 | /* Extract non-volatile configuration */ | 2567 | /* Extract non-volatile configuration */ |
2309 | static int falcon_probe_nvconfig(struct efx_nic *efx) | 2568 | static int falcon_probe_nvconfig(struct efx_nic *efx) |
2310 | { | 2569 | { |
2311 | struct falcon_nvconfig *nvconfig; | 2570 | struct falcon_nvconfig *nvconfig; |
2312 | efx_oword_t nic_stat; | 2571 | int board_rev; |
2313 | int device_id; | ||
2314 | unsigned addr_len; | ||
2315 | size_t offset, len; | ||
2316 | int magic_num, struct_ver, board_rev; | ||
2317 | int rc; | 2572 | int rc; |
2318 | 2573 | ||
2319 | /* Find the boot device. */ | ||
2320 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | ||
2321 | if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) { | ||
2322 | device_id = EE_SPI_FLASH; | ||
2323 | addr_len = 3; | ||
2324 | } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) { | ||
2325 | device_id = EE_SPI_EEPROM; | ||
2326 | addr_len = 2; | ||
2327 | } else { | ||
2328 | return -ENODEV; | ||
2329 | } | ||
2330 | |||
2331 | nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); | 2574 | nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); |
2575 | if (!nvconfig) | ||
2576 | return -ENOMEM; | ||
2332 | 2577 | ||
2333 | /* Read the whole configuration structure into memory. */ | 2578 | rc = falcon_read_nvram(efx, nvconfig); |
2334 | for (offset = 0; offset < sizeof(*nvconfig); offset += len) { | 2579 | if (rc == -EINVAL) { |
2335 | len = min(sizeof(*nvconfig) - offset, | 2580 | EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); |
2336 | (size_t) FALCON_SPI_MAX_LEN); | ||
2337 | rc = falcon_spi_read(efx, device_id, SPI_READ, | ||
2338 | NVCONFIG_BASE + offset, addr_len, | ||
2339 | (char *)nvconfig + offset, len); | ||
2340 | if (rc) | ||
2341 | goto out; | ||
2342 | } | ||
2343 | |||
2344 | /* Read the MAC addresses */ | ||
2345 | memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); | ||
2346 | |||
2347 | /* Read the board configuration. */ | ||
2348 | magic_num = le16_to_cpu(nvconfig->board_magic_num); | ||
2349 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); | ||
2350 | |||
2351 | if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) { | ||
2352 | EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x " | ||
2353 | "therefore using defaults\n", magic_num, struct_ver); | ||
2354 | efx->phy_type = PHY_TYPE_NONE; | 2581 | efx->phy_type = PHY_TYPE_NONE; |
2355 | efx->mii.phy_id = PHY_ADDR_INVALID; | 2582 | efx->mii.phy_id = PHY_ADDR_INVALID; |
2356 | board_rev = 0; | 2583 | board_rev = 0; |
2584 | rc = 0; | ||
2585 | } else if (rc) { | ||
2586 | goto fail1; | ||
2357 | } else { | 2587 | } else { |
2358 | struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; | 2588 | struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; |
2589 | struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3; | ||
2359 | 2590 | ||
2360 | efx->phy_type = v2->port0_phy_type; | 2591 | efx->phy_type = v2->port0_phy_type; |
2361 | efx->mii.phy_id = v2->port0_phy_addr; | 2592 | efx->mii.phy_id = v2->port0_phy_addr; |
2362 | board_rev = le16_to_cpu(v2->board_revision); | 2593 | board_rev = le16_to_cpu(v2->board_revision); |
2594 | |||
2595 | if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { | ||
2596 | __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; | ||
2597 | __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; | ||
2598 | rc = falcon_spi_device_init(efx, &efx->spi_flash, | ||
2599 | EE_SPI_FLASH, | ||
2600 | le32_to_cpu(fl)); | ||
2601 | if (rc) | ||
2602 | goto fail2; | ||
2603 | rc = falcon_spi_device_init(efx, &efx->spi_eeprom, | ||
2604 | EE_SPI_EEPROM, | ||
2605 | le32_to_cpu(ee)); | ||
2606 | if (rc) | ||
2607 | goto fail2; | ||
2608 | } | ||
2363 | } | 2609 | } |
2364 | 2610 | ||
2611 | /* Read the MAC addresses */ | ||
2612 | memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); | ||
2613 | |||
2365 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id); | 2614 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id); |
2366 | 2615 | ||
2367 | efx_set_board_info(efx, board_rev); | 2616 | efx_set_board_info(efx, board_rev); |
2368 | 2617 | ||
2369 | out: | 2618 | kfree(nvconfig); |
2619 | return 0; | ||
2620 | |||
2621 | fail2: | ||
2622 | falcon_remove_spi_devices(efx); | ||
2623 | fail1: | ||
2370 | kfree(nvconfig); | 2624 | kfree(nvconfig); |
2371 | return rc; | 2625 | return rc; |
2372 | } | 2626 | } |
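falcon_spi_device_init() unpacks a 32-bit device descriptor into the efx_spi_device fields, with sizes stored as log2 values. A worked decode of the small-EEPROM descriptor that falcon_probe_spi_devices() builds further down; the SPI_DEV_TYPE_* field positions themselves live outside this diff, so treat the layout as assumed:

/* Worked example, not part of the patch:
 *   device_type = (9 << SPI_DEV_TYPE_SIZE_LBN)
 *               | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
 *               | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)
 * falcon_spi_device_init() derives:
 *   size          = 1 << 9 = 512 bytes
 *   addr_len      = 1 byte
 *   block_size    = 1 << 3 = 8-byte write block
 *   munge_address = true (512-byte part with single-byte addressing, so
 *                   efx_spi_munge_command() folds address bit 8 into the
 *                   command byte, matching AT25040-class EEPROMs)
 */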
@@ -2417,6 +2671,86 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2417 | return 0; | 2671 | return 0; |
2418 | } | 2672 | } |
2419 | 2673 | ||
2674 | /* Probe all SPI devices on the NIC */ | ||
2675 | static void falcon_probe_spi_devices(struct efx_nic *efx) | ||
2676 | { | ||
2677 | efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; | ||
2678 | bool has_flash, has_eeprom, boot_is_external; | ||
2679 | |||
2680 | falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); | ||
2681 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | ||
2682 | falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); | ||
2683 | |||
2684 | has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST); | ||
2685 | has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST); | ||
2686 | boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE); | ||
2687 | |||
2688 | if (has_flash) { | ||
2689 | /* Default flash SPI device: Atmel AT25F1024 | ||
2690 | * 128 KB, 24-bit address, 32 KB erase block, | ||
2691 | * 256 B write block | ||
2692 | */ | ||
2693 | u32 flash_device_type = | ||
2694 | (17 << SPI_DEV_TYPE_SIZE_LBN) | ||
2695 | | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN) | ||
2696 | | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN) | ||
2697 | | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) | ||
2698 | | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN); | ||
2699 | |||
2700 | falcon_spi_device_init(efx, &efx->spi_flash, | ||
2701 | EE_SPI_FLASH, flash_device_type); | ||
2702 | |||
2703 | if (!boot_is_external) { | ||
2704 | /* Disable VPD and set clock dividers to safe | ||
2705 | * values for initial programming. | ||
2706 | */ | ||
2707 | EFX_LOG(efx, "Booted from internal ASIC settings;" | ||
2708 | " setting SPI config\n"); | ||
2709 | EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, | ||
2710 | /* 125 MHz / 7 ~= 20 MHz */ | ||
2711 | EE_SF_CLOCK_DIV, 7, | ||
2712 | /* 125 MHz / 63 ~= 2 MHz */ | ||
2713 | EE_EE_CLOCK_DIV, 63); | ||
2714 | falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); | ||
2715 | } | ||
2716 | } | ||
2717 | |||
2718 | if (has_eeprom) { | ||
2719 | u32 eeprom_device_type; | ||
2720 | |||
2721 | /* If it has no flash, it must have a large EEPROM | ||
2722 | * for chip config; otherwise check whether 9-bit | ||
2723 | * addressing is used for VPD configuration | ||
2724 | */ | ||
2725 | if (has_flash && | ||
2726 | (!boot_is_external || | ||
2727 | EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) { | ||
2728 | /* Default SPI device: Atmel AT25040 or similar | ||
2729 | * 512 B, 9-bit address, 8 B write block | ||
2730 | */ | ||
2731 | eeprom_device_type = | ||
2732 | (9 << SPI_DEV_TYPE_SIZE_LBN) | ||
2733 | | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN) | ||
2734 | | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN); | ||
2735 | } else { | ||
2736 | /* "Large" SPI device: Atmel AT25640 or similar | ||
2737 | * 8 KB, 16-bit address, 32 B write block | ||
2738 | */ | ||
2739 | eeprom_device_type = | ||
2740 | (13 << SPI_DEV_TYPE_SIZE_LBN) | ||
2741 | | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN) | ||
2742 | | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN); | ||
2743 | } | ||
2744 | |||
2745 | falcon_spi_device_init(efx, &efx->spi_eeprom, | ||
2746 | EE_SPI_EEPROM, eeprom_device_type); | ||
2747 | } | ||
2748 | |||
2749 | EFX_LOG(efx, "flash is %s, EEPROM is %s\n", | ||
2750 | (has_flash ? "present" : "absent"), | ||
2751 | (has_eeprom ? "present" : "absent")); | ||
2752 | } | ||
2753 | |||
2420 | int falcon_probe_nic(struct efx_nic *efx) | 2754 | int falcon_probe_nic(struct efx_nic *efx) |
2421 | { | 2755 | { |
2422 | struct falcon_nic_data *nic_data; | 2756 | struct falcon_nic_data *nic_data; |
@@ -2467,6 +2801,8 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2467 | (unsigned long long)efx->irq_status.dma_addr, | 2801 | (unsigned long long)efx->irq_status.dma_addr, |
2468 | efx->irq_status.addr, virt_to_phys(efx->irq_status.addr)); | 2802 | efx->irq_status.addr, virt_to_phys(efx->irq_status.addr)); |
2469 | 2803 | ||
2804 | falcon_probe_spi_devices(efx); | ||
2805 | |||
2470 | /* Read in the non-volatile configuration */ | 2806 | /* Read in the non-volatile configuration */ |
2471 | rc = falcon_probe_nvconfig(efx); | 2807 | rc = falcon_probe_nvconfig(efx); |
2472 | if (rc) | 2808 | if (rc) |
@@ -2486,6 +2822,7 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2486 | return 0; | 2822 | return 0; |
2487 | 2823 | ||
2488 | fail5: | 2824 | fail5: |
2825 | falcon_remove_spi_devices(efx); | ||
2489 | falcon_free_buffer(efx, &efx->irq_status); | 2826 | falcon_free_buffer(efx, &efx->irq_status); |
2490 | fail4: | 2827 | fail4: |
2491 | fail3: | 2828 | fail3: |
@@ -2573,19 +2910,14 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2573 | EFX_INVERT_OWORD(temp); | 2910 | EFX_INVERT_OWORD(temp); |
2574 | falcon_write(efx, &temp, FATAL_INTR_REG_KER); | 2911 | falcon_write(efx, &temp, FATAL_INTR_REG_KER); |
2575 | 2912 | ||
2576 | /* Set number of RSS queues for receive path. */ | ||
2577 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | ||
2578 | if (falcon_rev(efx) >= FALCON_REV_B0) | ||
2579 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); | ||
2580 | else | ||
2581 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); | ||
2582 | if (EFX_WORKAROUND_7244(efx)) { | 2913 | if (EFX_WORKAROUND_7244(efx)) { |
2914 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | ||
2583 | EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); | 2915 | EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); |
2584 | EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); | 2916 | EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); |
2585 | EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); | 2917 | EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); |
2586 | EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); | 2918 | EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); |
2919 | falcon_write(efx, &temp, RX_FILTER_CTL_REG); | ||
2587 | } | 2920 | } |
2588 | falcon_write(efx, &temp, RX_FILTER_CTL_REG); | ||
2589 | 2921 | ||
2590 | falcon_setup_rss_indir_table(efx); | 2922 | falcon_setup_rss_indir_table(efx); |
2591 | 2923 | ||
@@ -2641,8 +2973,8 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2641 | rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh); | 2973 | rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh); |
2642 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256); | 2974 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256); |
2643 | /* RX control FIFO thresholds [32 entries] */ | 2975 | /* RX control FIFO thresholds [32 entries] */ |
2644 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25); | 2976 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20); |
2645 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20); | 2977 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25); |
2646 | falcon_write(efx, &temp, RX_CFG_REG_KER); | 2978 | falcon_write(efx, &temp, RX_CFG_REG_KER); |
2647 | 2979 | ||
2648 | /* Set destination of both TX and RX Flush events */ | 2980 | /* Set destination of both TX and RX Flush events */ |
@@ -2662,6 +2994,7 @@ void falcon_remove_nic(struct efx_nic *efx) | |||
2662 | rc = i2c_del_adapter(&efx->i2c_adap); | 2994 | rc = i2c_del_adapter(&efx->i2c_adap); |
2663 | BUG_ON(rc); | 2995 | BUG_ON(rc); |
2664 | 2996 | ||
2997 | falcon_remove_spi_devices(efx); | ||
2665 | falcon_free_buffer(efx, &efx->irq_status); | 2998 | falcon_free_buffer(efx, &efx->irq_status); |
2666 | 2999 | ||
2667 | falcon_reset_hw(efx, RESET_TYPE_ALL); | 3000 | falcon_reset_hw(efx, RESET_TYPE_ALL); |