Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/qlogic/qed/qed.h               |  46
 drivers/net/ethernet/qlogic/qed/qed_dev.c           |   6
 drivers/net/ethernet/qlogic/qed/qed_dev_api.h       |   2
 drivers/net/ethernet/qlogic/qed/qed_hsi.h           |  51
 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 127
 drivers/net/ethernet/qlogic/qed/qed_l2.c            |  31
 drivers/net/ethernet/qlogic/qed/qed_main.c          |   2
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h      |  31
 drivers/net/ethernet/qlogic/qed/qed_sp.h            |   7
 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c   | 254
 10 files changed, 553 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 0f0d2d1d77e5..33e2ed60c18f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -74,6 +74,51 @@ struct qed_rt_data {
 	bool *b_valid;
 };
 
+enum qed_tunn_mode {
+	QED_MODE_L2GENEVE_TUNN,
+	QED_MODE_IPGENEVE_TUNN,
+	QED_MODE_L2GRE_TUNN,
+	QED_MODE_IPGRE_TUNN,
+	QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+	QED_TUNN_CLSS_MAC_VLAN,
+	QED_TUNN_CLSS_MAC_VNI,
+	QED_TUNN_CLSS_INNER_MAC_VLAN,
+	QED_TUNN_CLSS_INNER_MAC_VNI,
+	MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+	unsigned long	tunn_mode;
+	u16		vxlan_udp_port;
+	u16		geneve_udp_port;
+	u8		update_vxlan_udp_port;
+	u8		update_geneve_udp_port;
+	u8		tunn_clss_vxlan;
+	u8		tunn_clss_l2geneve;
+	u8		tunn_clss_ipgeneve;
+	u8		tunn_clss_l2gre;
+	u8		tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+	unsigned long	tunn_mode_update_mask;
+	unsigned long	tunn_mode;
+	u16		vxlan_udp_port;
+	u16		geneve_udp_port;
+	u8		update_rx_pf_clss;
+	u8		update_tx_pf_clss;
+	u8		update_vxlan_udp_port;
+	u8		update_geneve_udp_port;
+	u8		tunn_clss_vxlan;
+	u8		tunn_clss_l2geneve;
+	u8		tunn_clss_ipgeneve;
+	u8		tunn_clss_l2gre;
+	u8		tunn_clss_ipgre;
+};
+
 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
  * 2. The Ethernet personality may support also the RoCE protocol
@@ -430,6 +475,7 @@ struct qed_dev {
 	u8				num_hwfns;
 	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];
 
+	unsigned long			tunn_mode;
 	u32				drv_type;
 
 	struct qed_eth_stats		*reset_stats;
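The two structures above are the patch's driver-facing API: tunn_mode is a plain bitmask indexed by enum qed_tunn_mode, so callers populate it with the kernel's bit helpers. As a minimal illustration (not part of the patch; the port value is only an example), a caller preparing start-time tunnel parameters might write:

	struct qed_tunn_start_params tunn_info;

	memset(&tunn_info, 0, sizeof(tunn_info));

	/* Enum values double as bit indices into tunn_mode. */
	__set_bit(QED_MODE_VXLAN_TUNN, &tunn_info.tunn_mode);
	__set_bit(QED_MODE_L2GRE_TUNN, &tunn_info.tunn_mode);

	/* Program the IANA-assigned VXLAN UDP port (4789). */
	tunn_info.update_vxlan_udp_port = 1;
	tunn_info.vxlan_udp_port = 4789;

	/* Classify VXLAN packets by outer MAC + VLAN. */
	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;

A pointer to such a structure is what the reworked qed_hw_init() below threads down to qed_sp_pf_start().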
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b7d100f6bd6f..bdae5a55afa4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -558,6 +558,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
 
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
+			  struct qed_tunn_start_params *p_tunn,
 			  int hw_mode,
 			  bool b_hw_start,
 			  enum qed_int_mode int_mode,
@@ -625,7 +626,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
 
 		/* send function start command */
-		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
 		if (rc)
 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
 	}
@@ -672,6 +673,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
 }
 
 int qed_hw_init(struct qed_dev *cdev,
+		struct qed_tunn_start_params *p_tunn,
 		bool b_hw_start,
 		enum qed_int_mode int_mode,
 		bool allow_npar_tx_switch,
@@ -724,7 +726,7 @@ int qed_hw_init(struct qed_dev *cdev,
 		/* Fall into */
 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 		rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-				    p_hwfn->hw_info.hw_mode,
+				    p_tunn, p_hwfn->hw_info.hw_mode,
 				    b_hw_start, int_mode,
 				    allow_npar_tx_switch);
 		break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d6c7ddf4f4d4..6aac3f855aa1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
  * @brief qed_hw_init -
  *
  * @param cdev
+ * @param p_tunn
  * @param b_hw_start
  * @param int_mode - interrupt mode [msix, inta, etc.] to use.
  * @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
  * @return int
  */
 int qed_hw_init(struct qed_dev *cdev,
+		struct qed_tunn_start_params *p_tunn,
 		bool b_hw_start,
 		enum qed_int_mode int_mode,
 		bool allow_npar_tx_switch,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a368f5e71d95..15e02ab9be5a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -46,7 +46,7 @@ enum common_ramrod_cmd_id {
 	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
 	COMMON_RAMROD_RESERVED,
 	COMMON_RAMROD_RESERVED2,
-	COMMON_RAMROD_RESERVED3,
+	COMMON_RAMROD_PF_UPDATE,
 	COMMON_RAMROD_EMPTY,
 	MAX_COMMON_RAMROD_CMD_ID
 };
@@ -626,6 +626,42 @@ struct pf_start_ramrod_data {
 	u8 reserved0[4];
 };
 
+/* tunnel configuration */
+struct pf_update_tunnel_config {
+	u8	update_rx_pf_clss;
+	u8	update_tx_pf_clss;
+	u8	set_vxlan_udp_port_flg;
+	u8	set_geneve_udp_port_flg;
+	u8	tx_enable_vxlan;
+	u8	tx_enable_l2geneve;
+	u8	tx_enable_ipgeneve;
+	u8	tx_enable_l2gre;
+	u8	tx_enable_ipgre;
+	u8	tunnel_clss_vxlan;
+	u8	tunnel_clss_l2geneve;
+	u8	tunnel_clss_ipgeneve;
+	u8	tunnel_clss_l2gre;
+	u8	tunnel_clss_ipgre;
+	__le16	vxlan_udp_port;
+	__le16	geneve_udp_port;
+	__le16	reserved[3];
+};
+
+struct pf_update_ramrod_data {
+	u32	reserved[2];
+	u32	reserved_1[6];
+	struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+	TUNNEL_CLSS_MAC_VLAN = 0,
+	TUNNEL_CLSS_MAC_VNI,
+	TUNNEL_CLSS_INNER_MAC_VLAN,
+	TUNNEL_CLSS_INNER_MAC_VNI,
+	MAX_TUNNEL_CLSS
+};
+
 enum ports_mode {
 	ENGX2_PORTX1 /* 2 engines x 1 port */,
 	ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 			  u16 start_pq,
 			  u16 num_pqs);
 
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, bool vxlan_enable);
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt, bool eth_gre_enable,
+			bool ip_gre_enable);
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, bool eth_geneve_enable,
+			   bool ip_geneve_enable);
+
 /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
 #define YSTORM_FLOW_CONTROL_MODE_OFFSET		(IRO[0].base)
 #define YSTORM_FLOW_CONTROL_MODE_SIZE		(IRO[0].size)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index f55ebdc3c832..1dd53248b984 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -788,3 +788,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 
 	return true;
 }
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+	if (enable)
+		set_bit(bit, var);
+	else
+		clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT	-188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt,
+			     u16 dest_port)
+{
+	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  bool vxlan_enable)
+{
+	unsigned long reg_val = 0;
+	u8 shift;
+
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+	if (reg_val)
+		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+		       PRS_ETH_TUNN_FIC_FORMAT);
+
+	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+	       vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			bool eth_gre_enable, bool ip_gre_enable)
+{
+	unsigned long reg_val = 0;
+	u8 shift;
+
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+	if (reg_val)
+		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+		       PRS_ETH_TUNN_FIC_FORMAT);
+
+	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+	       eth_gre_enable ? 1 : 0);
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+	       ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      u16 dest_port)
+{
+	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   bool eth_geneve_enable,
+			   bool ip_geneve_enable)
+{
+	unsigned long reg_val = 0;
+	u8 shift;
+
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+	if (reg_val)
+		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+		       PRS_ETH_TUNN_FIC_FORMAT);
+
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+	       eth_geneve_enable ? 1 : 0);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+	/* comp ver */
+	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+	/* EDPM with geneve tunnel not supported in BB_B0 */
+	if (QED_IS_BB_B0(p_hwfn->cdev))
+		return;
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+	       eth_geneve_enable ? 1 : 0);
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+	       ip_geneve_enable ? 1 : 0);
+}
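Each exported helper above programs one setting into every hardware block on the packet path (the PRS parser, NIG, PBF, and the DORQ EDPM enables). A sketch of how they compose, assuming the caller holds a valid PTT window; this helper is illustrative only and mirrors the ordering later used by qed_sp_pf_update_tunn_cfg(), which writes the destination port before flipping the enables:

	static void example_enable_vxlan(struct qed_hwfn *p_hwfn, u16 udp_port)
	{
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		/* Destination UDP port first, then the per-block enables. */
		qed_set_vxlan_dest_port(p_hwfn, p_ptt, udp_port);
		qed_set_vxlan_enable(p_hwfn, p_ptt, true);
	}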
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 5005497ee23e..fb5f3b815340 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1884,6 +1884,36 @@ static int qed_stop_txq(struct qed_dev *cdev,
 	return 0;
 }
 
+static int qed_tunn_configure(struct qed_dev *cdev,
+			      struct qed_tunn_params *tunn_params)
+{
+	struct qed_tunn_update_params tunn_info;
+	int i, rc;
+
+	memset(&tunn_info, 0, sizeof(tunn_info));
+	if (tunn_params->update_vxlan_port == 1) {
+		tunn_info.update_vxlan_udp_port = 1;
+		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+	}
+
+	if (tunn_params->update_geneve_port == 1) {
+		tunn_info.update_geneve_udp_port = 1;
+		tunn_info.geneve_udp_port = tunn_params->geneve_port;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+					       QED_SPQ_MODE_EBLOCK, NULL);
+
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 					enum qed_filter_rx_mode_type type)
 {
@@ -2026,6 +2056,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
 	.fastpath_stop = &qed_fastpath_stop,
 	.eth_cqe_completion = &qed_fp_cqe_completion,
 	.get_vport_stats = &qed_get_vport_stats,
+	.tunn_config = &qed_tunn_configure,
 };
 
 const struct qed_eth_ops *qed_get_eth_ops(void)
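The new tunn_config callback gives upper-layer drivers a single entry point for runtime tunnel updates. Hypothetically (this patch adds no such caller), a driver holding the qed_eth_ops table could push a UDP port learned from the networking stack like this:

	struct qed_tunn_params tunn_params;

	memset(&tunn_params, 0, sizeof(tunn_params));
	tunn_params.update_vxlan_port = 1;
	tunn_params.vxlan_port = port;	/* e.g. reported by the net stack */

	rc = ops->tunn_config(cdev, &tunn_params);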
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c31d485f72d6..1916992ae8b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -776,7 +776,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	/* Start the slowpath */
 	data = cdev->firmware->data;
 
-	rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+	rc = qed_hw_init(cdev, NULL, true, cdev->int_params.out.int_mode,
 			 true, data);
 	if (rc)
 		goto err2;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index c15b1622e636..55451a4dc587 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -427,4 +427,35 @@
 	0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
 	0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN	0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL		0x1f0734UL
+#define PRS_REG_VXLAN_PORT		0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0	0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE		0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE		(0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT	0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE		(0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT	1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE			(0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT		2
+
+#define NIG_REG_VXLAN_PORT	0x50105cUL
+#define PBF_REG_VXLAN_PORT	0xd80518UL
+#define PBF_REG_NGE_PORT	0xd8051cUL
+#define PRS_REG_NGE_PORT	0x1f086cUL
+#define NIG_REG_NGE_PORT	0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN	0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN	0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN	0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN	0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN	0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE	0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE	0x508b2cUL
+#define NIG_REG_NGE_COMP_VER	0x508b30UL
+#define PBF_REG_NGE_COMP_VER	0xd80524UL
+#define PRS_REG_NGE_COMP_VER	0x1f0878UL
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index d39f914b66ee..4b91cb32f317 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 
 union ramrod_data {
 	struct pf_start_ramrod_data pf_start;
+	struct pf_update_ramrod_data pf_update;
 	struct rx_queue_start_ramrod_data rx_queue_start;
 	struct rx_queue_update_ramrod_data rx_queue_update;
 	struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -338,12 +339,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  * to the internal RAM of the UStorm by the Function Start Ramrod.
  *
  * @param p_hwfn
+ * @param p_tunn
  * @param mode
  *
  * @return int
  */
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+		    struct qed_tunn_start_params *p_tunn,
 		    enum qed_mf_mode mode);
 
 /**
@@ -362,4 +365,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
 
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_tunn,
+			      enum spq_mode comp_mode,
+			      struct qed_spq_comp_cb *p_comp_data);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 1c06c37d4c3d..306da7000ddc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -87,7 +87,217 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+	switch (type) {
+	case QED_TUNN_CLSS_MAC_VLAN:
+		return TUNNEL_CLSS_MAC_VLAN;
+	case QED_TUNN_CLSS_MAC_VNI:
+		return TUNNEL_CLSS_MAC_VNI;
+	case QED_TUNN_CLSS_INNER_MAC_VLAN:
+		return TUNNEL_CLSS_INNER_MAC_VLAN;
+	case QED_TUNN_CLSS_INNER_MAC_VNI:
+		return TUNNEL_CLSS_INNER_MAC_VNI;
+	default:
+		return TUNNEL_CLSS_MAC_VLAN;
+	}
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_src,
+			      struct pf_update_tunnel_config *p_tunn_cfg)
+{
+	unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+	unsigned long update_mask = p_src->tunn_mode_update_mask;
+	unsigned long tunn_mode = p_src->tunn_mode;
+	unsigned long new_tunn_mode = 0;
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+	}
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+	}
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+	}
+
+	if (p_src->update_geneve_udp_port) {
+		p_tunn_cfg->set_geneve_udp_port_flg = 1;
+		p_tunn_cfg->geneve_udp_port =
+				cpu_to_le16(p_src->geneve_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+	}
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+	}
+
+	p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_src,
+			      struct pf_update_tunnel_config *p_tunn_cfg)
+{
+	unsigned long tunn_mode = p_src->tunn_mode;
+	enum tunnel_clss type;
+
+	qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+	p_tunn_cfg->tunnel_clss_vxlan = type;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+	p_tunn_cfg->tunnel_clss_l2gre = type;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+	p_tunn_cfg->tunnel_clss_ipgre = type;
+
+	if (p_src->update_vxlan_udp_port) {
+		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2gre = 1;
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgre = 1;
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_vxlan = 1;
+
+	if (p_src->update_geneve_udp_port) {
+		p_tunn_cfg->set_geneve_udp_port_flg = 1;
+		p_tunn_cfg->geneve_udp_port =
+				cpu_to_le16(p_src->geneve_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2geneve = 1;
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+	p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+	p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt,
+				 unsigned long tunn_mode)
+{
+	u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+	u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+		l2gre_enable = 1;
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+		ipgre_enable = 1;
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+		vxlan_enable = 1;
+
+	qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+	qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+		l2geneve_enable = 1;
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+		ipgeneve_enable = 1;
+
+	qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+			      ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+			     struct qed_tunn_start_params *p_src,
+			     struct pf_start_tunnel_config *p_tunn_cfg)
+{
+	unsigned long tunn_mode;
+	enum tunnel_clss type;
+
+	if (!p_src)
+		return;
+
+	tunn_mode = p_src->tunn_mode;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+	p_tunn_cfg->tunnel_clss_vxlan = type;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+	p_tunn_cfg->tunnel_clss_l2gre = type;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+	p_tunn_cfg->tunnel_clss_ipgre = type;
+
+	if (p_src->update_vxlan_udp_port) {
+		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2gre = 1;
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgre = 1;
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_vxlan = 1;
+
+	if (p_src->update_geneve_udp_port) {
+		p_tunn_cfg->set_geneve_udp_port_flg = 1;
+		p_tunn_cfg->geneve_udp_port =
+				cpu_to_le16(p_src->geneve_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2geneve = 1;
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+	p_tunn_cfg->tunnel_clss_l2geneve = type;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+	p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+		    struct qed_tunn_start_params *p_tunn,
 		    enum qed_mf_mode mode)
 {
 	struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -143,6 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
 		       p_hwfn->p_consq->chain.pbl.p_phys_table);
 
+	qed_tunn_set_pf_start_params(p_hwfn, NULL, NULL);
 	p_hwfn->hw_info.personality = PERSONALITY_ETH;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -153,6 +364,49 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_tunn,
+			      enum spq_mode comp_mode,
+			      struct qed_spq_comp_cb *p_comp_data)
+{
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	int rc = -EINVAL;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+				 &init_data);
+	if (rc)
+		return rc;
+
+	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+				      &p_ent->ramrod.pf_update.tunnel_config);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		return rc;
+
+	if (p_tunn->update_vxlan_udp_port)
+		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+					p_tunn->vxlan_udp_port);
+	if (p_tunn->update_geneve_udp_port)
+		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+					 p_tunn->geneve_udp_port);
+
+	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+	p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+	return rc;
+}
+
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
 {
 	struct qed_spq_entry *p_ent = NULL;