diff options
author | Ram Amrani <Ram.Amrani@caviumnetworks.com> | 2016-10-01 14:59:57 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-10-03 23:22:46 -0400 |
commit | 51ff17251c9c2c2e71974149d22bc73ea09c27cc (patch) | |
tree | 8f6a9a8e6644567eab832e7d7ca8c7b0b3b5dd8a | |
parent | cee9fbd8e2e9e713cd8bf227c6492fd8854de74b (diff) |
qed: Add support for RoCE hw init
This adds the backbone required for the various HW initializations
which are necessary for the qedr driver - FW notification, resource
initializations, etc.
Signed-off-by: Ram Amrani <Ram.Amrani@caviumnetworks.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/Makefile | 1 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed.h | 33 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_cxt.c | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_cxt.h | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_dev.c | 154 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_main.c | 44 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 8 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_roce.c | 886 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_roce.h | 123 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_spq.c | 8 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_sriov.c | 4 | ||||
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_vf.c | 2 | ||||
-rw-r--r-- | include/linux/qed/common_hsi.h | 1 | ||||
-rw-r--r-- | include/linux/qed/qed_if.h | 5 | ||||
-rw-r--r-- | include/linux/qed/qed_roce_if.h | 345 | ||||
-rw-r--r-- | include/linux/qed/rdma_common.h | 1 |
17 files changed, 1620 insertions, 8 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index e067098f10a9..cda0af7fbc20 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile | |||
@@ -5,3 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ | |||
5 | qed_selftest.o qed_dcbx.o qed_debug.o | 5 | qed_selftest.o qed_dcbx.o qed_debug.o |
6 | qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o | 6 | qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o |
7 | qed-$(CONFIG_QED_LL2) += qed_ll2.o | 7 | qed-$(CONFIG_QED_LL2) += qed_ll2.o |
8 | qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 91b571a3670b..c5a098a2ca2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
@@ -35,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass; | |||
35 | 35 | ||
36 | #define QED_WFQ_UNIT 100 | 36 | #define QED_WFQ_UNIT 100 |
37 | 37 | ||
38 | #define QED_WID_SIZE (1024) | ||
39 | #define QED_PF_DEMS_SIZE (4) | ||
40 | |||
38 | /* cau states */ | 41 | /* cau states */ |
39 | enum qed_coalescing_mode { | 42 | enum qed_coalescing_mode { |
40 | QED_COAL_MODE_DISABLE, | 43 | QED_COAL_MODE_DISABLE, |
@@ -50,6 +53,14 @@ enum qed_mcp_protocol_type; | |||
50 | static inline u32 qed_db_addr(u32 cid, u32 DEMS) | 53 | static inline u32 qed_db_addr(u32 cid, u32 DEMS) |
51 | { | 54 | { |
52 | u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | | 55 | u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | |
56 | (cid * QED_PF_DEMS_SIZE); | ||
57 | |||
58 | return db_addr; | ||
59 | } | ||
60 | |||
61 | static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS) | ||
62 | { | ||
63 | u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | | ||
53 | FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); | 64 | FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); |
54 | 65 | ||
55 | return db_addr; | 66 | return db_addr; |
@@ -152,14 +163,17 @@ enum QED_RESOURCES { | |||
152 | QED_RL, | 163 | QED_RL, |
153 | QED_MAC, | 164 | QED_MAC, |
154 | QED_VLAN, | 165 | QED_VLAN, |
166 | QED_RDMA_CNQ_RAM, | ||
155 | QED_ILT, | 167 | QED_ILT, |
156 | QED_LL2_QUEUE, | 168 | QED_LL2_QUEUE, |
169 | QED_RDMA_STATS_QUEUE, | ||
157 | QED_MAX_RESC, | 170 | QED_MAX_RESC, |
158 | }; | 171 | }; |
159 | 172 | ||
160 | enum QED_FEATURE { | 173 | enum QED_FEATURE { |
161 | QED_PF_L2_QUE, | 174 | QED_PF_L2_QUE, |
162 | QED_VF, | 175 | QED_VF, |
176 | QED_RDMA_CNQ, | ||
163 | QED_MAX_FEATURES, | 177 | QED_MAX_FEATURES, |
164 | }; | 178 | }; |
165 | 179 | ||
@@ -364,6 +378,7 @@ struct qed_hwfn { | |||
364 | /* Protocol related */ | 378 | /* Protocol related */ |
365 | bool using_ll2; | 379 | bool using_ll2; |
366 | struct qed_ll2_info *p_ll2_info; | 380 | struct qed_ll2_info *p_ll2_info; |
381 | struct qed_rdma_info *p_rdma_info; | ||
367 | struct qed_pf_params pf_params; | 382 | struct qed_pf_params pf_params; |
368 | 383 | ||
369 | bool b_rdma_enabled_in_prs; | 384 | bool b_rdma_enabled_in_prs; |
@@ -402,6 +417,17 @@ struct qed_hwfn { | |||
402 | 417 | ||
403 | struct dbg_tools_data dbg_info; | 418 | struct dbg_tools_data dbg_info; |
404 | 419 | ||
420 | /* PWM region specific data */ | ||
421 | u32 dpi_size; | ||
422 | u32 dpi_count; | ||
423 | |||
424 | /* This is used to calculate the doorbell address */ | ||
425 | u32 dpi_start_offset; | ||
426 | |||
427 | /* If one of the following is set then EDPM shouldn't be used */ | ||
428 | u8 dcbx_no_edpm; | ||
429 | u8 db_bar_no_edpm; | ||
430 | |||
405 | struct qed_simd_fp_handler simd_proto_handler[64]; | 431 | struct qed_simd_fp_handler simd_proto_handler[64]; |
406 | 432 | ||
407 | #ifdef CONFIG_QED_SRIOV | 433 | #ifdef CONFIG_QED_SRIOV |
@@ -435,6 +461,8 @@ struct qed_int_params { | |||
435 | bool fp_initialized; | 461 | bool fp_initialized; |
436 | u8 fp_msix_base; | 462 | u8 fp_msix_base; |
437 | u8 fp_msix_cnt; | 463 | u8 fp_msix_cnt; |
464 | u8 rdma_msix_base; | ||
465 | u8 rdma_msix_cnt; | ||
438 | }; | 466 | }; |
439 | 467 | ||
440 | struct qed_dbg_feature { | 468 | struct qed_dbg_feature { |
@@ -541,7 +569,6 @@ struct qed_dev { | |||
541 | 569 | ||
542 | bool b_is_vf; | 570 | bool b_is_vf; |
543 | u32 drv_type; | 571 | u32 drv_type; |
544 | |||
545 | struct qed_eth_stats *reset_stats; | 572 | struct qed_eth_stats *reset_stats; |
546 | struct qed_fw_data *fw_data; | 573 | struct qed_fw_data *fw_data; |
547 | 574 | ||
@@ -574,6 +601,10 @@ struct qed_dev { | |||
574 | #endif | 601 | #endif |
575 | 602 | ||
576 | const struct firmware *firmware; | 603 | const struct firmware *firmware; |
604 | |||
605 | u32 rdma_max_sge; | ||
606 | u32 rdma_max_inline; | ||
607 | u32 rdma_max_srq_sge; | ||
577 | }; | 608 | }; |
578 | 609 | ||
579 | #define NUM_OF_VFS(dev) MAX_NUM_VFS_BB | 610 | #define NUM_OF_VFS(dev) MAX_NUM_VFS_BB |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index d9bea2a9c9f7..82370a1a59ad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c | |||
@@ -48,7 +48,13 @@ | |||
48 | #define TM_ELEM_SIZE 4 | 48 | #define TM_ELEM_SIZE 4 |
49 | 49 | ||
50 | /* ILT constants */ | 50 | /* ILT constants */ |
51 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
52 | /* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ | ||
53 | #define ILT_DEFAULT_HW_P_SIZE 4 | ||
54 | #else | ||
51 | #define ILT_DEFAULT_HW_P_SIZE 3 | 55 | #define ILT_DEFAULT_HW_P_SIZE 3 |
56 | #endif | ||
57 | |||
52 | #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) | 58 | #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) |
53 | #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET | 59 | #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET |
54 | 60 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index c6f6f2e8192d..d00ad055802b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h | |||
@@ -170,6 +170,12 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); | |||
170 | */ | 170 | */ |
171 | void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, | 171 | void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, |
172 | u32 cid); | 172 | u32 cid); |
173 | int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, | ||
174 | enum qed_cxt_elem_type elem_type, u32 iid); | ||
175 | u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, | ||
176 | enum protocol_type type); | ||
177 | u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, | ||
178 | enum protocol_type type); | ||
173 | 179 | ||
174 | #define QED_CTX_WORKING_MEM 0 | 180 | #define QED_CTX_WORKING_MEM 0 |
175 | #define QED_CTX_FL_MEM 1 | 181 | #define QED_CTX_FL_MEM 1 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9a8e153df841..754f6a908858 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
@@ -35,9 +35,13 @@ | |||
35 | #include "qed_sp.h" | 35 | #include "qed_sp.h" |
36 | #include "qed_sriov.h" | 36 | #include "qed_sriov.h" |
37 | #include "qed_vf.h" | 37 | #include "qed_vf.h" |
38 | #include "qed_roce.h" | ||
38 | 39 | ||
39 | static DEFINE_SPINLOCK(qm_lock); | 40 | static DEFINE_SPINLOCK(qm_lock); |
40 | 41 | ||
42 | #define QED_MIN_DPIS (4) | ||
43 | #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) | ||
44 | |||
41 | /* API common to all protocols */ | 45 | /* API common to all protocols */ |
42 | enum BAR_ID { | 46 | enum BAR_ID { |
43 | BAR_ID_0, /* used for GRC */ | 47 | BAR_ID_0, /* used for GRC */ |
@@ -787,6 +791,136 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, | |||
787 | return rc; | 791 | return rc; |
788 | } | 792 | } |
789 | 793 | ||
794 | static int | ||
795 | qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, | ||
796 | struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) | ||
797 | { | ||
798 | u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; | ||
799 | u32 dpi_bit_shift, dpi_count; | ||
800 | u32 min_dpis; | ||
801 | |||
802 | /* Calculate DPI size */ | ||
803 | dpi_page_size_1 = QED_WID_SIZE * n_cpus; | ||
804 | dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE); | ||
805 | dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2); | ||
806 | dpi_page_size = roundup_pow_of_two(dpi_page_size); | ||
807 | dpi_bit_shift = ilog2(dpi_page_size / 4096); | ||
808 | |||
809 | dpi_count = pwm_region_size / dpi_page_size; | ||
810 | |||
811 | min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; | ||
812 | min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); | ||
813 | |||
814 | p_hwfn->dpi_size = dpi_page_size; | ||
815 | p_hwfn->dpi_count = dpi_count; | ||
816 | |||
817 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); | ||
818 | |||
819 | if (dpi_count < min_dpis) | ||
820 | return -EINVAL; | ||
821 | |||
822 | return 0; | ||
823 | } | ||
824 | |||
825 | enum QED_ROCE_EDPM_MODE { | ||
826 | QED_ROCE_EDPM_MODE_ENABLE = 0, | ||
827 | QED_ROCE_EDPM_MODE_FORCE_ON = 1, | ||
828 | QED_ROCE_EDPM_MODE_DISABLE = 2, | ||
829 | }; | ||
830 | |||
831 | static int | ||
832 | qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | ||
833 | { | ||
834 | u32 pwm_regsize, norm_regsize; | ||
835 | u32 non_pwm_conn, min_addr_reg1; | ||
836 | u32 db_bar_size, n_cpus; | ||
837 | u32 roce_edpm_mode; | ||
838 | u32 pf_dems_shift; | ||
839 | int rc = 0; | ||
840 | u8 cond; | ||
841 | |||
842 | db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1); | ||
843 | if (p_hwfn->cdev->num_hwfns > 1) | ||
844 | db_bar_size /= 2; | ||
845 | |||
846 | /* Calculate doorbell regions */ | ||
847 | non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + | ||
848 | qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, | ||
849 | NULL) + | ||
850 | qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, | ||
851 | NULL); | ||
852 | norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096); | ||
853 | min_addr_reg1 = norm_regsize / 4096; | ||
854 | pwm_regsize = db_bar_size - norm_regsize; | ||
855 | |||
856 | /* Check that the normal and PWM sizes are valid */ | ||
857 | if (db_bar_size < norm_regsize) { | ||
858 | DP_ERR(p_hwfn->cdev, | ||
859 | "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", | ||
860 | db_bar_size, norm_regsize); | ||
861 | return -EINVAL; | ||
862 | } | ||
863 | |||
864 | if (pwm_regsize < QED_MIN_PWM_REGION) { | ||
865 | DP_ERR(p_hwfn->cdev, | ||
866 | "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", | ||
867 | pwm_regsize, | ||
868 | QED_MIN_PWM_REGION, db_bar_size, norm_regsize); | ||
869 | return -EINVAL; | ||
870 | } | ||
871 | |||
872 | /* Calculate number of DPIs */ | ||
873 | roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; | ||
874 | if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || | ||
875 | ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { | ||
876 | /* Either EDPM is mandatory, or we are attempting to allocate a | ||
877 | * WID per CPU. | ||
878 | */ | ||
879 | n_cpus = num_active_cpus(); | ||
880 | rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); | ||
881 | } | ||
882 | |||
883 | cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || | ||
884 | (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); | ||
885 | if (cond || p_hwfn->dcbx_no_edpm) { | ||
886 | /* Either EDPM is disabled from user configuration, or it is | ||
887 | * disabled via DCBx, or it is not mandatory and we failed to | ||
888 | * allocated a WID per CPU. | ||
889 | */ | ||
890 | n_cpus = 1; | ||
891 | rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); | ||
892 | |||
893 | if (cond) | ||
894 | qed_rdma_dpm_bar(p_hwfn, p_ptt); | ||
895 | } | ||
896 | |||
897 | DP_INFO(p_hwfn, | ||
898 | "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", | ||
899 | norm_regsize, | ||
900 | pwm_regsize, | ||
901 | p_hwfn->dpi_size, | ||
902 | p_hwfn->dpi_count, | ||
903 | ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? | ||
904 | "disabled" : "enabled"); | ||
905 | |||
906 | if (rc) { | ||
907 | DP_ERR(p_hwfn, | ||
908 | "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n", | ||
909 | p_hwfn->dpi_count, | ||
910 | p_hwfn->pf_params.rdma_pf_params.min_dpis); | ||
911 | return -EINVAL; | ||
912 | } | ||
913 | |||
914 | p_hwfn->dpi_start_offset = norm_regsize; | ||
915 | |||
916 | /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ | ||
917 | pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); | ||
918 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); | ||
919 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | |||
790 | static int qed_hw_init_port(struct qed_hwfn *p_hwfn, | 924 | static int qed_hw_init_port(struct qed_hwfn *p_hwfn, |
791 | struct qed_ptt *p_ptt, int hw_mode) | 925 | struct qed_ptt *p_ptt, int hw_mode) |
792 | { | 926 | { |
@@ -860,6 +994,10 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, | |||
860 | /* Pure runtime initializations - directly to the HW */ | 994 | /* Pure runtime initializations - directly to the HW */ |
861 | qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); | 995 | qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); |
862 | 996 | ||
997 | rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); | ||
998 | if (rc) | ||
999 | return rc; | ||
1000 | |||
863 | if (b_hw_start) { | 1001 | if (b_hw_start) { |
864 | /* enable interrupts */ | 1002 | /* enable interrupts */ |
865 | qed_int_igu_enable(p_hwfn, p_ptt, int_mode); | 1003 | qed_int_igu_enable(p_hwfn, p_ptt, int_mode); |
@@ -1284,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) | |||
1284 | u32 *feat_num = p_hwfn->hw_info.feat_num; | 1422 | u32 *feat_num = p_hwfn->hw_info.feat_num; |
1285 | int num_features = 1; | 1423 | int num_features = 1; |
1286 | 1424 | ||
1425 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
1426 | /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the | ||
1427 | * status blocks equally between L2 / RoCE but with consideration as | ||
1428 | * to how many l2 queues / cnqs we have | ||
1429 | */ | ||
1430 | if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { | ||
1431 | num_features++; | ||
1432 | |||
1433 | feat_num[QED_RDMA_CNQ] = | ||
1434 | min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, | ||
1435 | RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); | ||
1436 | } | ||
1437 | #endif | ||
1287 | feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / | 1438 | feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / |
1288 | num_features, | 1439 | num_features, |
1289 | RESC_NUM(p_hwfn, QED_L2_QUEUE)); | 1440 | RESC_NUM(p_hwfn, QED_L2_QUEUE)); |
@@ -1325,6 +1476,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) | |||
1325 | num_funcs; | 1476 | num_funcs; |
1326 | resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; | 1477 | resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; |
1327 | resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs; | 1478 | resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs; |
1479 | resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs; | ||
1480 | resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB / | ||
1481 | num_funcs; | ||
1328 | 1482 | ||
1329 | for (i = 0; i < QED_MAX_RESC; i++) | 1483 | for (i = 0; i < QED_MAX_RESC; i++) |
1330 | resc_start[i] = resc_num[i] * enabled_func_idx; | 1484 | resc_start[i] = resc_num[i] * enabled_func_idx; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 48cdf62c025b..4ee3151e80c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -33,6 +33,11 @@ | |||
33 | #include "qed_hw.h" | 33 | #include "qed_hw.h" |
34 | #include "qed_selftest.h" | 34 | #include "qed_selftest.h" |
35 | 35 | ||
36 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
37 | #define QED_ROCE_QPS (8192) | ||
38 | #define QED_ROCE_DPIS (8) | ||
39 | #endif | ||
40 | |||
36 | static char version[] = | 41 | static char version[] = |
37 | "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; | 42 | "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; |
38 | 43 | ||
@@ -206,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, | |||
206 | dev_info->pci_mem_start = cdev->pci_params.mem_start; | 211 | dev_info->pci_mem_start = cdev->pci_params.mem_start; |
207 | dev_info->pci_mem_end = cdev->pci_params.mem_end; | 212 | dev_info->pci_mem_end = cdev->pci_params.mem_end; |
208 | dev_info->pci_irq = cdev->pci_params.irq; | 213 | dev_info->pci_irq = cdev->pci_params.irq; |
209 | dev_info->rdma_supported = | 214 | dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == |
210 | (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); | 215 | QED_PCI_ETH_ROCE); |
211 | dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); | 216 | dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); |
212 | ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); | 217 | ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); |
213 | 218 | ||
@@ -677,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, | |||
677 | enum qed_int_mode int_mode) | 682 | enum qed_int_mode int_mode) |
678 | { | 683 | { |
679 | struct qed_sb_cnt_info sb_cnt_info; | 684 | struct qed_sb_cnt_info sb_cnt_info; |
685 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
686 | int num_l2_queues; | ||
687 | #endif | ||
680 | int rc; | 688 | int rc; |
681 | int i; | 689 | int i; |
682 | 690 | ||
@@ -707,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, | |||
707 | cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - | 715 | cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - |
708 | cdev->num_hwfns; | 716 | cdev->num_hwfns; |
709 | 717 | ||
718 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
719 | num_l2_queues = 0; | ||
720 | for_each_hwfn(cdev, i) | ||
721 | num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); | ||
722 | |||
723 | DP_VERBOSE(cdev, QED_MSG_RDMA, | ||
724 | "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", | ||
725 | cdev->int_params.fp_msix_cnt, num_l2_queues); | ||
726 | |||
727 | if (cdev->int_params.fp_msix_cnt > num_l2_queues) { | ||
728 | cdev->int_params.rdma_msix_cnt = | ||
729 | (cdev->int_params.fp_msix_cnt - num_l2_queues) | ||
730 | / cdev->num_hwfns; | ||
731 | cdev->int_params.rdma_msix_base = | ||
732 | cdev->int_params.fp_msix_base + num_l2_queues; | ||
733 | cdev->int_params.fp_msix_cnt = num_l2_queues; | ||
734 | } else { | ||
735 | cdev->int_params.rdma_msix_cnt = 0; | ||
736 | } | ||
737 | |||
738 | DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", | ||
739 | cdev->int_params.rdma_msix_cnt, | ||
740 | cdev->int_params.rdma_msix_base); | ||
741 | #endif | ||
742 | |||
710 | return 0; | 743 | return 0; |
711 | } | 744 | } |
712 | 745 | ||
@@ -810,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
810 | { | 843 | { |
811 | int i; | 844 | int i; |
812 | 845 | ||
846 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
847 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
848 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
849 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
850 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
851 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
852 | #endif | ||
813 | for (i = 0; i < cdev->num_hwfns; i++) { | 853 | for (i = 0; i < cdev->num_hwfns; i++) { |
814 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 854 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
815 | 855 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index e75738d21783..b414a0542177 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
@@ -1448,5 +1448,11 @@ | |||
1448 | 0x620000UL | 1448 | 0x620000UL |
1449 | #define PHY_PCIE_REG_PHY1 \ | 1449 | #define PHY_PCIE_REG_PHY1 \ |
1450 | 0x624000UL | 1450 | 0x624000UL |
1451 | 1451 | #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL | |
1452 | #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL | ||
1453 | #define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL | ||
1454 | #define DORQ_REG_PF_DPM_ENABLE 0x100510UL | ||
1455 | #define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL | ||
1456 | #define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL | ||
1457 | #define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL | ||
1452 | #endif | 1458 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c new file mode 100644 index 000000000000..4c53b857cc1c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c | |||
@@ -0,0 +1,886 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015-2016 QLogic Corporation | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and /or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #include <linux/types.h> | ||
33 | #include <asm/byteorder.h> | ||
34 | #include <linux/bitops.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/dma-mapping.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <linux/etherdevice.h> | ||
39 | #include <linux/if_ether.h> | ||
40 | #include <linux/if_vlan.h> | ||
41 | #include <linux/io.h> | ||
42 | #include <linux/ip.h> | ||
43 | #include <linux/ipv6.h> | ||
44 | #include <linux/kernel.h> | ||
45 | #include <linux/list.h> | ||
46 | #include <linux/module.h> | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/pci.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/spinlock.h> | ||
51 | #include <linux/string.h> | ||
52 | #include <linux/tcp.h> | ||
53 | #include <linux/bitops.h> | ||
54 | #include <linux/qed/qed_roce_if.h> | ||
55 | #include <linux/qed/qed_roce_if.h> | ||
56 | #include "qed.h" | ||
57 | #include "qed_cxt.h" | ||
58 | #include "qed_hsi.h" | ||
59 | #include "qed_hw.h" | ||
60 | #include "qed_init_ops.h" | ||
61 | #include "qed_int.h" | ||
62 | #include "qed_ll2.h" | ||
63 | #include "qed_mcp.h" | ||
64 | #include "qed_reg_addr.h" | ||
65 | #include "qed_sp.h" | ||
66 | #include "qed_roce.h" | ||
67 | |||
68 | void qed_async_roce_event(struct qed_hwfn *p_hwfn, | ||
69 | struct event_ring_entry *p_eqe) | ||
70 | { | ||
71 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; | ||
72 | |||
73 | p_rdma_info->events.affiliated_event(p_rdma_info->events.context, | ||
74 | p_eqe->opcode, &p_eqe->data); | ||
75 | } | ||
76 | |||
77 | static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, | ||
78 | struct qed_bmap *bmap, u32 max_count) | ||
79 | { | ||
80 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); | ||
81 | |||
82 | bmap->max_count = max_count; | ||
83 | |||
84 | bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long), | ||
85 | GFP_KERNEL); | ||
86 | if (!bmap->bitmap) { | ||
87 | DP_NOTICE(p_hwfn, | ||
88 | "qed bmap alloc failed: cannot allocate memory (bitmap)\n"); | ||
89 | return -ENOMEM; | ||
90 | } | ||
91 | |||
92 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n", | ||
93 | bmap->bitmap); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, | ||
98 | struct qed_bmap *bmap, u32 *id_num) | ||
99 | { | ||
100 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap); | ||
101 | |||
102 | *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); | ||
103 | |||
104 | if (*id_num >= bmap->max_count) { | ||
105 | DP_NOTICE(p_hwfn, "no id available max_count=%d\n", | ||
106 | bmap->max_count); | ||
107 | return -EINVAL; | ||
108 | } | ||
109 | |||
110 | __set_bit(*id_num, bmap->bitmap); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, | ||
116 | struct qed_bmap *bmap, u32 id_num) | ||
117 | { | ||
118 | bool b_acquired; | ||
119 | |||
120 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num); | ||
121 | if (id_num >= bmap->max_count) | ||
122 | return; | ||
123 | |||
124 | b_acquired = test_and_clear_bit(id_num, bmap->bitmap); | ||
125 | if (!b_acquired) { | ||
126 | DP_NOTICE(p_hwfn, "ID %d already released\n", id_num); | ||
127 | return; | ||
128 | } | ||
129 | } | ||
130 | |||
131 | u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) | ||
132 | { | ||
133 | /* First sb id for RoCE is after all the l2 sb */ | ||
134 | return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; | ||
135 | } | ||
136 | |||
137 | u32 qed_rdma_query_cau_timer_res(void *rdma_cxt) | ||
138 | { | ||
139 | return QED_CAU_DEF_RX_TIMER_RES; | ||
140 | } | ||
141 | |||
142 | static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, | ||
143 | struct qed_ptt *p_ptt, | ||
144 | struct qed_rdma_start_in_params *params) | ||
145 | { | ||
146 | struct qed_rdma_info *p_rdma_info; | ||
147 | u32 num_cons, num_tasks; | ||
148 | int rc = -ENOMEM; | ||
149 | |||
150 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); | ||
151 | |||
152 | /* Allocate a struct with current pf rdma info */ | ||
153 | p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); | ||
154 | if (!p_rdma_info) { | ||
155 | DP_NOTICE(p_hwfn, | ||
156 | "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n", | ||
157 | rc); | ||
158 | return rc; | ||
159 | } | ||
160 | |||
161 | p_hwfn->p_rdma_info = p_rdma_info; | ||
162 | p_rdma_info->proto = PROTOCOLID_ROCE; | ||
163 | |||
164 | num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0); | ||
165 | |||
166 | p_rdma_info->num_qps = num_cons / 2; | ||
167 | |||
168 | num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); | ||
169 | |||
170 | /* Each MR uses a single task */ | ||
171 | p_rdma_info->num_mrs = num_tasks; | ||
172 | |||
173 | /* Queue zone lines are shared between RoCE and L2 in such a way that | ||
174 | * they can be used by each without obstructing the other. | ||
175 | */ | ||
176 | p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE); | ||
177 | |||
178 | /* Allocate a struct with device params and fill it */ | ||
179 | p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); | ||
180 | if (!p_rdma_info->dev) { | ||
181 | DP_NOTICE(p_hwfn, | ||
182 | "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n", | ||
183 | rc); | ||
184 | goto free_rdma_info; | ||
185 | } | ||
186 | |||
187 | /* Allocate a struct with port params and fill it */ | ||
188 | p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); | ||
189 | if (!p_rdma_info->port) { | ||
190 | DP_NOTICE(p_hwfn, | ||
191 | "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n", | ||
192 | rc); | ||
193 | goto free_rdma_dev; | ||
194 | } | ||
195 | |||
196 | /* Allocate bit map for pd's */ | ||
197 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS); | ||
198 | if (rc) { | ||
199 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | ||
200 | "Failed to allocate pd_map, rc = %d\n", | ||
201 | rc); | ||
202 | goto free_rdma_port; | ||
203 | } | ||
204 | |||
205 | /* Allocate DPI bitmap */ | ||
206 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, | ||
207 | p_hwfn->dpi_count); | ||
208 | if (rc) { | ||
209 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | ||
210 | "Failed to allocate DPI bitmap, rc = %d\n", rc); | ||
211 | goto free_pd_map; | ||
212 | } | ||
213 | |||
214 | /* Allocate bitmap for cq's. The maximum number of CQs is bounded to | ||
215 | * twice the number of QPs. | ||
216 | */ | ||
217 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, | ||
218 | p_rdma_info->num_qps * 2); | ||
219 | if (rc) { | ||
220 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | ||
221 | "Failed to allocate cq bitmap, rc = %d\n", rc); | ||
222 | goto free_dpi_map; | ||
223 | } | ||
224 | |||
225 | /* Allocate bitmap for toggle bit for cq icids | ||
226 | * We toggle the bit every time we create or resize cq for a given icid. | ||
227 | * The maximum number of CQs is bounded to twice the number of QPs. | ||
228 | */ | ||
229 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, | ||
230 | p_rdma_info->num_qps * 2); | ||
231 | if (rc) { | ||
232 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | ||
233 | "Failed to allocate toogle bits, rc = %d\n", rc); | ||
234 | goto free_cq_map; | ||
235 | } | ||
236 | |||
237 | /* Allocate bitmap for itids */ | ||
238 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, | ||
239 | p_rdma_info->num_mrs); | ||
240 | if (rc) { | ||
241 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | ||
242 | "Failed to allocate itids bitmaps, rc = %d\n", rc); | ||
243 | goto free_toggle_map; | ||
244 | } | ||
245 | |||
246 | /* Allocate bitmap for cids used for qps. */ | ||
247 | rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons); | ||
248 | if (rc) { | ||
249 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | ||
250 | "Failed to allocate cid bitmap, rc = %d\n", rc); | ||
251 | goto free_tid_map; | ||
252 | } | ||
253 | |||
254 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); | ||
255 | return 0; | ||
256 | |||
257 | free_tid_map: | ||
258 | kfree(p_rdma_info->tid_map.bitmap); | ||
259 | free_toggle_map: | ||
260 | kfree(p_rdma_info->toggle_bits.bitmap); | ||
261 | free_cq_map: | ||
262 | kfree(p_rdma_info->cq_map.bitmap); | ||
263 | free_dpi_map: | ||
264 | kfree(p_rdma_info->dpi_map.bitmap); | ||
265 | free_pd_map: | ||
266 | kfree(p_rdma_info->pd_map.bitmap); | ||
267 | free_rdma_port: | ||
268 | kfree(p_rdma_info->port); | ||
269 | free_rdma_dev: | ||
270 | kfree(p_rdma_info->dev); | ||
271 | free_rdma_info: | ||
272 | kfree(p_rdma_info); | ||
273 | |||
274 | return rc; | ||
275 | } | ||
276 | |||
277 | void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) | ||
278 | { | ||
279 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; | ||
280 | |||
281 | kfree(p_rdma_info->cid_map.bitmap); | ||
282 | kfree(p_rdma_info->tid_map.bitmap); | ||
283 | kfree(p_rdma_info->toggle_bits.bitmap); | ||
284 | kfree(p_rdma_info->cq_map.bitmap); | ||
285 | kfree(p_rdma_info->dpi_map.bitmap); | ||
286 | kfree(p_rdma_info->pd_map.bitmap); | ||
287 | |||
288 | kfree(p_rdma_info->port); | ||
289 | kfree(p_rdma_info->dev); | ||
290 | |||
291 | kfree(p_rdma_info); | ||
292 | } | ||
293 | |||
/* Tear down all RDMA state for this hwfn; currently a thin wrapper around
 * qed_rdma_resc_free() that adds the debug trace.
 */
static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_resc_free(p_hwfn);
}
300 | |||
301 | static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) | ||
302 | { | ||
303 | guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; | ||
304 | guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; | ||
305 | guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; | ||
306 | guid[3] = 0xff; | ||
307 | guid[4] = 0xfe; | ||
308 | guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; | ||
309 | guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; | ||
310 | guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; | ||
311 | } | ||
312 | |||
313 | static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, | ||
314 | struct qed_rdma_start_in_params *params) | ||
315 | { | ||
316 | struct qed_rdma_events *events; | ||
317 | |||
318 | events = &p_hwfn->p_rdma_info->events; | ||
319 | |||
320 | events->unaffiliated_event = params->events->unaffiliated_event; | ||
321 | events->affiliated_event = params->events->affiliated_event; | ||
322 | events->context = params->events->context; | ||
323 | } | ||
324 | |||
/* Fill the qed_rdma_device capability struct that qed_rdma_query_device()
 * later hands to the qedr driver: identity, limits and capability flags.
 */
static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = 0;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	/* GUID derived from the port MAC; node GUID mirrors the system one */
	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	/* SGE limit is the smaller of the SQ/RQ WQE limits, optionally capped
	 * further by a module/probe-time override in cdev->rdma_max_sge.
	 */
	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	/* Optional override for the inline-data limit as well */
	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs uses the same icids that QPs use hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	/* Memory windows unsupported; FMR/PBL limits derive from PAGE_SIZE */
	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	/* RD-atomic resources: responder elements are double-sized, hence
	 * the *2 in the responder divisor.
	 */
	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space.
	 * NOTE(review): the bit tested is PCI_EXP_DEVCTL2_LTR_EN, not an
	 * atomic-op capability bit — looks intentional for this HW family
	 * but worth confirming against the device errata/spec.
	 */
	pci_read_config_dword(cdev->pdev,
			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
			      &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
}
423 | |||
424 | static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) | ||
425 | { | ||
426 | struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; | ||
427 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; | ||
428 | |||
429 | port->port_state = p_hwfn->mcp_info->link_output.link_up ? | ||
430 | QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; | ||
431 | |||
432 | port->max_msg_size = min_t(u64, | ||
433 | (dev->max_mr_mw_fmr_size * | ||
434 | p_hwfn->cdev->rdma_max_sge), | ||
435 | BIT(31)); | ||
436 | |||
437 | port->pkey_bad_counter = 0; | ||
438 | } | ||
439 | |||
/* Program the parser registers needed before RoCE traffic can flow.
 * Returns 0 on success, -EINVAL if the firmware handed us an odd first
 * RoCE cid (QP icids come in even/odd pairs, so the base must be even).
 */
static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	/* Only record the search register here; see the comment below */
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	/* We delay writing to this reg until first cid is allocated. See
	 * qed_cxt_dynamic_ilt_alloc function for more details
	 */
	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}
466 | |||
/* Send the RDMA_RAMROD_FUNC_INIT slowpath ramrod that starts the RDMA
 * function in firmware, describing the CNQs (status block, PBL, queue
 * zone) the upper layer asked for. Blocks until the ramrod completes.
 */
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	/* cq_ring_mode selects 16-bit vs 32-bit CQ producer/consumer */
	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	/* Describe each CNQ: its IGU status block, interrupt index, and the
	 * PBL that backs the completion ring.
	 */
	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
		p_cnq_params->sb_num =
			cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
528 | |||
529 | static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) | ||
530 | { | ||
531 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; | ||
532 | |||
533 | /* The first DPI is reserved for the Kernel */ | ||
534 | __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap); | ||
535 | |||
536 | /* Tid 0 will be used as the key for "reserved MR". | ||
537 | * The driver should allocate memory for it so it can be loaded but no | ||
538 | * ramrod should be passed on it. | ||
539 | */ | ||
540 | qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); | ||
541 | if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { | ||
542 | DP_NOTICE(p_hwfn, | ||
543 | "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | |||
/* Second phase of qed_rdma_start(): initialize SW state (device info,
 * port, event callbacks), reserve the kernel lkey, program parser HW and
 * finally launch the firmware via the FUNC_INIT ramrod. Each step must
 * succeed before the next runs; errors propagate to the caller, which
 * owns the cleanup (qed_rdma_free).
 */
static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	spin_lock_init(&p_hwfn->p_rdma_info->lock);

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
574 | |||
/* Stop RDMA on this hwfn: disable RoCE parsing in HW, send the
 * FUNC_CLOSE ramrod to firmware and free all SW resources. Resources are
 * freed even if the ramrod fails; the last error is returned.
 * @rdma_cxt: opaque context — actually the struct qed_hwfn pointer.
 */
int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	/* Clear the RoCE (bit 0) ethertype enable set at init time */
	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	/* HW access done; the ramrod path below doesn't need the PTT */
	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	/* Free SW resources regardless of ramrod outcome */
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}
630 | |||
631 | int qed_rdma_add_user(void *rdma_cxt, | ||
632 | struct qed_rdma_add_user_out_params *out_params) | ||
633 | { | ||
634 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | ||
635 | u32 dpi_start_offset; | ||
636 | u32 returned_id = 0; | ||
637 | int rc; | ||
638 | |||
639 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); | ||
640 | |||
641 | /* Allocate DPI */ | ||
642 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | ||
643 | rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, | ||
644 | &returned_id); | ||
645 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | ||
646 | |||
647 | out_params->dpi = (u16)returned_id; | ||
648 | |||
649 | /* Calculate the corresponding DPI address */ | ||
650 | dpi_start_offset = p_hwfn->dpi_start_offset; | ||
651 | |||
652 | out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + | ||
653 | dpi_start_offset + | ||
654 | ((out_params->dpi) * p_hwfn->dpi_size)); | ||
655 | |||
656 | out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr + | ||
657 | dpi_start_offset + | ||
658 | ((out_params->dpi) * p_hwfn->dpi_size); | ||
659 | |||
660 | out_params->dpi_size = p_hwfn->dpi_size; | ||
661 | |||
662 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); | ||
663 | return rc; | ||
664 | } | ||
665 | |||
666 | struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) | ||
667 | { | ||
668 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | ||
669 | |||
670 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); | ||
671 | |||
672 | /* Return struct with device parameters */ | ||
673 | return p_hwfn->p_rdma_info->dev; | ||
674 | } | ||
675 | |||
/* Allocate a task id (TID) for a memory region. Takes an id from the tid
 * bitmap under the rdma lock, then (outside the lock) makes sure the ILT
 * line backing that task context exists. Returns 0 and writes the id to
 * *itid on success.
 */
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	/* May allocate ILT memory; deliberately done after dropping the lock */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}
695 | |||
/* Advance the CNQ producer in USDM RAM so firmware knows how far the
 * driver has consumed the completion notification queue.
 * @qz_offset: CNQ index relative to this PF's queue-zone base.
 * @prod: new 16-bit producer value.
 */
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;
	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}
712 | |||
713 | static int qed_fill_rdma_dev_info(struct qed_dev *cdev, | ||
714 | struct qed_dev_rdma_info *info) | ||
715 | { | ||
716 | memset(info, 0, sizeof(*info)); | ||
717 | |||
718 | info->rdma_type = QED_RDMA_TYPE_ROCE; | ||
719 | |||
720 | qed_fill_dev_info(cdev, &info->common); | ||
721 | |||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static int qed_rdma_get_sb_start(struct qed_dev *cdev) | ||
726 | { | ||
727 | int feat_num; | ||
728 | |||
729 | if (cdev->num_hwfns > 1) | ||
730 | feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE); | ||
731 | else | ||
732 | feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) * | ||
733 | cdev->num_hwfns; | ||
734 | |||
735 | return feat_num; | ||
736 | } | ||
737 | |||
738 | static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) | ||
739 | { | ||
740 | int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ); | ||
741 | int n_msix = cdev->int_params.rdma_msix_cnt; | ||
742 | |||
743 | return min_t(int, n_cnq, n_msix); | ||
744 | } | ||
745 | |||
746 | static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) | ||
747 | { | ||
748 | int limit = 0; | ||
749 | |||
750 | /* Mark the fastpath as free/used */ | ||
751 | cdev->int_params.fp_initialized = cnt ? true : false; | ||
752 | |||
753 | if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { | ||
754 | DP_ERR(cdev, | ||
755 | "qed roce supports only MSI-X interrupts (detected %d).\n", | ||
756 | cdev->int_params.out.int_mode); | ||
757 | return -EINVAL; | ||
758 | } else if (cdev->int_params.fp_msix_cnt) { | ||
759 | limit = cdev->int_params.rdma_msix_cnt; | ||
760 | } | ||
761 | |||
762 | if (!limit) | ||
763 | return -ENOMEM; | ||
764 | |||
765 | return min_t(int, cnt, limit); | ||
766 | } | ||
767 | |||
/* Report the MSI-X vectors reserved for RDMA to the protocol driver.
 * Fails with -EINVAL if qed_rdma_set_int() has not configured the
 * fastpath yet; in non-MSI-X modes @info is simply left zeroed.
 */
static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		/* RDMA vectors live at the tail of the shared MSI-X table */
		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}
790 | |||
/* The opaque rdma_cxt handed to all qed_rdma_* entry points is simply the
 * leading hwfn of the device.
 */
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_LEADING_HWFN(cdev);
}
795 | |||
796 | static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | ||
797 | { | ||
798 | u32 val; | ||
799 | |||
800 | val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; | ||
801 | |||
802 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); | ||
803 | DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), | ||
804 | "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", | ||
805 | val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); | ||
806 | } | ||
807 | |||
/* Called when the doorbell BAR cannot support eDPM; latch that fact and
 * reprogram the DPM enable register accordingly.
 */
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
814 | |||
/* Bring up RDMA on a hwfn: acquire a PTT, allocate all SW resources
 * (qed_rdma_alloc) and run the HW/FW setup (qed_rdma_setup). Unwinds in
 * reverse order on failure. Returns 0 on success, -EBUSY when no PTT is
 * available, or the underlying error.
 * @rdma_cxt: opaque context — actually the struct qed_hwfn pointer.
 */
int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_ptt *p_ptt;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
	if (rc)
		goto err1;

	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;

err2:
	qed_rdma_free(p_hwfn);
err1:
	qed_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}
848 | |||
/* qed_rdma_ops entry point: start RDMA on the leading hwfn of the device. */
static int qed_rdma_init(struct qed_dev *cdev,
			 struct qed_rdma_start_in_params *params)
{
	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
854 | |||
/* Undo qed_rdma_add_user(): return the user's DPI to the bitmap so it can
 * be handed out again. Caller must not touch the doorbell area afterwards.
 */
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
865 | |||
/* Operations table exported to the qedr protocol driver via
 * qed_get_rdma_ops(); bundles the common qed ops with the RDMA-specific
 * entry points defined above.
 */
static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_rdma_dev_info,
	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
	.rdma_init = &qed_rdma_init,
	.rdma_add_user = &qed_rdma_add_user,
	.rdma_remove_user = &qed_rdma_remove_user,
	.rdma_stop = &qed_rdma_stop,
	.rdma_query_device = &qed_rdma_query_device,
	.rdma_get_start_sb = &qed_rdma_get_sb_start,
	.rdma_get_rdma_int = &qed_rdma_get_int,
	.rdma_set_rdma_int = &qed_rdma_set_int,
	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
};
881 | |||
882 | const struct qed_rdma_ops *qed_get_rdma_ops() | ||
883 | { | ||
884 | return &qed_rdma_ops_pass; | ||
885 | } | ||
886 | EXPORT_SYMBOL(qed_get_rdma_ops); | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h new file mode 100644 index 000000000000..e55048106a83 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015-2016 QLogic Corporation | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and /or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #ifndef _QED_ROCE_H | ||
33 | #define _QED_ROCE_H | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/bitops.h> | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/list.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/qed/qed_if.h> | ||
41 | #include <linux/qed/qed_roce_if.h> | ||
42 | #include "qed.h" | ||
43 | #include "qed_dev_api.h" | ||
44 | #include "qed_hsi.h" | ||
45 | |||
/* Device limits advertised through qed_rdma_device (see
 * qed_rdma_init_devinfo in qed_roce.c).
 */
#define QED_RDMA_MAX_FMR                              (RDMA_MAX_TIDS)
#define QED_RDMA_MAX_P_KEY                            (1)
#define QED_RDMA_MAX_WQE                              (0x7FFF)
#define QED_RDMA_MAX_SRQ_WQE_ELEM                     (0x7FFF)
#define QED_RDMA_PAGE_SIZE_CAPS                       (0xFFFFF000)
#define QED_RDMA_ACK_DELAY                            (15)
#define QED_RDMA_MAX_MR_SIZE                          (0x10000000000ULL)
#define QED_RDMA_MAX_CQS                              (RDMA_MAX_CQS)
#define QED_RDMA_MAX_MRS                              (RDMA_MAX_TIDS)
/* Add 1 for header element */
#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE	(RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define QED_RDMA_MAX_SGE_PER_SRQ_WQE	(RDMA_MAX_SGE_PER_RQ_WQE)
#define QED_RDMA_SRQ_WQE_ELEM_SIZE                    (16)
#define QED_RDMA_MAX_SRQS                             (32 * 1024)

/* Max CQE counts per CQ ring mode (one entry kept unusable on purpose) */
#define QED_RDMA_MAX_CQE_32_BIT                       (0x7FFFFFFF - 1)
#define QED_RDMA_MAX_CQE_16_BIT                       (0x7FFF - 1)
63 | |||
/* State of a per-icid CQ toggle bit; flipped on every CQ create/resize. */
enum qed_rdma_toggle_bit {
	QED_RDMA_TOGGLE_BIT_CLEAR = 0,
	QED_RDMA_TOGGLE_BIT_SET = 1
};
68 | |||
/* Simple sized ID bitmap used for PDs, DPIs, CQs, TIDs, cids, etc. */
struct qed_bmap {
	unsigned long *bitmap;	/* storage, allocated by qed_rdma_bmap_alloc */
	u32 max_count;		/* number of valid IDs in the map */
};
73 | |||
/* Per-hwfn RDMA bookkeeping, allocated by qed_rdma_alloc() and pointed to
 * by qed_hwfn::p_rdma_info.
 */
struct qed_rdma_info {
	/* spin lock to protect bitmaps */
	spinlock_t lock;

	struct qed_bmap cq_map;		/* completion queue ids */
	struct qed_bmap pd_map;		/* protection domain ids */
	struct qed_bmap tid_map;	/* task/MR ids */
	struct qed_bmap qp_map;		/* queue pair ids */
	struct qed_bmap srq_map;	/* shared receive queue ids */
	struct qed_bmap cid_map;	/* connection ids for QPs */
	struct qed_bmap dpi_map;	/* doorbell page ids (per user) */
	struct qed_bmap toggle_bits;	/* per-icid CQ toggle bits */
	struct qed_rdma_events events;	/* cached qedr callbacks */
	struct qed_rdma_device *dev;	/* advertised device capabilities */
	struct qed_rdma_port *port;	/* advertised port parameters */
	u32 last_tid;			/* highest TID handed out so far */
	u8 num_cnqs;			/* CNQs started, for the close ramrod */
	u32 num_qps;			/* QP budget from PF params */
	u32 num_mrs;			/* MR (TID) budget */
	u16 queue_zone_base;		/* first queue zone owned by this PF */
	enum protocol_type proto;	/* PROTOCOLID_ROCE */
};
96 | |||
/* Entry points implemented in qed_roce.c; rdma_cxt is a qed_hwfn pointer
 * obtained via the rdma_get_rdma_ctx op.
 */
int
qed_rdma_add_user(void *rdma_cxt,
		  struct qed_rdma_add_user_out_params *out_params);
int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params);
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
int qed_rdma_stop(void *rdma_cxt);
u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
/* EQ async-event dispatch hook, called from qed_spq.c */
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
			  struct event_ring_entry *p_eqe);
117 | |||
118 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
119 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); | ||
120 | #else | ||
121 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} | ||
122 | #endif | ||
123 | #endif | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index a3c539f1c2ac..652c90819758 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -85,6 +85,7 @@ union ramrod_data { | |||
85 | struct rdma_srq_create_ramrod_data rdma_create_srq; | 85 | struct rdma_srq_create_ramrod_data rdma_create_srq; |
86 | struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; | 86 | struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; |
87 | struct rdma_srq_modify_ramrod_data rdma_modify_srq; | 87 | struct rdma_srq_modify_ramrod_data rdma_modify_srq; |
88 | struct roce_init_func_ramrod_data roce_init_func; | ||
88 | 89 | ||
89 | struct iscsi_slow_path_hdr iscsi_empty; | 90 | struct iscsi_slow_path_hdr iscsi_empty; |
90 | struct iscsi_init_ramrod_params iscsi_init; | 91 | struct iscsi_init_ramrod_params iscsi_init; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 349af182d085..caff41544898 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -28,6 +28,9 @@ | |||
28 | #include "qed_reg_addr.h" | 28 | #include "qed_reg_addr.h" |
29 | #include "qed_sp.h" | 29 | #include "qed_sp.h" |
30 | #include "qed_sriov.h" | 30 | #include "qed_sriov.h" |
31 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
32 | #include "qed_roce.h" | ||
33 | #endif | ||
31 | 34 | ||
32 | /*************************************************************************** | 35 | /*************************************************************************** |
33 | * Structures & Definitions | 36 | * Structures & Definitions |
@@ -237,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, | |||
237 | struct event_ring_entry *p_eqe) | 240 | struct event_ring_entry *p_eqe) |
238 | { | 241 | { |
239 | switch (p_eqe->protocol_id) { | 242 | switch (p_eqe->protocol_id) { |
243 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
244 | case PROTOCOLID_ROCE: | ||
245 | qed_async_roce_event(p_hwfn, p_eqe); | ||
246 | return 0; | ||
247 | #endif | ||
240 | case PROTOCOLID_COMMON: | 248 | case PROTOCOLID_COMMON: |
241 | return qed_sriov_eqe_event(p_hwfn, | 249 | return qed_sriov_eqe_event(p_hwfn, |
242 | p_eqe->opcode, | 250 | p_eqe->opcode, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index a4a3cead15bb..d2d6621fe0e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c | |||
@@ -1851,8 +1851,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, | |||
1851 | if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { | 1851 | if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { |
1852 | u16 qid = mbx->req_virt->start_txq.tx_qid; | 1852 | u16 qid = mbx->req_virt->start_txq.tx_qid; |
1853 | 1853 | ||
1854 | p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, | 1854 | p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid, |
1855 | DQ_DEMS_LEGACY); | 1855 | DQ_DEMS_LEGACY); |
1856 | } | 1856 | } |
1857 | 1857 | ||
1858 | qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); | 1858 | qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 85334ceaf69c..abf5bf11f865 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c | |||
@@ -544,7 +544,7 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, | |||
544 | u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; | 544 | u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; |
545 | u32 db_addr; | 545 | u32 db_addr; |
546 | 546 | ||
547 | db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY); | 547 | db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); |
548 | *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + | 548 | *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + |
549 | db_addr; | 549 | db_addr; |
550 | } | 550 | } |
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 19027635df0d..734deb094618 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h | |||
@@ -674,6 +674,7 @@ union event_ring_data { | |||
674 | struct iscsi_eqe_data iscsi_info; | 674 | struct iscsi_eqe_data iscsi_info; |
675 | struct malicious_vf_eqe_data malicious_vf; | 675 | struct malicious_vf_eqe_data malicious_vf; |
676 | struct initial_cleanup_eqe_data vf_init_cleanup; | 676 | struct initial_cleanup_eqe_data vf_init_cleanup; |
677 | struct regpair roce_handle; | ||
677 | }; | 678 | }; |
678 | 679 | ||
679 | /* Event Ring Entry */ | 680 | /* Event Ring Entry */ |
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e313742b571d..f9ae903bbb84 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h | |||
@@ -34,6 +34,8 @@ enum dcbx_protocol_type { | |||
34 | DCBX_MAX_PROTOCOL_TYPE | 34 | DCBX_MAX_PROTOCOL_TYPE |
35 | }; | 35 | }; |
36 | 36 | ||
37 | #define QED_ROCE_PROTOCOL_INDEX (3) | ||
38 | |||
37 | #ifdef CONFIG_DCB | 39 | #ifdef CONFIG_DCB |
38 | #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 | 40 | #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 |
39 | #define QED_LLDP_PORT_ID_STAT_LEN 4 | 41 | #define QED_LLDP_PORT_ID_STAT_LEN 4 |
@@ -268,6 +270,7 @@ struct qed_dev_info { | |||
268 | 270 | ||
269 | enum qed_sb_type { | 271 | enum qed_sb_type { |
270 | QED_SB_TYPE_L2_QUEUE, | 272 | QED_SB_TYPE_L2_QUEUE, |
273 | QED_SB_TYPE_CNQ, | ||
271 | }; | 274 | }; |
272 | 275 | ||
273 | enum qed_protocol { | 276 | enum qed_protocol { |
@@ -628,7 +631,7 @@ enum DP_MODULE { | |||
628 | QED_MSG_CXT = 0x800000, | 631 | QED_MSG_CXT = 0x800000, |
629 | QED_MSG_LL2 = 0x1000000, | 632 | QED_MSG_LL2 = 0x1000000, |
630 | QED_MSG_ILT = 0x2000000, | 633 | QED_MSG_ILT = 0x2000000, |
631 | QED_MSG_ROCE = 0x4000000, | 634 | QED_MSG_RDMA = 0x4000000, |
632 | QED_MSG_DEBUG = 0x8000000, | 635 | QED_MSG_DEBUG = 0x8000000, |
633 | /* to be added...up to 0x8000000 */ | 636 | /* to be added...up to 0x8000000 */ |
634 | }; | 637 | }; |
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h new file mode 100644 index 000000000000..0f7d5275e515 --- /dev/null +++ b/include/linux/qed/qed_roce_if.h | |||
@@ -0,0 +1,345 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015-2016 QLogic Corporation | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and /or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #ifndef _QED_ROCE_IF_H | ||
33 | #define _QED_ROCE_IF_H | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/list.h> | ||
37 | #include <linux/mutex.h> | ||
38 | #include <linux/pci.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/qed/qed_if.h> | ||
41 | #include <linux/qed/qed_ll2_if.h> | ||
42 | |||
43 | #define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) | ||
44 | |||
45 | /* rdma interface */ | ||
46 | enum qed_rdma_tid_type { | ||
47 | QED_RDMA_TID_REGISTERED_MR, | ||
48 | QED_RDMA_TID_FMR, | ||
49 | QED_RDMA_TID_MW_TYPE1, | ||
50 | QED_RDMA_TID_MW_TYPE2A | ||
51 | }; | ||
52 | |||
53 | struct qed_rdma_events { | ||
54 | void *context; | ||
55 | void (*affiliated_event)(void *context, u8 fw_event_code, | ||
56 | void *fw_handle); | ||
57 | void (*unaffiliated_event)(void *context, u8 event_code); | ||
58 | }; | ||
59 | |||
60 | struct qed_rdma_device { | ||
61 | u32 vendor_id; | ||
62 | u32 vendor_part_id; | ||
63 | u32 hw_ver; | ||
64 | u64 fw_ver; | ||
65 | |||
66 | u64 node_guid; | ||
67 | u64 sys_image_guid; | ||
68 | |||
69 | u8 max_cnq; | ||
70 | u8 max_sge; | ||
71 | u8 max_srq_sge; | ||
72 | u16 max_inline; | ||
73 | u32 max_wqe; | ||
74 | u32 max_srq_wqe; | ||
75 | u8 max_qp_resp_rd_atomic_resc; | ||
76 | u8 max_qp_req_rd_atomic_resc; | ||
77 | u64 max_dev_resp_rd_atomic_resc; | ||
78 | u32 max_cq; | ||
79 | u32 max_qp; | ||
80 | u32 max_srq; | ||
81 | u32 max_mr; | ||
82 | u64 max_mr_size; | ||
83 | u32 max_cqe; | ||
84 | u32 max_mw; | ||
85 | u32 max_fmr; | ||
86 | u32 max_mr_mw_fmr_pbl; | ||
87 | u64 max_mr_mw_fmr_size; | ||
88 | u32 max_pd; | ||
89 | u32 max_ah; | ||
90 | u8 max_pkey; | ||
91 | u16 max_srq_wr; | ||
92 | u8 max_stats_queues; | ||
93 | u32 dev_caps; | ||
94 | |||
95 | /* Ability to support RNR-NAK generation */ | ||
96 | |||
97 | #define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1 | ||
98 | #define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0 | ||
99 | /* Ability to support shutdown port */ | ||
100 | #define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1 | ||
101 | #define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1 | ||
102 | /* Ability to support port active event */ | ||
103 | #define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1 | ||
104 | #define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2 | ||
105 | /* Ability to support port change event */ | ||
106 | #define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1 | ||
107 | #define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3 | ||
108 | /* Ability to support system image GUID */ | ||
109 | #define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1 | ||
110 | #define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4 | ||
111 | /* Ability to support a bad P_Key counter */ | ||
112 | #define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1 | ||
113 | #define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5 | ||
114 | /* Ability to support atomic operations */ | ||
115 | #define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1 | ||
116 | #define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6 | ||
117 | #define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1 | ||
118 | #define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7 | ||
119 | /* Ability to support modifying the maximum number of | ||
120 | * outstanding work requests per QP | ||
121 | */ | ||
122 | #define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1 | ||
123 | #define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8 | ||
124 | /* Ability to support automatic path migration */ | ||
125 | #define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1 | ||
126 | #define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9 | ||
127 | /* Ability to support the base memory management extensions */ | ||
128 | #define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1 | ||
129 | #define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10 | ||
130 | #define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1 | ||
131 | #define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11 | ||
132 | /* Ability to support multiple page sizes per memory region */ | ||
133 | #define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1 | ||
134 | #define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12 | ||
135 | /* Ability to support block list physical buffer list */ | ||
136 | #define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1 | ||
137 | #define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13 | ||
138 | /* Ability to support zero-based virtual addresses */ | ||
139 | #define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1 | ||
140 | #define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14 | ||
141 | /* Ability to support local invalidate fencing */ | ||
142 | #define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1 | ||
143 | #define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15 | ||
144 | /* Ability to support loopback on QP */ | ||
145 | #define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1 | ||
146 | #define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16 | ||
147 | u64 page_size_caps; | ||
148 | u8 dev_ack_delay; | ||
149 | u32 reserved_lkey; | ||
150 | u32 bad_pkey_counter; | ||
151 | struct qed_rdma_events events; | ||
152 | }; | ||
153 | |||
154 | enum qed_port_state { | ||
155 | QED_RDMA_PORT_UP, | ||
156 | QED_RDMA_PORT_DOWN, | ||
157 | }; | ||
158 | |||
159 | enum qed_roce_capability { | ||
160 | QED_ROCE_V1 = 1 << 0, | ||
161 | QED_ROCE_V2 = 1 << 1, | ||
162 | }; | ||
163 | |||
164 | struct qed_rdma_port { | ||
165 | enum qed_port_state port_state; | ||
166 | int link_speed; | ||
167 | u64 max_msg_size; | ||
168 | u8 source_gid_table_len; | ||
169 | void *source_gid_table_ptr; | ||
170 | u8 pkey_table_len; | ||
171 | void *pkey_table_ptr; | ||
172 | u32 pkey_bad_counter; | ||
173 | enum qed_roce_capability capability; | ||
174 | }; | ||
175 | |||
176 | struct qed_rdma_cnq_params { | ||
177 | u8 num_pbl_pages; | ||
178 | u64 pbl_ptr; | ||
179 | }; | ||
180 | |||
181 | /* The CQ Mode affects the CQ doorbell transaction size. | ||
182 | * 64/32 bit machines should configure to 32/16 bits respectively. | ||
183 | */ | ||
184 | enum qed_rdma_cq_mode { | ||
185 | QED_RDMA_CQ_MODE_16_BITS, | ||
186 | QED_RDMA_CQ_MODE_32_BITS, | ||
187 | }; | ||
188 | |||
189 | struct qed_roce_dcqcn_params { | ||
190 | u8 notification_point; | ||
191 | u8 reaction_point; | ||
192 | |||
193 | /* fields for notification point */ | ||
194 | u32 cnp_send_timeout; | ||
195 | |||
196 | /* fields for reaction point */ | ||
197 | u32 rl_bc_rate; | ||
198 | u16 rl_max_rate; | ||
199 | u16 rl_r_ai; | ||
200 | u16 rl_r_hai; | ||
201 | u16 dcqcn_g; | ||
202 | u32 dcqcn_k_us; | ||
203 | u32 dcqcn_timeout_us; | ||
204 | }; | ||
205 | |||
206 | struct qed_rdma_start_in_params { | ||
207 | struct qed_rdma_events *events; | ||
208 | struct qed_rdma_cnq_params cnq_pbl_list[128]; | ||
209 | u8 desired_cnq; | ||
210 | enum qed_rdma_cq_mode cq_mode; | ||
211 | struct qed_roce_dcqcn_params dcqcn_params; | ||
212 | u16 max_mtu; | ||
213 | u8 mac_addr[ETH_ALEN]; | ||
214 | u8 iwarp_flags; | ||
215 | }; | ||
216 | |||
217 | struct qed_rdma_add_user_out_params { | ||
218 | u16 dpi; | ||
219 | u64 dpi_addr; | ||
220 | u64 dpi_phys_addr; | ||
221 | u32 dpi_size; | ||
222 | }; | ||
223 | |||
224 | enum roce_mode { | ||
225 | ROCE_V1, | ||
226 | ROCE_V2_IPV4, | ||
227 | ROCE_V2_IPV6, | ||
228 | MAX_ROCE_MODE | ||
229 | }; | ||
230 | |||
231 | union qed_gid { | ||
232 | u8 bytes[16]; | ||
233 | u16 words[8]; | ||
234 | u32 dwords[4]; | ||
235 | u64 qwords[2]; | ||
236 | u32 ipv4_addr; | ||
237 | }; | ||
238 | |||
239 | struct qed_rdma_register_tid_in_params { | ||
240 | u32 itid; | ||
241 | enum qed_rdma_tid_type tid_type; | ||
242 | u8 key; | ||
243 | u16 pd; | ||
244 | bool local_read; | ||
245 | bool local_write; | ||
246 | bool remote_read; | ||
247 | bool remote_write; | ||
248 | bool remote_atomic; | ||
249 | bool mw_bind; | ||
250 | u64 pbl_ptr; | ||
251 | bool pbl_two_level; | ||
252 | u8 pbl_page_size_log; | ||
253 | u8 page_size_log; | ||
254 | u32 fbo; | ||
255 | u64 length; | ||
256 | u64 vaddr; | ||
257 | bool zbva; | ||
258 | bool phy_mr; | ||
259 | bool dma_mr; | ||
260 | |||
261 | bool dif_enabled; | ||
262 | u64 dif_error_addr; | ||
263 | u64 dif_runt_addr; | ||
264 | }; | ||
265 | |||
266 | struct qed_rdma_create_srq_in_params { | ||
267 | u64 pbl_base_addr; | ||
268 | u64 prod_pair_addr; | ||
269 | u16 num_pages; | ||
270 | u16 pd_id; | ||
271 | u16 page_size; | ||
272 | }; | ||
273 | |||
274 | struct qed_rdma_create_srq_out_params { | ||
275 | u16 srq_id; | ||
276 | }; | ||
277 | |||
278 | struct qed_rdma_destroy_srq_in_params { | ||
279 | u16 srq_id; | ||
280 | }; | ||
281 | |||
282 | struct qed_rdma_modify_srq_in_params { | ||
283 | u32 wqe_limit; | ||
284 | u16 srq_id; | ||
285 | }; | ||
286 | |||
287 | struct qed_rdma_stats_out_params { | ||
288 | u64 sent_bytes; | ||
289 | u64 sent_pkts; | ||
290 | u64 rcv_bytes; | ||
291 | u64 rcv_pkts; | ||
292 | }; | ||
293 | |||
294 | struct qed_rdma_counters_out_params { | ||
295 | u64 pd_count; | ||
296 | u64 max_pd; | ||
297 | u64 dpi_count; | ||
298 | u64 max_dpi; | ||
299 | u64 cq_count; | ||
300 | u64 max_cq; | ||
301 | u64 qp_count; | ||
302 | u64 max_qp; | ||
303 | u64 tid_count; | ||
304 | u64 max_tid; | ||
305 | }; | ||
306 | |||
307 | #define QED_ROCE_TX_HEAD_FAILURE (1) | ||
308 | #define QED_ROCE_TX_FRAG_FAILURE (2) | ||
309 | |||
310 | enum qed_rdma_type { | ||
311 | QED_RDMA_TYPE_ROCE, | ||
312 | }; | ||
313 | |||
314 | struct qed_dev_rdma_info { | ||
315 | struct qed_dev_info common; | ||
316 | enum qed_rdma_type rdma_type; | ||
317 | }; | ||
318 | |||
319 | struct qed_rdma_ops { | ||
320 | const struct qed_common_ops *common; | ||
321 | |||
322 | int (*fill_dev_info)(struct qed_dev *cdev, | ||
323 | struct qed_dev_rdma_info *info); | ||
324 | void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev); | ||
325 | |||
326 | int (*rdma_init)(struct qed_dev *dev, | ||
327 | struct qed_rdma_start_in_params *iparams); | ||
328 | |||
329 | int (*rdma_add_user)(void *rdma_cxt, | ||
330 | struct qed_rdma_add_user_out_params *oparams); | ||
331 | |||
332 | void (*rdma_remove_user)(void *rdma_cxt, u16 dpi); | ||
333 | int (*rdma_stop)(void *rdma_cxt); | ||
334 | struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt); | ||
335 | int (*rdma_get_start_sb)(struct qed_dev *cdev); | ||
336 | int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev); | ||
337 | void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod); | ||
338 | int (*rdma_get_rdma_int)(struct qed_dev *cdev, | ||
339 | struct qed_int_info *info); | ||
340 | int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); | ||
341 | }; | ||
342 | |||
343 | const struct qed_rdma_ops *qed_get_rdma_ops(void); | ||
344 | |||
345 | #endif | ||
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 187991c1f439..7663725faa94 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #define RDMA_MAX_PDS (64 * 1024) | 28 | #define RDMA_MAX_PDS (64 * 1024) |
29 | 29 | ||
30 | #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS | 30 | #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS |
31 | #define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB | ||
31 | 32 | ||
32 | #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) | 33 | #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) |
33 | 34 | ||