aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorRam Amrani <Ram.Amrani@caviumnetworks.com>2016-10-01 14:59:58 -0400
committerDavid S. Miller <davem@davemloft.net>2016-10-03 23:22:47 -0400
commitc295f86e60f5ba67f0f4bba2bb2c22b3cbf01ec1 (patch)
tree3642b82c16e2e3750eb4a36f4deaebf01062d7a8 /drivers/net
parent51ff17251c9c2c2e71974149d22bc73ea09c27cc (diff)
qed: PD, PKEY and CQ verb support
Add support for configuring protection domains and completion queues. Signed-off-by: Ram Amrani <Ram.Amrani@caviumnetworks.com> Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c324
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h21
2 files changed, 345 insertions, 0 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 4c53b857cc1c..f9551643428f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -663,6 +663,22 @@ int qed_rdma_add_user(void *rdma_cxt,
663 return rc; 663 return rc;
664} 664}
665 665
666struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
667{
668 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
669 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
670
671 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
672
673 /* Link may have changed */
674 p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
675 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
676
677 p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
678
679 return p_port;
680}
681
666struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) 682struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
667{ 683{
668 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 684 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -788,6 +804,309 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
788 return 0; 804 return 0;
789} 805}
790 806
807int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
808{
809 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
810 u32 returned_id;
811 int rc;
812
813 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
814
815 /* Allocates an unused protection domain */
816 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
817 rc = qed_rdma_bmap_alloc_id(p_hwfn,
818 &p_hwfn->p_rdma_info->pd_map, &returned_id);
819 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
820
821 *pd = (u16)returned_id;
822
823 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
824 return rc;
825}
826
827void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
828{
829 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
830
831 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
832
833 /* Returns a previously allocated protection domain for reuse */
834 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
835 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
836 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
837}
838
839static enum qed_rdma_toggle_bit
840qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
841{
842 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
843 enum qed_rdma_toggle_bit toggle_bit;
844 u32 bmap_id;
845
846 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
847
848 /* the function toggle the bit that is related to a given icid
849 * and returns the new toggle bit's value
850 */
851 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
852
853 spin_lock_bh(&p_info->lock);
854 toggle_bit = !test_and_change_bit(bmap_id,
855 p_info->toggle_bits.bitmap);
856 spin_unlock_bh(&p_info->lock);
857
858 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
859 toggle_bit);
860
861 return toggle_bit;
862}
863
864int qed_rdma_create_cq(void *rdma_cxt,
865 struct qed_rdma_create_cq_in_params *params, u16 *icid)
866{
867 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
868 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
869 struct rdma_create_cq_ramrod_data *p_ramrod;
870 enum qed_rdma_toggle_bit toggle_bit;
871 struct qed_sp_init_data init_data;
872 struct qed_spq_entry *p_ent;
873 u32 returned_id, start_cid;
874 int rc;
875
876 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
877 params->cq_handle_hi, params->cq_handle_lo);
878
879 /* Allocate icid */
880 spin_lock_bh(&p_info->lock);
881 rc = qed_rdma_bmap_alloc_id(p_hwfn,
882 &p_info->cq_map, &returned_id);
883 spin_unlock_bh(&p_info->lock);
884
885 if (rc) {
886 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
887 return rc;
888 }
889
890 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
891 p_info->proto);
892 *icid = returned_id + start_cid;
893
894 /* Check if icid requires a page allocation */
895 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
896 if (rc)
897 goto err;
898
899 /* Get SPQ entry */
900 memset(&init_data, 0, sizeof(init_data));
901 init_data.cid = *icid;
902 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
903 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
904
905 /* Send create CQ ramrod */
906 rc = qed_sp_init_request(p_hwfn, &p_ent,
907 RDMA_RAMROD_CREATE_CQ,
908 p_info->proto, &init_data);
909 if (rc)
910 goto err;
911
912 p_ramrod = &p_ent->ramrod.rdma_create_cq;
913
914 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
915 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
916 p_ramrod->dpi = cpu_to_le16(params->dpi);
917 p_ramrod->is_two_level_pbl = params->pbl_two_level;
918 p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
919 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
920 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
921 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
922 params->cnq_id;
923 p_ramrod->int_timeout = params->int_timeout;
924
925 /* toggle the bit for every resize or create cq for a given icid */
926 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
927
928 p_ramrod->toggle_bit = toggle_bit;
929
930 rc = qed_spq_post(p_hwfn, p_ent, NULL);
931 if (rc) {
932 /* restore toggle bit */
933 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
934 goto err;
935 }
936
937 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
938 return rc;
939
940err:
941 /* release allocated icid */
942 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
943 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
944
945 return rc;
946}
947
948int qed_rdma_resize_cq(void *rdma_cxt,
949 struct qed_rdma_resize_cq_in_params *in_params,
950 struct qed_rdma_resize_cq_out_params *out_params)
951{
952 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
953 struct rdma_resize_cq_output_params *p_ramrod_res;
954 struct rdma_resize_cq_ramrod_data *p_ramrod;
955 enum qed_rdma_toggle_bit toggle_bit;
956 struct qed_sp_init_data init_data;
957 struct qed_spq_entry *p_ent;
958 dma_addr_t ramrod_res_phys;
959 u8 fw_return_code;
960 int rc = -ENOMEM;
961
962 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
963
964 p_ramrod_res =
965 (struct rdma_resize_cq_output_params *)
966 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
967 sizeof(struct rdma_resize_cq_output_params),
968 &ramrod_res_phys, GFP_KERNEL);
969 if (!p_ramrod_res) {
970 DP_NOTICE(p_hwfn,
971 "qed resize cq failed: cannot allocate memory (ramrod)\n");
972 return rc;
973 }
974
975 /* Get SPQ entry */
976 memset(&init_data, 0, sizeof(init_data));
977 init_data.cid = in_params->icid;
978 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
979 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
980
981 rc = qed_sp_init_request(p_hwfn, &p_ent,
982 RDMA_RAMROD_RESIZE_CQ,
983 p_hwfn->p_rdma_info->proto, &init_data);
984 if (rc)
985 goto err;
986
987 p_ramrod = &p_ent->ramrod.rdma_resize_cq;
988
989 p_ramrod->flags = 0;
990
991 /* toggle the bit for every resize or create cq for a given icid */
992 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
993 in_params->icid);
994
995 SET_FIELD(p_ramrod->flags,
996 RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
997
998 SET_FIELD(p_ramrod->flags,
999 RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
1000 in_params->pbl_two_level);
1001
1002 p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
1003 p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
1004 p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
1005 DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
1006 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1007
1008 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1009 if (rc)
1010 goto err;
1011
1012 if (fw_return_code != RDMA_RETURN_OK) {
1013 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1014 rc = -EINVAL;
1015 goto err;
1016 }
1017
1018 out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
1019 out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
1020
1021 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1022 sizeof(struct rdma_resize_cq_output_params),
1023 p_ramrod_res, ramrod_res_phys);
1024
1025 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
1026
1027 return rc;
1028
1029err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1030 sizeof(struct rdma_resize_cq_output_params),
1031 p_ramrod_res, ramrod_res_phys);
1032 DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
1033
1034 return rc;
1035}
1036
1037int qed_rdma_destroy_cq(void *rdma_cxt,
1038 struct qed_rdma_destroy_cq_in_params *in_params,
1039 struct qed_rdma_destroy_cq_out_params *out_params)
1040{
1041 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1042 struct rdma_destroy_cq_output_params *p_ramrod_res;
1043 struct rdma_destroy_cq_ramrod_data *p_ramrod;
1044 struct qed_sp_init_data init_data;
1045 struct qed_spq_entry *p_ent;
1046 dma_addr_t ramrod_res_phys;
1047 int rc = -ENOMEM;
1048
1049 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
1050
1051 p_ramrod_res =
1052 (struct rdma_destroy_cq_output_params *)
1053 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1054 sizeof(struct rdma_destroy_cq_output_params),
1055 &ramrod_res_phys, GFP_KERNEL);
1056 if (!p_ramrod_res) {
1057 DP_NOTICE(p_hwfn,
1058 "qed destroy cq failed: cannot allocate memory (ramrod)\n");
1059 return rc;
1060 }
1061
1062 /* Get SPQ entry */
1063 memset(&init_data, 0, sizeof(init_data));
1064 init_data.cid = in_params->icid;
1065 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1066 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1067
1068 /* Send destroy CQ ramrod */
1069 rc = qed_sp_init_request(p_hwfn, &p_ent,
1070 RDMA_RAMROD_DESTROY_CQ,
1071 p_hwfn->p_rdma_info->proto, &init_data);
1072 if (rc)
1073 goto err;
1074
1075 p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1076 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1077
1078 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1079 if (rc)
1080 goto err;
1081
1082 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1083
1084 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1085 sizeof(struct rdma_destroy_cq_output_params),
1086 p_ramrod_res, ramrod_res_phys);
1087
1088 /* Free icid */
1089 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1090
1091 qed_bmap_release_id(p_hwfn,
1092 &p_hwfn->p_rdma_info->cq_map,
1093 (in_params->icid -
1094 qed_cxt_get_proto_cid_start(p_hwfn,
1095 p_hwfn->
1096 p_rdma_info->proto)));
1097
1098 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1099
1100 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1101 return rc;
1102
1103err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1104 sizeof(struct rdma_destroy_cq_output_params),
1105 p_ramrod_res, ramrod_res_phys);
1106
1107 return rc;
1108}
1109
791static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) 1110static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
792{ 1111{
793 return QED_LEADING_HWFN(cdev); 1112 return QED_LEADING_HWFN(cdev);
@@ -871,12 +1190,17 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
871 .rdma_add_user = &qed_rdma_add_user, 1190 .rdma_add_user = &qed_rdma_add_user,
872 .rdma_remove_user = &qed_rdma_remove_user, 1191 .rdma_remove_user = &qed_rdma_remove_user,
873 .rdma_stop = &qed_rdma_stop, 1192 .rdma_stop = &qed_rdma_stop,
1193 .rdma_query_port = &qed_rdma_query_port,
874 .rdma_query_device = &qed_rdma_query_device, 1194 .rdma_query_device = &qed_rdma_query_device,
875 .rdma_get_start_sb = &qed_rdma_get_sb_start, 1195 .rdma_get_start_sb = &qed_rdma_get_sb_start,
876 .rdma_get_rdma_int = &qed_rdma_get_int, 1196 .rdma_get_rdma_int = &qed_rdma_get_int,
877 .rdma_set_rdma_int = &qed_rdma_set_int, 1197 .rdma_set_rdma_int = &qed_rdma_set_int,
878 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, 1198 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
879 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, 1199 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
1200 .rdma_alloc_pd = &qed_rdma_alloc_pd,
1201 .rdma_dealloc_pd = &qed_rdma_free_pd,
1202 .rdma_create_cq = &qed_rdma_create_cq,
1203 .rdma_destroy_cq = &qed_rdma_destroy_cq,
880}; 1204};
881 1205
882const struct qed_rdma_ops *qed_get_rdma_ops() 1206const struct qed_rdma_ops *qed_get_rdma_ops()
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index e55048106a83..1fe73707e0b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -94,6 +94,26 @@ struct qed_rdma_info {
94 enum protocol_type proto; 94 enum protocol_type proto;
95}; 95};
96 96
97struct qed_rdma_resize_cq_in_params {
 98	u16 icid;		/* internal CID identifying the CQ to resize */
 99	u32 cq_size;		/* requested number of CQ entries (max_cqes in the ramrod) */
100	bool pbl_two_level;	/* true when the PBL is two-level */
101	u64 pbl_ptr;		/* PBL address passed to FW via DMA_REGPAIR_LE */
102	u16 pbl_num_pages;	/* number of pages the PBL describes */
103	u8 pbl_page_size_log;	/* log2 of the PBL page size; FW is given (value - 12) */
104};
105
106struct qed_rdma_resize_cq_out_params {
107	u32 prod;		/* old CQ producer, from the ramrod output buffer */
108	u32 cons;		/* old CQ consumer, from the ramrod output buffer */
109};
110
111struct qed_rdma_resize_cnq_in_params {
112	u32 cnq_id;		/* NOTE(review): not referenced in this patch's .c hunks */
113	u32 pbl_page_size_log;	/* log2 of the PBL page size */
114	u64 pbl_ptr;		/* PBL address */
115};
116
116
97int 117int
98qed_rdma_add_user(void *rdma_cxt, 118qed_rdma_add_user(void *rdma_cxt,
99 struct qed_rdma_add_user_out_params *out_params); 119 struct qed_rdma_add_user_out_params *out_params);
@@ -102,6 +122,7 @@ int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
102int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid); 122int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
103void qed_rdma_free_tid(void *rdma_cxt, u32 tid); 123void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
104struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt); 124struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
125struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
105int 126int
106qed_rdma_register_tid(void *rdma_cxt, 127qed_rdma_register_tid(void *rdma_cxt,
107 struct qed_rdma_register_tid_in_params *params); 128 struct qed_rdma_register_tid_in_params *params);