diff options
author | Ram Amrani <Ram.Amrani@cavium.com> | 2016-10-10 06:15:34 -0400 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2016-10-14 15:00:10 -0400 |
commit | cecbcddf6461a11ce229e80bb3981415220c9763 (patch) | |
tree | 36698f6c07e427f27b57f7c922700141f8ea2ab7 | |
parent | a7efd7773e31b60f695816c27393fc717a9df127 (diff) |
qedr: Add support for QP verbs
Add support for Queue Pair verbs which adds, deletes,
modifies and queries Queue Pairs.
Signed-off-by: Rajesh Borundia <rajesh.borundia@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- | drivers/infiniband/hw/qedr/main.c | 15 | ||||
-rw-r--r-- | drivers/infiniband/hw/qedr/qedr.h | 125 | ||||
-rw-r--r-- | drivers/infiniband/hw/qedr/qedr_cm.h | 40 | ||||
-rw-r--r-- | drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 11 | ||||
-rw-r--r-- | drivers/infiniband/hw/qedr/verbs.c | 1089 | ||||
-rw-r--r-- | drivers/infiniband/hw/qedr/verbs.h | 7 | ||||
-rw-r--r-- | include/uapi/rdma/qedr-abi.h | 34 |
7 files changed, 1320 insertions, 1 deletions
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 35928abb6b63..13ba47b7b99f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c | |||
@@ -48,6 +48,8 @@ MODULE_AUTHOR("QLogic Corporation"); | |||
48 | MODULE_LICENSE("Dual BSD/GPL"); | 48 | MODULE_LICENSE("Dual BSD/GPL"); |
49 | MODULE_VERSION(QEDR_MODULE_VERSION); | 49 | MODULE_VERSION(QEDR_MODULE_VERSION); |
50 | 50 | ||
51 | #define QEDR_WQ_MULTIPLIER_DFT (3) | ||
52 | |||
51 | void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, | 53 | void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, |
52 | enum ib_event_type type) | 54 | enum ib_event_type type) |
53 | { | 55 | { |
@@ -94,7 +96,11 @@ static int qedr_register_device(struct qedr_dev *dev) | |||
94 | QEDR_UVERBS(CREATE_CQ) | | 96 | QEDR_UVERBS(CREATE_CQ) | |
95 | QEDR_UVERBS(RESIZE_CQ) | | 97 | QEDR_UVERBS(RESIZE_CQ) | |
96 | QEDR_UVERBS(DESTROY_CQ) | | 98 | QEDR_UVERBS(DESTROY_CQ) | |
97 | QEDR_UVERBS(REQ_NOTIFY_CQ); | 99 | QEDR_UVERBS(REQ_NOTIFY_CQ) | |
100 | QEDR_UVERBS(CREATE_QP) | | ||
101 | QEDR_UVERBS(MODIFY_QP) | | ||
102 | QEDR_UVERBS(QUERY_QP) | | ||
103 | QEDR_UVERBS(DESTROY_QP); | ||
98 | 104 | ||
99 | dev->ibdev.phys_port_cnt = 1; | 105 | dev->ibdev.phys_port_cnt = 1; |
100 | dev->ibdev.num_comp_vectors = dev->num_cnq; | 106 | dev->ibdev.num_comp_vectors = dev->num_cnq; |
@@ -120,6 +126,11 @@ static int qedr_register_device(struct qedr_dev *dev) | |||
120 | dev->ibdev.resize_cq = qedr_resize_cq; | 126 | dev->ibdev.resize_cq = qedr_resize_cq; |
121 | dev->ibdev.req_notify_cq = qedr_arm_cq; | 127 | dev->ibdev.req_notify_cq = qedr_arm_cq; |
122 | 128 | ||
129 | dev->ibdev.create_qp = qedr_create_qp; | ||
130 | dev->ibdev.modify_qp = qedr_modify_qp; | ||
131 | dev->ibdev.query_qp = qedr_query_qp; | ||
132 | dev->ibdev.destroy_qp = qedr_destroy_qp; | ||
133 | |||
123 | dev->ibdev.query_pkey = qedr_query_pkey; | 134 | dev->ibdev.query_pkey = qedr_query_pkey; |
124 | 135 | ||
125 | dev->ibdev.dma_device = &dev->pdev->dev; | 136 | dev->ibdev.dma_device = &dev->pdev->dev; |
@@ -630,6 +641,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, | |||
630 | goto init_err; | 641 | goto init_err; |
631 | } | 642 | } |
632 | 643 | ||
644 | dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT; | ||
645 | |||
633 | qedr_pci_set_atomic(dev, pdev); | 646 | qedr_pci_set_atomic(dev, pdev); |
634 | 647 | ||
635 | rc = qedr_alloc_resources(dev); | 648 | rc = qedr_alloc_resources(dev); |
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 9e2846a599ce..e9fe941c48ad 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
@@ -52,6 +52,9 @@ | |||
52 | #define QEDR_MSG_MISC "MISC" | 52 | #define QEDR_MSG_MISC "MISC" |
53 | #define QEDR_MSG_CQ " CQ" | 53 | #define QEDR_MSG_CQ " CQ" |
54 | #define QEDR_MSG_MR " MR" | 54 | #define QEDR_MSG_MR " MR" |
55 | #define QEDR_MSG_RQ " RQ" | ||
56 | #define QEDR_MSG_SQ " SQ" | ||
57 | #define QEDR_MSG_QP " QP" | ||
55 | 58 | ||
56 | #define QEDR_CQ_MAGIC_NUMBER (0x11223344) | 59 | #define QEDR_CQ_MAGIC_NUMBER (0x11223344) |
57 | 60 | ||
@@ -143,6 +146,8 @@ struct qedr_dev { | |||
143 | u32 dp_module; | 146 | u32 dp_module; |
144 | u8 dp_level; | 147 | u8 dp_level; |
145 | u8 num_hwfns; | 148 | u8 num_hwfns; |
149 | uint wq_multiplier; | ||
150 | |||
146 | }; | 151 | }; |
147 | 152 | ||
148 | #define QEDR_MAX_SQ_PBL (0x8000) | 153 | #define QEDR_MAX_SQ_PBL (0x8000) |
@@ -272,6 +277,122 @@ struct qedr_mm { | |||
272 | struct list_head entry; | 277 | struct list_head entry; |
273 | }; | 278 | }; |
274 | 279 | ||
280 | union db_prod32 { | ||
281 | struct rdma_pwm_val16_data data; | ||
282 | u32 raw; | ||
283 | }; | ||
284 | |||
285 | struct qedr_qp_hwq_info { | ||
286 | /* WQE Elements */ | ||
287 | struct qed_chain pbl; | ||
288 | u64 p_phys_addr_tbl; | ||
289 | u32 max_sges; | ||
290 | |||
291 | /* WQE */ | ||
292 | u16 prod; | ||
293 | u16 cons; | ||
294 | u16 wqe_cons; | ||
295 | u16 max_wr; | ||
296 | |||
297 | /* DB */ | ||
298 | void __iomem *db; | ||
299 | union db_prod32 db_data; | ||
300 | }; | ||
301 | |||
/* Advance a software ring index on @p_info, wrapping via the chain capacity.
 * Fixes vs. original: terminating ';' inside the do/while was missing (the
 * macro could not compile once used), qed_chain_get_capacity() takes a
 * struct qed_chain * so the pbl member must be passed by address (compare
 * the &qp->sq.pbl call sites in verbs.c), and arguments are parenthesized.
 * NOTE(review): capacity is applied as a bit-mask, which only wraps
 * correctly when capacity is of the form 2^n - 1 — confirm intent.
 */
#define QEDR_INC_SW_IDX(p_info, index)					\
	do {								\
		(p_info)->index = ((p_info)->index + 1) &		\
				  qed_chain_get_capacity(&(p_info)->pbl); \
	} while (0)
307 | |||
/* Error conditions latched in qedr_qp.err_bitmap; one bit per condition. */
enum qedr_qp_err_bitmap {
	QEDR_QP_ERR_SQ_FULL	= 1,	/* BIT(0): SQ ring exhausted */
	QEDR_QP_ERR_RQ_FULL	= 2,	/* BIT(1): RQ ring exhausted */
	QEDR_QP_ERR_BAD_SR	= 4,	/* BIT(2): malformed send request */
	QEDR_QP_ERR_BAD_RR	= 8,	/* BIT(3): malformed receive request */
	QEDR_QP_ERR_SQ_PBL_FULL	= 16,	/* BIT(4): SQ PBL exhausted */
	QEDR_QP_ERR_RQ_PBL_FULL	= 32,	/* BIT(5): RQ PBL exhausted */
};
316 | |||
317 | struct qedr_qp { | ||
318 | struct ib_qp ibqp; /* must be first */ | ||
319 | struct qedr_dev *dev; | ||
320 | |||
321 | struct qedr_qp_hwq_info sq; | ||
322 | struct qedr_qp_hwq_info rq; | ||
323 | |||
324 | u32 max_inline_data; | ||
325 | |||
326 | /* Lock for QP's */ | ||
327 | spinlock_t q_lock; | ||
328 | struct qedr_cq *sq_cq; | ||
329 | struct qedr_cq *rq_cq; | ||
330 | struct qedr_srq *srq; | ||
331 | enum qed_roce_qp_state state; | ||
332 | u32 id; | ||
333 | struct qedr_pd *pd; | ||
334 | enum ib_qp_type qp_type; | ||
335 | struct qed_rdma_qp *qed_qp; | ||
336 | u32 qp_id; | ||
337 | u16 icid; | ||
338 | u16 mtu; | ||
339 | int sgid_idx; | ||
340 | u32 rq_psn; | ||
341 | u32 sq_psn; | ||
342 | u32 qkey; | ||
343 | u32 dest_qp_num; | ||
344 | |||
345 | /* Relevant to qps created from kernel space only (ULPs) */ | ||
346 | u8 prev_wqe_size; | ||
347 | u16 wqe_cons; | ||
348 | u32 err_bitmap; | ||
349 | bool signaled; | ||
350 | |||
351 | /* SQ shadow */ | ||
352 | struct { | ||
353 | u64 wr_id; | ||
354 | enum ib_wc_opcode opcode; | ||
355 | u32 bytes_len; | ||
356 | u8 wqe_size; | ||
357 | bool signaled; | ||
358 | dma_addr_t icrc_mapping; | ||
359 | u32 *icrc; | ||
360 | struct qedr_mr *mr; | ||
361 | } *wqe_wr_id; | ||
362 | |||
363 | /* RQ shadow */ | ||
364 | struct { | ||
365 | u64 wr_id; | ||
366 | struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE]; | ||
367 | u8 wqe_size; | ||
368 | |||
369 | u16 vlan_id; | ||
370 | int rc; | ||
371 | } *rqe_wr_id; | ||
372 | |||
373 | /* Relevant to qps created from user space only (applications) */ | ||
374 | struct qedr_userq usq; | ||
375 | struct qedr_userq urq; | ||
376 | }; | ||
377 | |||
378 | static inline int qedr_get_dmac(struct qedr_dev *dev, | ||
379 | struct ib_ah_attr *ah_attr, u8 *mac_addr) | ||
380 | { | ||
381 | union ib_gid zero_sgid = { { 0 } }; | ||
382 | struct in6_addr in6; | ||
383 | |||
384 | if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) { | ||
385 | DP_ERR(dev, "Local port GID not supported\n"); | ||
386 | eth_zero_addr(mac_addr); | ||
387 | return -EINVAL; | ||
388 | } | ||
389 | |||
390 | memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); | ||
391 | ether_addr_copy(mac_addr, ah_attr->dmac); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
275 | static inline | 396 | static inline |
276 | struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext) | 397 | struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext) |
277 | { | 398 | { |
@@ -293,4 +414,8 @@ static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq) | |||
293 | return container_of(ibcq, struct qedr_cq, ibcq); | 414 | return container_of(ibcq, struct qedr_cq, ibcq); |
294 | } | 415 | } |
295 | 416 | ||
417 | static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp) | ||
418 | { | ||
419 | return container_of(ibqp, struct qedr_qp, ibqp); | ||
420 | } | ||
296 | #endif | 421 | #endif |
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h new file mode 100644 index 000000000000..b8a8b76d77b8 --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_cm.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* QLogic qedr NIC Driver | ||
2 | * Copyright (c) 2015-2016 QLogic Corporation | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and /or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #ifndef LINUX_QEDR_CM_H_ | ||
33 | #define LINUX_QEDR_CM_H_ | ||
34 | |||
35 | static inline u32 qedr_get_ipv4_from_gid(u8 *gid) | ||
36 | { | ||
37 | return *(u32 *)(void *)&gid[12]; | ||
38 | } | ||
39 | |||
40 | #endif | ||
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index 84f6520107cc..47705598eec6 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | |||
@@ -158,6 +158,17 @@ struct rdma_srq_sge { | |||
158 | __le32 l_key; | 158 | __le32 l_key; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | /* Rdma doorbell data for SQ and RQ */ | ||
162 | struct rdma_pwm_val16_data { | ||
163 | __le16 icid; | ||
164 | __le16 value; | ||
165 | }; | ||
166 | |||
167 | union rdma_pwm_val16_data_union { | ||
168 | struct rdma_pwm_val16_data as_struct; | ||
169 | __le32 as_dword; | ||
170 | }; | ||
171 | |||
161 | /* Rdma doorbell data for CQ */ | 172 | /* Rdma doorbell data for CQ */ |
162 | struct rdma_pwm_val32_data { | 173 | struct rdma_pwm_val32_data { |
163 | __le16 icid; | 174 | __le16 icid; |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b525c6cf1df0..a0d1c5fffb63 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include "qedr.h" | 48 | #include "qedr.h" |
49 | #include "verbs.h" | 49 | #include "verbs.h" |
50 | #include <rdma/qedr-abi.h> | 50 | #include <rdma/qedr-abi.h> |
51 | #include "qedr_cm.h" | ||
51 | 52 | ||
52 | #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) | 53 | #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) |
53 | 54 | ||
@@ -993,3 +994,1091 @@ int qedr_destroy_cq(struct ib_cq *ibcq) | |||
993 | 994 | ||
994 | return 0; | 995 | return 0; |
995 | } | 996 | } |
997 | |||
998 | static inline int get_gid_info_from_table(struct ib_qp *ibqp, | ||
999 | struct ib_qp_attr *attr, | ||
1000 | int attr_mask, | ||
1001 | struct qed_rdma_modify_qp_in_params | ||
1002 | *qp_params) | ||
1003 | { | ||
1004 | enum rdma_network_type nw_type; | ||
1005 | struct ib_gid_attr gid_attr; | ||
1006 | union ib_gid gid; | ||
1007 | u32 ipv4_addr; | ||
1008 | int rc = 0; | ||
1009 | int i; | ||
1010 | |||
1011 | rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num, | ||
1012 | attr->ah_attr.grh.sgid_index, &gid, &gid_attr); | ||
1013 | if (rc) | ||
1014 | return rc; | ||
1015 | |||
1016 | if (!memcmp(&gid, &zgid, sizeof(gid))) | ||
1017 | return -ENOENT; | ||
1018 | |||
1019 | if (gid_attr.ndev) { | ||
1020 | qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev); | ||
1021 | |||
1022 | dev_put(gid_attr.ndev); | ||
1023 | nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid); | ||
1024 | switch (nw_type) { | ||
1025 | case RDMA_NETWORK_IPV6: | ||
1026 | memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], | ||
1027 | sizeof(qp_params->sgid)); | ||
1028 | memcpy(&qp_params->dgid.bytes[0], | ||
1029 | &attr->ah_attr.grh.dgid, | ||
1030 | sizeof(qp_params->dgid)); | ||
1031 | qp_params->roce_mode = ROCE_V2_IPV6; | ||
1032 | SET_FIELD(qp_params->modify_flags, | ||
1033 | QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); | ||
1034 | break; | ||
1035 | case RDMA_NETWORK_IB: | ||
1036 | memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], | ||
1037 | sizeof(qp_params->sgid)); | ||
1038 | memcpy(&qp_params->dgid.bytes[0], | ||
1039 | &attr->ah_attr.grh.dgid, | ||
1040 | sizeof(qp_params->dgid)); | ||
1041 | qp_params->roce_mode = ROCE_V1; | ||
1042 | break; | ||
1043 | case RDMA_NETWORK_IPV4: | ||
1044 | memset(&qp_params->sgid, 0, sizeof(qp_params->sgid)); | ||
1045 | memset(&qp_params->dgid, 0, sizeof(qp_params->dgid)); | ||
1046 | ipv4_addr = qedr_get_ipv4_from_gid(gid.raw); | ||
1047 | qp_params->sgid.ipv4_addr = ipv4_addr; | ||
1048 | ipv4_addr = | ||
1049 | qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw); | ||
1050 | qp_params->dgid.ipv4_addr = ipv4_addr; | ||
1051 | SET_FIELD(qp_params->modify_flags, | ||
1052 | QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); | ||
1053 | qp_params->roce_mode = ROCE_V2_IPV4; | ||
1054 | break; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | for (i = 0; i < 4; i++) { | ||
1059 | qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]); | ||
1060 | qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]); | ||
1061 | } | ||
1062 | |||
1063 | if (qp_params->vlan_id >= VLAN_CFI_MASK) | ||
1064 | qp_params->vlan_id = 0; | ||
1065 | |||
1066 | return 0; | ||
1067 | } | ||
1068 | |||
1069 | static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1070 | { | ||
1071 | qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); | ||
1072 | ib_umem_release(qp->usq.umem); | ||
1073 | } | ||
1074 | |||
1075 | static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1076 | { | ||
1077 | qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); | ||
1078 | ib_umem_release(qp->urq.umem); | ||
1079 | } | ||
1080 | |||
1081 | static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1082 | { | ||
1083 | dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); | ||
1084 | kfree(qp->wqe_wr_id); | ||
1085 | } | ||
1086 | |||
1087 | static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1088 | { | ||
1089 | dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); | ||
1090 | kfree(qp->rqe_wr_id); | ||
1091 | } | ||
1092 | |||
1093 | static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, | ||
1094 | struct ib_qp_init_attr *attrs) | ||
1095 | { | ||
1096 | struct qedr_device_attr *qattr = &dev->attr; | ||
1097 | |||
1098 | /* QP0... attrs->qp_type == IB_QPT_GSI */ | ||
1099 | if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) { | ||
1100 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1101 | "create qp: unsupported qp type=0x%x requested\n", | ||
1102 | attrs->qp_type); | ||
1103 | return -EINVAL; | ||
1104 | } | ||
1105 | |||
1106 | if (attrs->cap.max_send_wr > qattr->max_sqe) { | ||
1107 | DP_ERR(dev, | ||
1108 | "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n", | ||
1109 | attrs->cap.max_send_wr, qattr->max_sqe); | ||
1110 | return -EINVAL; | ||
1111 | } | ||
1112 | |||
1113 | if (attrs->cap.max_inline_data > qattr->max_inline) { | ||
1114 | DP_ERR(dev, | ||
1115 | "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n", | ||
1116 | attrs->cap.max_inline_data, qattr->max_inline); | ||
1117 | return -EINVAL; | ||
1118 | } | ||
1119 | |||
1120 | if (attrs->cap.max_send_sge > qattr->max_sge) { | ||
1121 | DP_ERR(dev, | ||
1122 | "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n", | ||
1123 | attrs->cap.max_send_sge, qattr->max_sge); | ||
1124 | return -EINVAL; | ||
1125 | } | ||
1126 | |||
1127 | if (attrs->cap.max_recv_sge > qattr->max_sge) { | ||
1128 | DP_ERR(dev, | ||
1129 | "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n", | ||
1130 | attrs->cap.max_recv_sge, qattr->max_sge); | ||
1131 | return -EINVAL; | ||
1132 | } | ||
1133 | |||
1134 | /* Unprivileged user space cannot create special QP */ | ||
1135 | if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { | ||
1136 | DP_ERR(dev, | ||
1137 | "create qp: userspace can't create special QPs of type=0x%x\n", | ||
1138 | attrs->qp_type); | ||
1139 | return -EINVAL; | ||
1140 | } | ||
1141 | |||
1142 | return 0; | ||
1143 | } | ||
1144 | |||
1145 | static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp, | ||
1146 | struct qedr_qp *qp) | ||
1147 | { | ||
1148 | uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); | ||
1149 | uresp->rq_icid = qp->icid; | ||
1150 | } | ||
1151 | |||
1152 | static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp, | ||
1153 | struct qedr_qp *qp) | ||
1154 | { | ||
1155 | uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); | ||
1156 | uresp->sq_icid = qp->icid + 1; | ||
1157 | } | ||
1158 | |||
1159 | static int qedr_copy_qp_uresp(struct qedr_dev *dev, | ||
1160 | struct qedr_qp *qp, struct ib_udata *udata) | ||
1161 | { | ||
1162 | struct qedr_create_qp_uresp uresp; | ||
1163 | int rc; | ||
1164 | |||
1165 | memset(&uresp, 0, sizeof(uresp)); | ||
1166 | qedr_copy_sq_uresp(&uresp, qp); | ||
1167 | qedr_copy_rq_uresp(&uresp, qp); | ||
1168 | |||
1169 | uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; | ||
1170 | uresp.qp_id = qp->qp_id; | ||
1171 | |||
1172 | rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||
1173 | if (rc) | ||
1174 | DP_ERR(dev, | ||
1175 | "create qp: failed a copy to user space with qp icid=0x%x.\n", | ||
1176 | qp->icid); | ||
1177 | |||
1178 | return rc; | ||
1179 | } | ||
1180 | |||
1181 | static void qedr_set_qp_init_params(struct qedr_dev *dev, | ||
1182 | struct qedr_qp *qp, | ||
1183 | struct qedr_pd *pd, | ||
1184 | struct ib_qp_init_attr *attrs) | ||
1185 | { | ||
1186 | qp->pd = pd; | ||
1187 | |||
1188 | spin_lock_init(&qp->q_lock); | ||
1189 | |||
1190 | qp->qp_type = attrs->qp_type; | ||
1191 | qp->max_inline_data = attrs->cap.max_inline_data; | ||
1192 | qp->sq.max_sges = attrs->cap.max_send_sge; | ||
1193 | qp->state = QED_ROCE_QP_STATE_RESET; | ||
1194 | qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; | ||
1195 | qp->sq_cq = get_qedr_cq(attrs->send_cq); | ||
1196 | qp->rq_cq = get_qedr_cq(attrs->recv_cq); | ||
1197 | qp->dev = dev; | ||
1198 | |||
1199 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1200 | "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n", | ||
1201 | pd->pd_id, qp->qp_type, qp->max_inline_data, | ||
1202 | qp->state, qp->signaled, (attrs->srq) ? 1 : 0); | ||
1203 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1204 | "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", | ||
1205 | qp->sq.max_sges, qp->sq_cq->icid); | ||
1206 | qp->rq.max_sges = attrs->cap.max_recv_sge; | ||
1207 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1208 | "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n", | ||
1209 | qp->rq.max_sges, qp->rq_cq->icid); | ||
1210 | } | ||
1211 | |||
1212 | static inline void | ||
1213 | qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params, | ||
1214 | struct qedr_create_qp_ureq *ureq) | ||
1215 | { | ||
1216 | /* QP handle to be written in CQE */ | ||
1217 | params->qp_handle_lo = ureq->qp_handle_lo; | ||
1218 | params->qp_handle_hi = ureq->qp_handle_hi; | ||
1219 | } | ||
1220 | |||
1221 | static inline void | ||
1222 | qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1223 | { | ||
1224 | qp->sq.db = dev->db_addr + | ||
1225 | DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); | ||
1226 | qp->sq.db_data.data.icid = qp->icid + 1; | ||
1227 | } | ||
1228 | |||
1229 | static inline void | ||
1230 | qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1231 | { | ||
1232 | qp->rq.db = dev->db_addr + | ||
1233 | DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); | ||
1234 | qp->rq.db_data.data.icid = qp->icid; | ||
1235 | } | ||
1236 | |||
1237 | static inline int | ||
1238 | qedr_init_qp_kernel_params_rq(struct qedr_dev *dev, | ||
1239 | struct qedr_qp *qp, struct ib_qp_init_attr *attrs) | ||
1240 | { | ||
1241 | /* Allocate driver internal RQ array */ | ||
1242 | qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), | ||
1243 | GFP_KERNEL); | ||
1244 | if (!qp->rqe_wr_id) | ||
1245 | return -ENOMEM; | ||
1246 | |||
1247 | DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr); | ||
1248 | |||
1249 | return 0; | ||
1250 | } | ||
1251 | |||
1252 | static inline int | ||
1253 | qedr_init_qp_kernel_params_sq(struct qedr_dev *dev, | ||
1254 | struct qedr_qp *qp, | ||
1255 | struct ib_qp_init_attr *attrs, | ||
1256 | struct qed_rdma_create_qp_in_params *params) | ||
1257 | { | ||
1258 | u32 temp_max_wr; | ||
1259 | |||
1260 | /* Allocate driver internal SQ array */ | ||
1261 | temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier; | ||
1262 | temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe); | ||
1263 | |||
1264 | /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */ | ||
1265 | qp->sq.max_wr = (u16)temp_max_wr; | ||
1266 | qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), | ||
1267 | GFP_KERNEL); | ||
1268 | if (!qp->wqe_wr_id) | ||
1269 | return -ENOMEM; | ||
1270 | |||
1271 | DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr); | ||
1272 | |||
1273 | /* QP handle to be written in CQE */ | ||
1274 | params->qp_handle_lo = lower_32_bits((uintptr_t)qp); | ||
1275 | params->qp_handle_hi = upper_32_bits((uintptr_t)qp); | ||
1276 | |||
1277 | return 0; | ||
1278 | } | ||
1279 | |||
1280 | static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev, | ||
1281 | struct qedr_qp *qp, | ||
1282 | struct ib_qp_init_attr *attrs) | ||
1283 | { | ||
1284 | u32 n_sq_elems, n_sq_entries; | ||
1285 | int rc; | ||
1286 | |||
1287 | /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in | ||
1288 | * the ring. The ring should allow at least a single WR, even if the | ||
1289 | * user requested none, due to allocation issues. | ||
1290 | */ | ||
1291 | n_sq_entries = attrs->cap.max_send_wr; | ||
1292 | n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe); | ||
1293 | n_sq_entries = max_t(u32, n_sq_entries, 1); | ||
1294 | n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE; | ||
1295 | rc = dev->ops->common->chain_alloc(dev->cdev, | ||
1296 | QED_CHAIN_USE_TO_PRODUCE, | ||
1297 | QED_CHAIN_MODE_PBL, | ||
1298 | QED_CHAIN_CNT_TYPE_U32, | ||
1299 | n_sq_elems, | ||
1300 | QEDR_SQE_ELEMENT_SIZE, | ||
1301 | &qp->sq.pbl); | ||
1302 | if (rc) { | ||
1303 | DP_ERR(dev, "failed to allocate QP %p SQ\n", qp); | ||
1304 | return rc; | ||
1305 | } | ||
1306 | |||
1307 | DP_DEBUG(dev, QEDR_MSG_SQ, | ||
1308 | "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n", | ||
1309 | qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr, | ||
1310 | n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc); | ||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev, | ||
1315 | struct qedr_qp *qp, | ||
1316 | struct ib_qp_init_attr *attrs) | ||
1317 | { | ||
1318 | u32 n_rq_elems, n_rq_entries; | ||
1319 | int rc; | ||
1320 | |||
1321 | /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in | ||
1322 | * the ring. There ring should allow at least a single WR, even if the | ||
1323 | * user requested none, due to allocation issues. | ||
1324 | */ | ||
1325 | n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1); | ||
1326 | n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE; | ||
1327 | rc = dev->ops->common->chain_alloc(dev->cdev, | ||
1328 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | ||
1329 | QED_CHAIN_MODE_PBL, | ||
1330 | QED_CHAIN_CNT_TYPE_U32, | ||
1331 | n_rq_elems, | ||
1332 | QEDR_RQE_ELEMENT_SIZE, | ||
1333 | &qp->rq.pbl); | ||
1334 | |||
1335 | if (rc) { | ||
1336 | DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp); | ||
1337 | return -ENOMEM; | ||
1338 | } | ||
1339 | |||
1340 | DP_DEBUG(dev, QEDR_MSG_RQ, | ||
1341 | "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n", | ||
1342 | qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr, | ||
1343 | n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc); | ||
1344 | |||
1345 | /* n_rq_entries < u16 so the casting is safe */ | ||
1346 | qp->rq.max_wr = (u16)n_rq_entries; | ||
1347 | |||
1348 | return 0; | ||
1349 | } | ||
1350 | |||
1351 | static inline void | ||
1352 | qedr_init_qp_in_params_sq(struct qedr_dev *dev, | ||
1353 | struct qedr_pd *pd, | ||
1354 | struct qedr_qp *qp, | ||
1355 | struct ib_qp_init_attr *attrs, | ||
1356 | struct ib_udata *udata, | ||
1357 | struct qed_rdma_create_qp_in_params *params) | ||
1358 | { | ||
1359 | /* QP handle to be written in an async event */ | ||
1360 | params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp); | ||
1361 | params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp); | ||
1362 | |||
1363 | params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR); | ||
1364 | params->fmr_and_reserved_lkey = !udata; | ||
1365 | params->pd = pd->pd_id; | ||
1366 | params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi; | ||
1367 | params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid; | ||
1368 | params->max_sq_sges = 0; | ||
1369 | params->stats_queue = 0; | ||
1370 | |||
1371 | if (udata) { | ||
1372 | params->sq_num_pages = qp->usq.pbl_info.num_pbes; | ||
1373 | params->sq_pbl_ptr = qp->usq.pbl_tbl->pa; | ||
1374 | } else { | ||
1375 | params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); | ||
1376 | params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); | ||
1377 | } | ||
1378 | } | ||
1379 | |||
1380 | static inline void | ||
1381 | qedr_init_qp_in_params_rq(struct qedr_qp *qp, | ||
1382 | struct ib_qp_init_attr *attrs, | ||
1383 | struct ib_udata *udata, | ||
1384 | struct qed_rdma_create_qp_in_params *params) | ||
1385 | { | ||
1386 | params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; | ||
1387 | params->srq_id = 0; | ||
1388 | params->use_srq = false; | ||
1389 | |||
1390 | if (udata) { | ||
1391 | params->rq_num_pages = qp->urq.pbl_info.num_pbes; | ||
1392 | params->rq_pbl_ptr = qp->urq.pbl_tbl->pa; | ||
1393 | } else { | ||
1394 | params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl); | ||
1395 | params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl); | ||
1396 | } | ||
1397 | } | ||
1398 | |||
1399 | static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp) | ||
1400 | { | ||
1401 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1402 | "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n", | ||
1403 | qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr, | ||
1404 | qp->urq.buf_len); | ||
1405 | } | ||
1406 | |||
1407 | static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx, | ||
1408 | struct qedr_dev *dev, | ||
1409 | struct qedr_qp *qp, | ||
1410 | struct qedr_create_qp_ureq *ureq) | ||
1411 | { | ||
1412 | int rc; | ||
1413 | |||
1414 | /* SQ - read access only (0), dma sync not required (0) */ | ||
1415 | rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr, | ||
1416 | ureq->sq_len, 0, 0); | ||
1417 | if (rc) | ||
1418 | return rc; | ||
1419 | |||
1420 | /* RQ - read access only (0), dma sync not required (0) */ | ||
1421 | rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr, | ||
1422 | ureq->rq_len, 0, 0); | ||
1423 | |||
1424 | if (rc) | ||
1425 | qedr_cleanup_user_sq(dev, qp); | ||
1426 | return rc; | ||
1427 | } | ||
1428 | |||
1429 | static inline int | ||
1430 | qedr_init_kernel_qp(struct qedr_dev *dev, | ||
1431 | struct qedr_qp *qp, | ||
1432 | struct ib_qp_init_attr *attrs, | ||
1433 | struct qed_rdma_create_qp_in_params *params) | ||
1434 | { | ||
1435 | int rc; | ||
1436 | |||
1437 | rc = qedr_init_qp_kernel_sq(dev, qp, attrs); | ||
1438 | if (rc) { | ||
1439 | DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp); | ||
1440 | return rc; | ||
1441 | } | ||
1442 | |||
1443 | rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params); | ||
1444 | if (rc) { | ||
1445 | dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); | ||
1446 | DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp); | ||
1447 | return rc; | ||
1448 | } | ||
1449 | |||
1450 | rc = qedr_init_qp_kernel_rq(dev, qp, attrs); | ||
1451 | if (rc) { | ||
1452 | qedr_cleanup_kernel_sq(dev, qp); | ||
1453 | DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp); | ||
1454 | return rc; | ||
1455 | } | ||
1456 | |||
1457 | rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs); | ||
1458 | if (rc) { | ||
1459 | DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp); | ||
1460 | qedr_cleanup_kernel_sq(dev, qp); | ||
1461 | dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); | ||
1462 | return rc; | ||
1463 | } | ||
1464 | |||
1465 | return rc; | ||
1466 | } | ||
1467 | |||
1468 | struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, | ||
1469 | struct ib_qp_init_attr *attrs, | ||
1470 | struct ib_udata *udata) | ||
1471 | { | ||
1472 | struct qedr_dev *dev = get_qedr_dev(ibpd->device); | ||
1473 | struct qed_rdma_create_qp_out_params out_params; | ||
1474 | struct qed_rdma_create_qp_in_params in_params; | ||
1475 | struct qedr_pd *pd = get_qedr_pd(ibpd); | ||
1476 | struct ib_ucontext *ib_ctx = NULL; | ||
1477 | struct qedr_ucontext *ctx = NULL; | ||
1478 | struct qedr_create_qp_ureq ureq; | ||
1479 | struct qedr_qp *qp; | ||
1480 | int rc = 0; | ||
1481 | |||
1482 | DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", | ||
1483 | udata ? "user library" : "kernel", pd); | ||
1484 | |||
1485 | rc = qedr_check_qp_attrs(ibpd, dev, attrs); | ||
1486 | if (rc) | ||
1487 | return ERR_PTR(rc); | ||
1488 | |||
1489 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | ||
1490 | if (!qp) | ||
1491 | return ERR_PTR(-ENOMEM); | ||
1492 | |||
1493 | if (attrs->srq) | ||
1494 | return ERR_PTR(-EINVAL); | ||
1495 | |||
1496 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1497 | "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", | ||
1498 | get_qedr_cq(attrs->send_cq), | ||
1499 | get_qedr_cq(attrs->send_cq)->icid, | ||
1500 | get_qedr_cq(attrs->recv_cq), | ||
1501 | get_qedr_cq(attrs->recv_cq)->icid); | ||
1502 | |||
1503 | qedr_set_qp_init_params(dev, qp, pd, attrs); | ||
1504 | |||
1505 | memset(&in_params, 0, sizeof(in_params)); | ||
1506 | |||
1507 | if (udata) { | ||
1508 | if (!(udata && ibpd->uobject && ibpd->uobject->context)) | ||
1509 | goto err0; | ||
1510 | |||
1511 | ib_ctx = ibpd->uobject->context; | ||
1512 | ctx = get_qedr_ucontext(ib_ctx); | ||
1513 | |||
1514 | memset(&ureq, 0, sizeof(ureq)); | ||
1515 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { | ||
1516 | DP_ERR(dev, | ||
1517 | "create qp: problem copying data from user space\n"); | ||
1518 | goto err0; | ||
1519 | } | ||
1520 | |||
1521 | rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq); | ||
1522 | if (rc) | ||
1523 | goto err0; | ||
1524 | |||
1525 | qedr_init_qp_user_params(&in_params, &ureq); | ||
1526 | } else { | ||
1527 | rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params); | ||
1528 | if (rc) | ||
1529 | goto err0; | ||
1530 | } | ||
1531 | |||
1532 | qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params); | ||
1533 | qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params); | ||
1534 | |||
1535 | qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, | ||
1536 | &in_params, &out_params); | ||
1537 | |||
1538 | if (!qp->qed_qp) | ||
1539 | goto err1; | ||
1540 | |||
1541 | qp->qp_id = out_params.qp_id; | ||
1542 | qp->icid = out_params.icid; | ||
1543 | qp->ibqp.qp_num = qp->qp_id; | ||
1544 | |||
1545 | if (udata) { | ||
1546 | rc = qedr_copy_qp_uresp(dev, qp, udata); | ||
1547 | if (rc) | ||
1548 | goto err2; | ||
1549 | |||
1550 | qedr_qp_user_print(dev, qp); | ||
1551 | } else { | ||
1552 | qedr_init_qp_kernel_doorbell_sq(dev, qp); | ||
1553 | qedr_init_qp_kernel_doorbell_rq(dev, qp); | ||
1554 | } | ||
1555 | |||
1556 | DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n", | ||
1557 | udata ? "user" : "kernel", qp); | ||
1558 | |||
1559 | return &qp->ibqp; | ||
1560 | |||
1561 | err2: | ||
1562 | rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); | ||
1563 | if (rc) | ||
1564 | DP_ERR(dev, "create qp: fatal fault. rc=%d", rc); | ||
1565 | err1: | ||
1566 | if (udata) { | ||
1567 | qedr_cleanup_user_sq(dev, qp); | ||
1568 | qedr_cleanup_user_rq(dev, qp); | ||
1569 | } else { | ||
1570 | qedr_cleanup_kernel_sq(dev, qp); | ||
1571 | qedr_cleanup_kernel_rq(dev, qp); | ||
1572 | } | ||
1573 | |||
1574 | err0: | ||
1575 | kfree(qp); | ||
1576 | |||
1577 | return ERR_PTR(-EFAULT); | ||
1578 | } | ||
1579 | |||
1580 | enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) | ||
1581 | { | ||
1582 | switch (qp_state) { | ||
1583 | case QED_ROCE_QP_STATE_RESET: | ||
1584 | return IB_QPS_RESET; | ||
1585 | case QED_ROCE_QP_STATE_INIT: | ||
1586 | return IB_QPS_INIT; | ||
1587 | case QED_ROCE_QP_STATE_RTR: | ||
1588 | return IB_QPS_RTR; | ||
1589 | case QED_ROCE_QP_STATE_RTS: | ||
1590 | return IB_QPS_RTS; | ||
1591 | case QED_ROCE_QP_STATE_SQD: | ||
1592 | return IB_QPS_SQD; | ||
1593 | case QED_ROCE_QP_STATE_ERR: | ||
1594 | return IB_QPS_ERR; | ||
1595 | case QED_ROCE_QP_STATE_SQE: | ||
1596 | return IB_QPS_SQE; | ||
1597 | } | ||
1598 | return IB_QPS_ERR; | ||
1599 | } | ||
1600 | |||
1601 | enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) | ||
1602 | { | ||
1603 | switch (qp_state) { | ||
1604 | case IB_QPS_RESET: | ||
1605 | return QED_ROCE_QP_STATE_RESET; | ||
1606 | case IB_QPS_INIT: | ||
1607 | return QED_ROCE_QP_STATE_INIT; | ||
1608 | case IB_QPS_RTR: | ||
1609 | return QED_ROCE_QP_STATE_RTR; | ||
1610 | case IB_QPS_RTS: | ||
1611 | return QED_ROCE_QP_STATE_RTS; | ||
1612 | case IB_QPS_SQD: | ||
1613 | return QED_ROCE_QP_STATE_SQD; | ||
1614 | case IB_QPS_ERR: | ||
1615 | return QED_ROCE_QP_STATE_ERR; | ||
1616 | default: | ||
1617 | return QED_ROCE_QP_STATE_ERR; | ||
1618 | } | ||
1619 | } | ||
1620 | |||
/* Reset one hardware queue (SQ or RQ) back to its initial empty state:
 * rewind the PBL chain and zero the producer/consumer indices and the
 * cached doorbell value.  Used on the RESET->INIT transition.
 */
static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
{
	qed_chain_reset(&qph->pbl);
	qph->prod = 0;
	qph->cons = 0;
	qph->wqe_cons = 0;
	/* Doorbell data is in little-endian wire format */
	qph->db_data.data.value = cpu_to_le16(0);
}
1629 | |||
1630 | static int qedr_update_qp_state(struct qedr_dev *dev, | ||
1631 | struct qedr_qp *qp, | ||
1632 | enum qed_roce_qp_state new_state) | ||
1633 | { | ||
1634 | int status = 0; | ||
1635 | |||
1636 | if (new_state == qp->state) | ||
1637 | return 1; | ||
1638 | |||
1639 | switch (qp->state) { | ||
1640 | case QED_ROCE_QP_STATE_RESET: | ||
1641 | switch (new_state) { | ||
1642 | case QED_ROCE_QP_STATE_INIT: | ||
1643 | qp->prev_wqe_size = 0; | ||
1644 | qedr_reset_qp_hwq_info(&qp->sq); | ||
1645 | qedr_reset_qp_hwq_info(&qp->rq); | ||
1646 | break; | ||
1647 | default: | ||
1648 | status = -EINVAL; | ||
1649 | break; | ||
1650 | }; | ||
1651 | break; | ||
1652 | case QED_ROCE_QP_STATE_INIT: | ||
1653 | switch (new_state) { | ||
1654 | case QED_ROCE_QP_STATE_RTR: | ||
1655 | /* Update doorbell (in case post_recv was | ||
1656 | * done before move to RTR) | ||
1657 | */ | ||
1658 | wmb(); | ||
1659 | writel(qp->rq.db_data.raw, qp->rq.db); | ||
1660 | /* Make sure write takes effect */ | ||
1661 | mmiowb(); | ||
1662 | break; | ||
1663 | case QED_ROCE_QP_STATE_ERR: | ||
1664 | break; | ||
1665 | default: | ||
1666 | /* Invalid state change. */ | ||
1667 | status = -EINVAL; | ||
1668 | break; | ||
1669 | }; | ||
1670 | break; | ||
1671 | case QED_ROCE_QP_STATE_RTR: | ||
1672 | /* RTR->XXX */ | ||
1673 | switch (new_state) { | ||
1674 | case QED_ROCE_QP_STATE_RTS: | ||
1675 | break; | ||
1676 | case QED_ROCE_QP_STATE_ERR: | ||
1677 | break; | ||
1678 | default: | ||
1679 | /* Invalid state change. */ | ||
1680 | status = -EINVAL; | ||
1681 | break; | ||
1682 | }; | ||
1683 | break; | ||
1684 | case QED_ROCE_QP_STATE_RTS: | ||
1685 | /* RTS->XXX */ | ||
1686 | switch (new_state) { | ||
1687 | case QED_ROCE_QP_STATE_SQD: | ||
1688 | break; | ||
1689 | case QED_ROCE_QP_STATE_ERR: | ||
1690 | break; | ||
1691 | default: | ||
1692 | /* Invalid state change. */ | ||
1693 | status = -EINVAL; | ||
1694 | break; | ||
1695 | }; | ||
1696 | break; | ||
1697 | case QED_ROCE_QP_STATE_SQD: | ||
1698 | /* SQD->XXX */ | ||
1699 | switch (new_state) { | ||
1700 | case QED_ROCE_QP_STATE_RTS: | ||
1701 | case QED_ROCE_QP_STATE_ERR: | ||
1702 | break; | ||
1703 | default: | ||
1704 | /* Invalid state change. */ | ||
1705 | status = -EINVAL; | ||
1706 | break; | ||
1707 | }; | ||
1708 | break; | ||
1709 | case QED_ROCE_QP_STATE_ERR: | ||
1710 | /* ERR->XXX */ | ||
1711 | switch (new_state) { | ||
1712 | case QED_ROCE_QP_STATE_RESET: | ||
1713 | break; | ||
1714 | default: | ||
1715 | status = -EINVAL; | ||
1716 | break; | ||
1717 | }; | ||
1718 | break; | ||
1719 | default: | ||
1720 | status = -EINVAL; | ||
1721 | break; | ||
1722 | }; | ||
1723 | |||
1724 | return status; | ||
1725 | } | ||
1726 | |||
1727 | int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
1728 | int attr_mask, struct ib_udata *udata) | ||
1729 | { | ||
1730 | struct qedr_qp *qp = get_qedr_qp(ibqp); | ||
1731 | struct qed_rdma_modify_qp_in_params qp_params = { 0 }; | ||
1732 | struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); | ||
1733 | enum ib_qp_state old_qp_state, new_qp_state; | ||
1734 | int rc = 0; | ||
1735 | |||
1736 | DP_DEBUG(dev, QEDR_MSG_QP, | ||
1737 | "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask, | ||
1738 | attr->qp_state); | ||
1739 | |||
1740 | old_qp_state = qedr_get_ibqp_state(qp->state); | ||
1741 | if (attr_mask & IB_QP_STATE) | ||
1742 | new_qp_state = attr->qp_state; | ||
1743 | else | ||
1744 | new_qp_state = old_qp_state; | ||
1745 | |||
1746 | if (!ib_modify_qp_is_ok | ||
1747 | (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask, | ||
1748 | IB_LINK_LAYER_ETHERNET)) { | ||
1749 | DP_ERR(dev, | ||
1750 | "modify qp: invalid attribute mask=0x%x specified for\n" | ||
1751 | "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n", | ||
1752 | attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state, | ||
1753 | new_qp_state); | ||
1754 | rc = -EINVAL; | ||
1755 | goto err; | ||
1756 | } | ||
1757 | |||
1758 | /* Translate the masks... */ | ||
1759 | if (attr_mask & IB_QP_STATE) { | ||
1760 | SET_FIELD(qp_params.modify_flags, | ||
1761 | QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1); | ||
1762 | qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state); | ||
1763 | } | ||
1764 | |||
1765 | if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) | ||
1766 | qp_params.sqd_async = true; | ||
1767 | |||
1768 | if (attr_mask & IB_QP_PKEY_INDEX) { | ||
1769 | SET_FIELD(qp_params.modify_flags, | ||
1770 | QED_ROCE_MODIFY_QP_VALID_PKEY, 1); | ||
1771 | if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) { | ||
1772 | rc = -EINVAL; | ||
1773 | goto err; | ||
1774 | } | ||
1775 | |||
1776 | qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT; | ||
1777 | } | ||
1778 | |||
1779 | if (attr_mask & IB_QP_QKEY) | ||
1780 | qp->qkey = attr->qkey; | ||
1781 | |||
1782 | if (attr_mask & IB_QP_ACCESS_FLAGS) { | ||
1783 | SET_FIELD(qp_params.modify_flags, | ||
1784 | QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1); | ||
1785 | qp_params.incoming_rdma_read_en = attr->qp_access_flags & | ||
1786 | IB_ACCESS_REMOTE_READ; | ||
1787 | qp_params.incoming_rdma_write_en = attr->qp_access_flags & | ||
1788 | IB_ACCESS_REMOTE_WRITE; | ||
1789 | qp_params.incoming_atomic_en = attr->qp_access_flags & | ||
1790 | IB_ACCESS_REMOTE_ATOMIC; | ||
1791 | } | ||
1792 | |||
1793 | if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { | ||
1794 | if (attr_mask & IB_QP_PATH_MTU) { | ||
1795 | if (attr->path_mtu < IB_MTU_256 || | ||
1796 | attr->path_mtu > IB_MTU_4096) { | ||
1797 | pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n"); | ||
1798 | rc = -EINVAL; | ||
1799 | goto err; | ||
1800 | } | ||
1801 | qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu), | ||
1802 | ib_mtu_enum_to_int(iboe_get_mtu | ||
1803 | (dev->ndev->mtu))); | ||
1804 | } | ||
1805 | |||
1806 | if (!qp->mtu) { | ||
1807 | qp->mtu = | ||
1808 | ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); | ||
1809 | pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu); | ||
1810 | } | ||
1811 | |||
1812 | SET_FIELD(qp_params.modify_flags, | ||
1813 | QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1); | ||
1814 | |||
1815 | qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class; | ||
1816 | qp_params.flow_label = attr->ah_attr.grh.flow_label; | ||
1817 | qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit; | ||
1818 | |||
1819 | qp->sgid_idx = attr->ah_attr.grh.sgid_index; | ||
1820 | |||
1821 | rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params); | ||
1822 | if (rc) { | ||
1823 | DP_ERR(dev, | ||
1824 | "modify qp: problems with GID index %d (rc=%d)\n", | ||
1825 | attr->ah_attr.grh.sgid_index, rc); | ||
1826 | return rc; | ||
1827 | } | ||
1828 | |||
1829 | rc = qedr_get_dmac(dev, &attr->ah_attr, | ||
1830 | qp_params.remote_mac_addr); | ||
1831 | if (rc) | ||
1832 | return rc; | ||
1833 | |||
1834 | qp_params.use_local_mac = true; | ||
1835 | ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr); | ||
1836 | |||
1837 | DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n", | ||
1838 | qp_params.dgid.dwords[0], qp_params.dgid.dwords[1], | ||
1839 | qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]); | ||
1840 | DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n", | ||
1841 | qp_params.sgid.dwords[0], qp_params.sgid.dwords[1], | ||
1842 | qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); | ||
1843 | DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", | ||
1844 | qp_params.remote_mac_addr); | ||
1845 | ; | ||
1846 | |||
1847 | qp_params.mtu = qp->mtu; | ||
1848 | qp_params.lb_indication = false; | ||
1849 | } | ||
1850 | |||
1851 | if (!qp_params.mtu) { | ||
1852 | /* Stay with current MTU */ | ||
1853 | if (qp->mtu) | ||
1854 | qp_params.mtu = qp->mtu; | ||
1855 | else | ||
1856 | qp_params.mtu = | ||
1857 | ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); | ||
1858 | } | ||
1859 | |||
1860 | if (attr_mask & IB_QP_TIMEOUT) { | ||
1861 | SET_FIELD(qp_params.modify_flags, | ||
1862 | QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); | ||
1863 | |||
1864 | qp_params.ack_timeout = attr->timeout; | ||
1865 | if (attr->timeout) { | ||
1866 | u32 temp; | ||
1867 | |||
1868 | temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; | ||
1869 | /* FW requires [msec] */ | ||
1870 | qp_params.ack_timeout = temp; | ||
1871 | } else { | ||
1872 | /* Infinite */ | ||
1873 | qp_params.ack_timeout = 0; | ||
1874 | } | ||
1875 | } | ||
1876 | if (attr_mask & IB_QP_RETRY_CNT) { | ||
1877 | SET_FIELD(qp_params.modify_flags, | ||
1878 | QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); | ||
1879 | qp_params.retry_cnt = attr->retry_cnt; | ||
1880 | } | ||
1881 | |||
1882 | if (attr_mask & IB_QP_RNR_RETRY) { | ||
1883 | SET_FIELD(qp_params.modify_flags, | ||
1884 | QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1); | ||
1885 | qp_params.rnr_retry_cnt = attr->rnr_retry; | ||
1886 | } | ||
1887 | |||
1888 | if (attr_mask & IB_QP_RQ_PSN) { | ||
1889 | SET_FIELD(qp_params.modify_flags, | ||
1890 | QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1); | ||
1891 | qp_params.rq_psn = attr->rq_psn; | ||
1892 | qp->rq_psn = attr->rq_psn; | ||
1893 | } | ||
1894 | |||
1895 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { | ||
1896 | if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) { | ||
1897 | rc = -EINVAL; | ||
1898 | DP_ERR(dev, | ||
1899 | "unsupported max_rd_atomic=%d, supported=%d\n", | ||
1900 | attr->max_rd_atomic, | ||
1901 | dev->attr.max_qp_req_rd_atomic_resc); | ||
1902 | goto err; | ||
1903 | } | ||
1904 | |||
1905 | SET_FIELD(qp_params.modify_flags, | ||
1906 | QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1); | ||
1907 | qp_params.max_rd_atomic_req = attr->max_rd_atomic; | ||
1908 | } | ||
1909 | |||
1910 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { | ||
1911 | SET_FIELD(qp_params.modify_flags, | ||
1912 | QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1); | ||
1913 | qp_params.min_rnr_nak_timer = attr->min_rnr_timer; | ||
1914 | } | ||
1915 | |||
1916 | if (attr_mask & IB_QP_SQ_PSN) { | ||
1917 | SET_FIELD(qp_params.modify_flags, | ||
1918 | QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1); | ||
1919 | qp_params.sq_psn = attr->sq_psn; | ||
1920 | qp->sq_psn = attr->sq_psn; | ||
1921 | } | ||
1922 | |||
1923 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { | ||
1924 | if (attr->max_dest_rd_atomic > | ||
1925 | dev->attr.max_qp_resp_rd_atomic_resc) { | ||
1926 | DP_ERR(dev, | ||
1927 | "unsupported max_dest_rd_atomic=%d, supported=%d\n", | ||
1928 | attr->max_dest_rd_atomic, | ||
1929 | dev->attr.max_qp_resp_rd_atomic_resc); | ||
1930 | |||
1931 | rc = -EINVAL; | ||
1932 | goto err; | ||
1933 | } | ||
1934 | |||
1935 | SET_FIELD(qp_params.modify_flags, | ||
1936 | QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1); | ||
1937 | qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic; | ||
1938 | } | ||
1939 | |||
1940 | if (attr_mask & IB_QP_DEST_QPN) { | ||
1941 | SET_FIELD(qp_params.modify_flags, | ||
1942 | QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1); | ||
1943 | |||
1944 | qp_params.dest_qp = attr->dest_qp_num; | ||
1945 | qp->dest_qp_num = attr->dest_qp_num; | ||
1946 | } | ||
1947 | |||
1948 | if (qp->qp_type != IB_QPT_GSI) | ||
1949 | rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, | ||
1950 | qp->qed_qp, &qp_params); | ||
1951 | |||
1952 | if (attr_mask & IB_QP_STATE) { | ||
1953 | if ((qp->qp_type != IB_QPT_GSI) && (!udata)) | ||
1954 | qedr_update_qp_state(dev, qp, qp_params.new_state); | ||
1955 | qp->state = qp_params.new_state; | ||
1956 | } | ||
1957 | |||
1958 | err: | ||
1959 | return rc; | ||
1960 | } | ||
1961 | |||
1962 | static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params) | ||
1963 | { | ||
1964 | int ib_qp_acc_flags = 0; | ||
1965 | |||
1966 | if (params->incoming_rdma_write_en) | ||
1967 | ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; | ||
1968 | if (params->incoming_rdma_read_en) | ||
1969 | ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ; | ||
1970 | if (params->incoming_atomic_en) | ||
1971 | ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC; | ||
1972 | ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; | ||
1973 | return ib_qp_acc_flags; | ||
1974 | } | ||
1975 | |||
/* Query QP attributes (verbs ->query_qp).
 *
 * Fetches the current QP state from the qed core driver and translates
 * it into the verbs ib_qp_attr / ib_qp_init_attr structures.  Fields
 * with no firmware equivalent (alternate path, SL, static rate) are
 * reported as zero; the device exposes a single port, hence port_num=1.
 */
int qedr_query_qp(struct ib_qp *ibqp,
		  struct ib_qp_attr *qp_attr,
		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct qed_rdma_query_qp_out_params params;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	int rc = 0;

	memset(&params, 0, sizeof(params));

	rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
	if (rc)
		goto err;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
	qp_attr->path_mtu = iboe_get_mtu(params.mtu);
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.rq_psn;
	qp_attr->sq_psn = params.sq_psn;
	qp_attr->dest_qp_num = params.dest_qp;

	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);

	/* Capabilities come from the driver-side QP, not from firmware. */
	qp_attr->cap.max_send_wr = qp->sq.max_wr;
	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
	       sizeof(qp_attr->ah_attr.grh.dgid.raw));

	qp_attr->ah_attr.grh.flow_label = params.flow_label;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
	qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;

	/* RoCE always uses a GRH. */
	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = 0;
	qp_attr->timeout = params.timeout;
	qp_attr->rnr_retry = params.rnr_retry;
	qp_attr->retry_cnt = params.retry_cnt;
	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
	qp_attr->pkey_index = params.pkey_index;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));

	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
	qp_attr->max_rd_atomic = params.max_rd_atomic;
	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;

	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
		 qp_attr->cap.max_inline_data);

err:
	return rc;
}
2046 | |||
2047 | int qedr_destroy_qp(struct ib_qp *ibqp) | ||
2048 | { | ||
2049 | struct qedr_qp *qp = get_qedr_qp(ibqp); | ||
2050 | struct qedr_dev *dev = qp->dev; | ||
2051 | struct ib_qp_attr attr; | ||
2052 | int attr_mask = 0; | ||
2053 | int rc = 0; | ||
2054 | |||
2055 | DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", | ||
2056 | qp, qp->qp_type); | ||
2057 | |||
2058 | if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR | | ||
2059 | QED_ROCE_QP_STATE_INIT)) { | ||
2060 | attr.qp_state = IB_QPS_ERR; | ||
2061 | attr_mask |= IB_QP_STATE; | ||
2062 | |||
2063 | /* Change the QP state to ERROR */ | ||
2064 | qedr_modify_qp(ibqp, &attr, attr_mask, NULL); | ||
2065 | } | ||
2066 | |||
2067 | if (qp->qp_type != IB_QPT_GSI) { | ||
2068 | rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); | ||
2069 | if (rc) | ||
2070 | return rc; | ||
2071 | } | ||
2072 | |||
2073 | if (ibqp->uobject && ibqp->uobject->context) { | ||
2074 | qedr_cleanup_user_sq(dev, qp); | ||
2075 | qedr_cleanup_user_rq(dev, qp); | ||
2076 | } else { | ||
2077 | qedr_cleanup_kernel_sq(dev, qp); | ||
2078 | qedr_cleanup_kernel_rq(dev, qp); | ||
2079 | } | ||
2080 | |||
2081 | kfree(qp); | ||
2082 | |||
2083 | return rc; | ||
2084 | } | ||
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 36c8a692f740..056d6cb31fa2 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h | |||
@@ -62,5 +62,12 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev, | |||
62 | int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); | 62 | int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); |
63 | int qedr_destroy_cq(struct ib_cq *); | 63 | int qedr_destroy_cq(struct ib_cq *); |
64 | int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); | 64 | int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); |
65 | struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, | ||
66 | struct ib_udata *); | ||
67 | int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, | ||
68 | int attr_mask, struct ib_udata *udata); | ||
69 | int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, | ||
70 | int qp_attr_mask, struct ib_qp_init_attr *); | ||
71 | int qedr_destroy_qp(struct ib_qp *ibqp); | ||
65 | 72 | ||
66 | #endif | 73 | #endif |
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index b0fc5f2125e0..75c270d839c8 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h | |||
@@ -69,4 +69,38 @@ struct qedr_create_cq_uresp { | |||
69 | __u16 icid; | 69 | __u16 icid; |
70 | }; | 70 | }; |
71 | 71 | ||
/* User-to-kernel request payload for create_qp (qedr user ABI).
 * Layout must stay in sync with the userspace provider library.
 */
struct qedr_create_qp_ureq {
	/* opaque QP handle supplied by the user library, split into two
	 * 32-bit words (hi/lo) — presumably recombined into a 64-bit
	 * value by the kernel; TODO confirm against the provider.
	 */
	__u32 qp_handle_hi;
	__u32 qp_handle_lo;

	/* SQ */
	/* user space virtual address of SQ buffer */
	__u64 sq_addr;

	/* length of SQ buffer */
	__u64 sq_len;

	/* RQ */
	/* user space virtual address of RQ buffer */
	__u64 rq_addr;

	/* length of RQ buffer */
	__u64 rq_len;
};
90 | |||
/* Kernel-to-user response payload for create_qp (qedr user ABI).
 * Returns the identifiers userspace needs to ring doorbells directly.
 */
struct qedr_create_qp_uresp {
	__u32 qp_id;
	/* non-zero when the device supports remote atomic operations */
	__u32 atomic_supported;

	/* SQ */
	__u32 sq_db_offset;
	__u16 sq_icid;

	/* RQ */
	__u32 rq_db_offset;
	__u16 rq_icid;

	/* secondary RQ doorbell offset — purpose not visible here;
	 * verify against the doorbell mapping code.
	 */
	__u32 rq_db2_offset;
};
105 | |||
72 | #endif /* __QEDR_USER_H__ */ | 106 | #endif /* __QEDR_USER_H__ */ |