author		Ram Amrani <Ram.Amrani@cavium.com>	2016-10-10 06:15:33 -0400
committer	Doug Ledford <dledford@redhat.com>	2016-10-14 15:00:10 -0400
commit		a7efd7773e31b60f695816c27393fc717a9df127 (patch)
tree		476259221ef9aaeee0d79d902fefb5c533eba47d
parent		ac1b36e55a5137e2f146e60be36d0cc81069feb6 (diff)
qedr: Add support for PD,PKEY and CQ verbs
Add support for protection domain and completion queue verbs.

Signed-off-by: Rajesh Borundia <rajesh.borundia@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
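For context, these kernel verbs are what the standard libibverbs entry points resolve to on a qedr device. Below is a minimal, illustrative user-space sketch of the call paths this patch enables; it assumes a qedr-backed device at index 0 and omits error handling:

	#include <stdio.h>
	#include <infiniband/verbs.h>

	int main(void)
	{
		int num;
		struct ibv_device **list = ibv_get_device_list(&num);
		struct ibv_context *ctx = ibv_open_device(list[0]);
		uint16_t pkey;

		/* backed by qedr_query_pkey() (via sysfs); RoCE reports 0xffff */
		ibv_query_pkey(ctx, 1, 0, &pkey);

		/* served by qedr_alloc_pd() / qedr_dealloc_pd() */
		struct ibv_pd *pd = ibv_alloc_pd(ctx);

		/* qedr_create_cq() backs ibv_create_cq(); ibv_req_notify_cq()
		 * lands in qedr_arm_cq(), which rings the CQ doorbell
		 */
		struct ibv_comp_channel *ch = ibv_create_comp_channel(ctx);
		struct ibv_cq *cq = ibv_create_cq(ctx, 64, NULL, ch, 0);
		ibv_req_notify_cq(cq, 0);

		ibv_destroy_cq(cq);
		ibv_destroy_comp_channel(ch);
		ibv_dealloc_pd(pd);
		ibv_close_device(ctx);
		ibv_free_device_list(list);
		return 0;
	}

ibv_alloc_pd() resolves to qedr_alloc_pd(), ibv_create_cq() to qedr_create_cq(), and ibv_req_notify_cq() to qedr_arm_cq() via the uverbs_cmd_mask and function-pointer wiring in main.c below.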
-rw-r--r--	drivers/infiniband/hw/qedr/main.c	49
-rw-r--r--	drivers/infiniband/hw/qedr/qedr.h	78
-rw-r--r--	drivers/infiniband/hw/qedr/qedr_hsi_rdma.h	79
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.c	539
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.h	14
-rw-r--r--	include/uapi/rdma/qedr-abi.h	19
6 files changed, 777 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 55c4f565c214..35928abb6b63 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -87,7 +87,14 @@ static int qedr_register_device(struct qedr_dev *dev)
 
 	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
 				     QEDR_UVERBS(QUERY_DEVICE) |
-				     QEDR_UVERBS(QUERY_PORT);
+				     QEDR_UVERBS(QUERY_PORT) |
+				     QEDR_UVERBS(ALLOC_PD) |
+				     QEDR_UVERBS(DEALLOC_PD) |
+				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+				     QEDR_UVERBS(CREATE_CQ) |
+				     QEDR_UVERBS(RESIZE_CQ) |
+				     QEDR_UVERBS(DESTROY_CQ) |
+				     QEDR_UVERBS(REQ_NOTIFY_CQ);
 
 	dev->ibdev.phys_port_cnt = 1;
 	dev->ibdev.num_comp_vectors = dev->num_cnq;
@@ -105,6 +112,16 @@ static int qedr_register_device(struct qedr_dev *dev)
 	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
 	dev->ibdev.mmap = qedr_mmap;
 
+	dev->ibdev.alloc_pd = qedr_alloc_pd;
+	dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+	dev->ibdev.create_cq = qedr_create_cq;
+	dev->ibdev.destroy_cq = qedr_destroy_cq;
+	dev->ibdev.resize_cq = qedr_resize_cq;
+	dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+	dev->ibdev.query_pkey = qedr_query_pkey;
+
 	dev->ibdev.dma_device = &dev->pdev->dev;
 
 	dev->ibdev.get_link_layer = qedr_link_layer;
@@ -322,6 +339,8 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
 {
 	u16 hw_comp_cons, sw_comp_cons;
 	struct qedr_cnq *cnq = handle;
+	struct regpair *cq_handle;
+	struct qedr_cq *cq;
 
 	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
 
@@ -334,8 +353,36 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
 	rmb();
 
 	while (sw_comp_cons != hw_comp_cons) {
+		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+							   cq_handle->lo);
+
+		if (cq == NULL) {
+			DP_ERR(cnq->dev,
+			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
+			       hw_comp_cons);
+
+			break;
+		}
+
+		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+			DP_ERR(cnq->dev,
+			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
+			       cq_handle->hi, cq_handle->lo, cq);
+			break;
+		}
+
+		cq->arm_flags = 0;
+
+		if (cq->ibcq.comp_handler)
+			(*cq->ibcq.comp_handler)
+				(&cq->ibcq, cq->ibcq.cq_context);
+
 		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
 		cnq->n_comp++;
+
 	}
 
 	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 2091c0d6e67d..9e2846a599ce 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -50,6 +50,10 @@
 
 #define QEDR_MSG_INIT "INIT"
 #define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ   "  CQ"
+#define QEDR_MSG_MR   "  MR"
+
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
 
 struct qedr_dev;
 
@@ -181,6 +185,12 @@ struct qedr_dev {
 #define QEDR_ROCE_PKEY_TABLE_LEN 1
 #define QEDR_ROCE_PKEY_DEFAULT 0xffff
 
+struct qedr_pbl {
+	struct list_head list_entry;
+	void *va;
+	dma_addr_t pa;
+};
+
 struct qedr_ucontext {
 	struct ib_ucontext ibucontext;
 	struct qedr_dev *dev;
@@ -196,6 +206,64 @@ struct qedr_ucontext {
 	struct mutex mm_list_lock;
 };
 
+union db_prod64 {
+	struct rdma_pwm_val32_data data;
+	u64 raw;
+};
+
+enum qedr_cq_type {
+	QEDR_CQ_TYPE_GSI,
+	QEDR_CQ_TYPE_KERNEL,
+	QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+	u32 num_pbls;
+	u32 num_pbes;
+	u32 pbl_size;
+	u32 pbe_size;
+	bool two_layered;
+};
+
+struct qedr_userq {
+	struct ib_umem *umem;
+	struct qedr_pbl_info pbl_info;
+	struct qedr_pbl *pbl_tbl;
+	u64 buf_addr;
+	size_t buf_len;
+};
+
+struct qedr_cq {
+	struct ib_cq ibcq;
+
+	enum qedr_cq_type cq_type;
+	u32 sig;
+
+	u16 icid;
+
+	/* Lock to protect multiple CQs */
+	spinlock_t cq_lock;
+	u8 arm_flags;
+	struct qed_chain pbl;
+
+	void __iomem *db_addr;
+	union db_prod64 db;
+
+	u8 pbl_toggle;
+	union rdma_cqe *latest_cqe;
+	union rdma_cqe *toggle_cqe;
+
+	u32 cq_cons;
+
+	struct qedr_userq q;
+};
+
+struct qedr_pd {
+	struct ib_pd ibpd;
+	u32 pd_id;
+	struct qedr_ucontext *uctx;
+};
+
 struct qedr_mm {
 	struct {
 		u64 phy_addr;
@@ -215,4 +283,14 @@ static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
 	return container_of(ibdev, struct qedr_dev, ibdev);
 }
 
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
 #endif
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 3e508fbd5e78..84f6520107cc 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -47,6 +47,19 @@ struct rdma_cqe_responder {
 	__le32 imm_data_hi;
 	__le16 rq_cons;
 	u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK	0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT	0
+#define RDMA_CQE_RESPONDER_TYPE_MASK		0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT		1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK		0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT	3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK		0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT	4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK	0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT	5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK	0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT	6
+	u8 status;
 };
 
 struct rdma_cqe_requester {
@@ -58,6 +71,12 @@ struct rdma_cqe_requester {
 	__le32 reserved3;
 	__le16 reserved4;
 	u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK	0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT	0
+#define RDMA_CQE_REQUESTER_TYPE_MASK		0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT		1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK	0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT	3
 	u8 status;
 };
 
@@ -66,6 +85,12 @@ struct rdma_cqe_common {
 	struct regpair qp_handle;
 	__le16 reserved1[7];
 	u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK		0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT	0
+#define RDMA_CQE_COMMON_TYPE_MASK		0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT		1
+#define RDMA_CQE_COMMON_RESERVED2_MASK		0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT		3
 	u8 status;
 };
 
@@ -76,6 +101,45 @@ union rdma_cqe {
 	struct rdma_cqe_common cmn;
 };
 
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+	RDMA_CQE_REQ_STS_OK,
+	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+	RDMA_CQE_RESP_STS_OK,
+	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+	RDMA_CQE_TYPE_REQUESTER,
+	RDMA_CQE_TYPE_RESPONDER_RQ,
+	RDMA_CQE_TYPE_RESPONDER_SRQ,
+	RDMA_CQE_TYPE_INVALID,
+	MAX_RDMA_CQE_TYPE
+};
+
 struct rdma_sq_sge {
 	__le32 length;
 	struct regpair addr;
@@ -93,4 +157,19 @@ struct rdma_srq_sge {
 	__le32 length;
 	__le32 l_key;
 };
+
+/* RDMA doorbell data for CQ */
+struct rdma_pwm_val32_data {
+	__le16 icid;
+	u8 agg_flags;
+	u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK	0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT	0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK	0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT	2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK	0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT	3
+	__le32 value;
+};
+
 #endif /* __QED_HSI_RDMA__ */
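The MASK/SHIFT pairs added above are meant to be consumed with the qed drivers' GET_FIELD-style helper. A standalone sketch of decoding a requester CQE flags byte (constants copied from this header; GET_FIELD mirrors the macro the qed common headers define, shown here only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK	0x1
	#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT	0
	#define RDMA_CQE_REQUESTER_TYPE_MASK		0x3
	#define RDMA_CQE_REQUESTER_TYPE_SHIFT		1

	/* extract one bit-field from a flags byte, qed GET_FIELD style */
	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & (name##_MASK))

	int main(void)
	{
		uint8_t flags = 0x03;	/* toggle=1, type=RDMA_CQE_TYPE_RESPONDER_RQ */

		printf("toggle=%u type=%u\n",
		       (unsigned)GET_FIELD(flags, RDMA_CQE_REQUESTER_TOGGLE_BIT),
		       (unsigned)GET_FIELD(flags, RDMA_CQE_REQUESTER_TYPE));
		return 0;
	}

The toggle bit is what consume_cqe() in verbs.c flips against cq->pbl_toggle to detect when the chain has wrapped.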
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b9dcade1cb9f..b525c6cf1df0 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -49,6 +49,17 @@
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
 
+#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+		return -EINVAL;
+
+	*pkey = QEDR_ROCE_PKEY_DEFAULT;
+	return 0;
+}
+
 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
 		   union ib_gid *sgid)
 {
@@ -454,3 +465,531 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
 	return rc;
 }
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+			    struct ib_ucontext *context, struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qedr_ucontext *uctx = NULL;
+	struct qedr_alloc_pd_uresp uresp;
+	struct qedr_pd *pd;
+	u16 pd_id;
+	int rc;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+		 (udata && context) ? "User Lib" : "Kernel");
+
+	if (!dev->rdma_ctx) {
+		DP_ERR(dev, "invalid RDMA context\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+
+	uresp.pd_id = pd_id;
+	pd->pd_id = pd_id;
+
+	if (udata && context) {
+		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+		if (rc)
+			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+		uctx = get_qedr_ucontext(context);
+		uctx->pd = pd;
+		pd->uctx = uctx;
+	}
+
+	return &pd->ibpd;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+	struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+	if (!pd)
+		pr_err("Invalid PD received in dealloc_pd\n");
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+	kfree(pd);
+
+	return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int i;
+
+	for (i = 0; i < pbl_info->num_pbls; i++) {
+		if (!pbl[i].va)
+			continue;
+		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+				  pbl[i].va, pbl[i].pa);
+	}
+
+	kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+					   struct qedr_pbl_info *pbl_info,
+					   gfp_t flags)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct qedr_pbl *pbl_table;
+	dma_addr_t *pbl_main_tbl;
+	dma_addr_t pa;
+	void *va;
+	int i;
+
+	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+	if (!pbl_table)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < pbl_info->num_pbls; i++) {
+		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+					&pa, flags);
+		if (!va)
+			goto err;
+
+		memset(va, 0, pbl_info->pbl_size);
+		pbl_table[i].va = va;
+		pbl_table[i].pa = pa;
+	}
+
+	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
+	 * the first one with physical pointers to all of the rest
+	 */
+	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+	for (i = 0; i < pbl_info->num_pbls - 1; i++)
+		pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+	return pbl_table;
+
+err:
+	for (i--; i >= 0; i--)
+		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+				  pbl_table[i].va, pbl_table[i].pa);
+
+	qedr_free_pbl(dev, pbl_info, pbl_table);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+				struct qedr_pbl_info *pbl_info,
+				u32 num_pbes, int two_layer_capable)
+{
+	u32 pbl_capacity;
+	u32 pbl_size;
+	u32 num_pbls;
+
+	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+		if (num_pbes > MAX_PBES_TWO_LAYER) {
+			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+			       num_pbes);
+			return -EINVAL;
+		}
+
+		/* calculate required pbl page size */
+		pbl_size = MIN_FW_PBL_PAGE_SIZE;
+		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+			       NUM_PBES_ON_PAGE(pbl_size);
+
+		while (pbl_capacity < num_pbes) {
+			pbl_size *= 2;
+			pbl_capacity = pbl_size / sizeof(u64);
+			pbl_capacity = pbl_capacity * pbl_capacity;
+		}
+
+		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+		num_pbls++;	/* One for the layer0 (points to the pbls) */
+		pbl_info->two_layered = true;
+	} else {
+		/* One layered PBL */
+		num_pbls = 1;
+		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+				 roundup_pow_of_two((num_pbes * sizeof(u64))));
+		pbl_info->two_layered = false;
+	}
+
+	pbl_info->num_pbls = num_pbls;
+	pbl_info->pbl_size = pbl_size;
+	pbl_info->num_pbes = num_pbes;
+
+	DP_DEBUG(dev, QEDR_MSG_MR,
+		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+	return 0;
+}
+
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+			       struct qedr_pbl *pbl,
+			       struct qedr_pbl_info *pbl_info)
+{
+	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	struct qedr_pbl *pbl_tbl;
+	struct scatterlist *sg;
+	struct regpair *pbe;
+	int entry;
+	u32 addr;
+
+	if (!pbl_info->num_pbes)
+		return;
+
+	/* If we have a two layered pbl, the first pbl points to the rest
+	 * of the pbls and the first entry lays on the second pbl in the table
+	 */
+	if (pbl_info->two_layered)
+		pbl_tbl = &pbl[1];
+	else
+		pbl_tbl = pbl;
+
+	pbe = (struct regpair *)pbl_tbl->va;
+	if (!pbe) {
+		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+		return;
+	}
+
+	pbe_cnt = 0;
+
+	shift = ilog2(umem->page_size);
+
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		pages = sg_dma_len(sg) >> shift;
+		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+			/* store the page address in pbe */
+			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+					      umem->page_size * pg_cnt);
+			addr = upper_32_bits(sg_dma_address(sg) +
+					     umem->page_size * pg_cnt);
+			pbe->hi = cpu_to_le32(addr);
+			pbe_cnt++;
+			total_num_pbes++;
+			pbe++;
+
+			if (total_num_pbes == pbl_info->num_pbes)
+				return;
+
+			/* If the given pbl is full storing the pbes,
+			 * move to next pbl.
+			 */
+			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+				pbl_tbl++;
+				pbe = (struct regpair *)pbl_tbl->va;
+				pbe_cnt = 0;
+			}
+		}
+	}
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+			      struct qedr_cq *cq, struct ib_udata *udata)
+{
+	struct qedr_create_cq_uresp uresp;
+	int rc;
+
+	memset(&uresp, 0, sizeof(uresp));
+
+	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+	uresp.icid = cq->icid;
+
+	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (rc)
+		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+	return rc;
+}
+
+static void consume_cqe(struct qedr_cq *cq)
+{
+	if (cq->latest_cqe == cq->toggle_cqe)
+		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+	cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+	u64 size, aligned_size;
+
+	/* We allocate an extra entry that we don't report to the FW. */
+	size = (entries + 1) * QEDR_CQE_SIZE;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+
+	return aligned_size / QEDR_CQE_SIZE;
+}
+
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+				       struct qedr_dev *dev,
+				       struct qedr_userq *q,
+				       u64 buf_addr, size_t buf_len,
+				       int access, int dmasync)
+{
+	int page_cnt;
+	int rc;
+
+	q->buf_addr = buf_addr;
+	q->buf_len = buf_len;
+	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+	if (IS_ERR(q->umem)) {
+		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+		       PTR_ERR(q->umem));
+		return PTR_ERR(q->umem);
+	}
+
+	page_cnt = ib_umem_page_count(q->umem);
+	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+	if (rc)
+		goto err0;
+
+	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(q->pbl_tbl))
+		goto err0;
+
+	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+	return 0;
+
+err0:
+	ib_umem_release(q->umem);
+
+	return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+				       struct qedr_ucontext *ctx,
+				       struct qedr_dev *dev, int vector,
+				       int chain_entries, int page_cnt,
+				       u64 pbl_ptr,
+				       struct qed_rdma_create_cq_in_params
+				       *params)
+{
+	memset(params, 0, sizeof(*params));
+	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+	params->cnq_id = vector;
+	params->cq_size = chain_entries - 1;
+	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+	params->pbl_num_pages = page_cnt;
+	params->pbl_ptr = pbl_ptr;
+	params->pbl_two_level = 0;
+}
+
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+	/* Flush data before signalling doorbell */
+	wmb();
+	cq->db.data.agg_flags = flags;
+	cq->db.data.value = cpu_to_le32(cons);
+	writeq(cq->db.raw, cq->db_addr);
+
+	/* Make sure write would stick */
+	mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+	unsigned long sflags;
+
+	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+		return 0;
+
+	spin_lock_irqsave(&cq->cq_lock, sflags);
+
+	cq->arm_flags = 0;
+
+	if (flags & IB_CQ_SOLICITED)
+		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+	if (flags & IB_CQ_NEXT_COMP)
+		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+	spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+	return 0;
+}
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+			     const struct ib_cq_init_attr *attr,
+			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+	struct qed_rdma_destroy_cq_out_params destroy_oparams;
+	struct qed_rdma_destroy_cq_in_params destroy_iparams;
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qed_rdma_create_cq_in_params params;
+	struct qedr_create_cq_ureq ureq;
+	int vector = attr->comp_vector;
+	int entries = attr->cqe;
+	struct qedr_cq *cq;
+	int chain_entries;
+	int page_cnt;
+	u64 pbl_ptr;
+	u16 icid;
+	int rc;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT,
+		 "create_cq: called from %s. entries=%d, vector=%d\n",
+		 udata ? "User Lib" : "Kernel", entries, vector);
+
+	if (entries > QEDR_MAX_CQES) {
+		DP_ERR(dev,
+		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+		       entries, QEDR_MAX_CQES);
+		return ERR_PTR(-EINVAL);
+	}
+
+	chain_entries = qedr_align_cq_entries(entries);
+	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq)
+		return ERR_PTR(-ENOMEM);
+
+	if (udata) {
+		memset(&ureq, 0, sizeof(ureq));
+		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+			DP_ERR(dev,
+			       "create cq: problem copying data from user space\n");
+			goto err0;
+		}
+
+		if (!ureq.len) {
+			DP_ERR(dev,
+			       "create cq: cannot create a cq with 0 entries\n");
+			goto err0;
+		}
+
+		cq->cq_type = QEDR_CQ_TYPE_USER;
+
+		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+		if (rc)
+			goto err0;
+
+		pbl_ptr = cq->q.pbl_tbl->pa;
+		page_cnt = cq->q.pbl_info.num_pbes;
+	} else {
+		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+		rc = dev->ops->common->chain_alloc(dev->cdev,
+						   QED_CHAIN_USE_TO_CONSUME,
+						   QED_CHAIN_MODE_PBL,
+						   QED_CHAIN_CNT_TYPE_U32,
+						   chain_entries,
+						   sizeof(union rdma_cqe),
+						   &cq->pbl);
+		if (rc)
+			goto err1;
+
+		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+	}
+
+	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+			    pbl_ptr, &params);
+
+	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+	if (rc)
+		goto err2;
+
+	cq->icid = icid;
+	cq->sig = QEDR_CQ_MAGIC_NUMBER;
+	spin_lock_init(&cq->cq_lock);
+
+	if (ib_ctx) {
+		rc = qedr_copy_cq_uresp(dev, cq, udata);
+		if (rc)
+			goto err3;
+	} else {
+		/* Generate doorbell address. */
+		cq->db_addr = dev->db_addr +
+			      DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+		cq->db.data.icid = cq->icid;
+		cq->db.data.params = DB_AGG_CMD_SET <<
+				     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+		/* point to the very last element, passing it we will toggle */
+		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+		cq->latest_cqe = NULL;
+		consume_cqe(cq);
+		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_CQ,
+		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+		 cq->icid, cq, params.cq_size);
+
+	return &cq->ibcq;
+
+err3:
+	destroy_iparams.icid = cq->icid;
+	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+				  &destroy_oparams);
+err2:
+	if (udata)
+		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+	else
+		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+	if (udata)
+		ib_umem_release(cq->q.umem);
+err0:
+	kfree(cq);
+	return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+	return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+	struct qed_rdma_destroy_cq_out_params oparams;
+	struct qed_rdma_destroy_cq_in_params iparams;
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d\n", cq->icid);
+
+	/* GSI CQs are handled by the driver, so they don't exist in the FW */
+	if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+		iparams.icid = cq->icid;
+		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+	}
+
+	if (ibcq->uobject && ibcq->uobject->context) {
+		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+		ib_umem_release(cq->q.umem);
+	}
+
+	kfree(cq);
+
+	return 0;
+}
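To make the qedr_prepare_pbl_tbl() sizing above concrete, here is a standalone sketch of the same arithmetic for a hypothetical 1 GiB user buffer of 4 KiB pages (constants copied from the patch; this is illustrative userspace code, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
	#define NUM_PBES_ON_PAGE(sz) ((sz) / sizeof(uint64_t))

	int main(void)
	{
		/* 1 GiB / 4 KiB = 262144 PBEs; more than one 64 KiB PBL page
		 * can hold (8192), so the two-layer branch is taken.
		 */
		uint32_t num_pbes = (1024u * 1024 * 1024) / 4096;
		uint32_t pbl_size = MIN_FW_PBL_PAGE_SIZE;
		uint64_t capacity = NUM_PBES_ON_PAGE(pbl_size) *
				    NUM_PBES_ON_PAGE(pbl_size);

		/* grow the PBL page size until two layers cover all PBEs */
		while (capacity < num_pbes) {
			pbl_size *= 2;
			capacity = (pbl_size / sizeof(uint64_t)) *
				   (pbl_size / sizeof(uint64_t));
		}

		/* layer-1 pages for the PBEs, plus one layer-0 page */
		uint32_t num_pbls = (num_pbes + NUM_PBES_ON_PAGE(pbl_size) - 1) /
				    NUM_PBES_ON_PAGE(pbl_size) + 1;

		/* prints: pbl_size=4096 num_pbls=513 */
		printf("pbl_size=%u num_pbls=%u\n", pbl_size, num_pbls);
		return 0;
	}

Here 512 x 512 PBEs already fit at the minimum 4 KiB page size, so the loop never runs and the table ends up as 512 layer-1 pages plus one layer-0 page.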
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 9472044dd587..36c8a692f740 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -40,6 +40,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask,
 
 int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
 
+int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
 int qedr_dealloc_ucontext(struct ib_ucontext *);
 
@@ -49,4 +51,16 @@ int qedr_del_gid(struct ib_device *device, u8 port_num,
 int qedr_add_gid(struct ib_device *device, u8 port_num,
 		 unsigned int index, const union ib_gid *gid,
 		 const struct ib_gid_attr *attr, void **context);
+struct ib_pd *qedr_alloc_pd(struct ib_device *,
+			    struct ib_ucontext *, struct ib_udata *);
+int qedr_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+			     const struct ib_cq_init_attr *attr,
+			     struct ib_ucontext *ib_ctx,
+			     struct ib_udata *udata);
+int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int qedr_destroy_cq(struct ib_cq *);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
 #endif
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index f7c7fff7a877..b0fc5f2125e0 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -50,4 +50,23 @@ struct qedr_alloc_ucontext_resp {
 	__u32 sges_per_srq_wr;
 	__u32 max_cqes;
 };
+
+struct qedr_alloc_pd_ureq {
+	__u64 rsvd1;
+};
+
+struct qedr_alloc_pd_uresp {
+	__u32 pd_id;
+};
+
+struct qedr_create_cq_ureq {
+	__u64 addr;
+	__u64 len;
+};
+
+struct qedr_create_cq_uresp {
+	__u32 db_offset;
+	__u16 icid;
+};
+
 #endif /* __QEDR_USER_H__ */