Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_qp.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_qp.c	178
1 file changed, 106 insertions(+), 72 deletions(-)
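
Note on the change: this patch adds small-QP support to the eHCA driver. Send and receive queues whose total size fits in 512 or 1024 bytes no longer get a full EHCA_PAGESIZE page each; instead they are carved out of sub-page regions of 128 << page_size bytes, tracked per protection domain (hence the new struct ehca_pd *pd argument threaded through ipz_queue_ctor()/ipz_queue_dtor()). A minimal sketch of the size-bucket arithmetic as the hunks below implement it (the helper name is illustrative, not part of the patch):

	/* Map a queue size in bytes to the small-queue page_size code.
	 * The code is used as an exponent: the queue occupies 128 << code bytes.
	 * 0 means "not small": fall back to regular EHCA_PAGESIZE pages.
	 */
	static int small_queue_page_code(unsigned int q_size)
	{
		if (q_size <= 512)
			return 2;	/* 128 << 2 == 512 bytes  */
		if (q_size <= 1024)
			return 3;	/* 128 << 3 == 1024 bytes */
		return 0;		/* too big for a small queue */
	}
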
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index a3146e696c5d..b178cba96345 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -275,34 +275,39 @@ static inline void queue2resp(struct ipzu_queue_resp *resp,
 	resp->toggle_state = queue->toggle_state;
 }
 
-static inline int ll_qp_msg_size(int nr_sge)
-{
-	return 128 << nr_sge;
-}
-
 /*
  * init_qp_queue initializes/constructs r/squeue and registers queue pages.
  */
 static inline int init_qp_queue(struct ehca_shca *shca,
+				struct ehca_pd *pd,
 				struct ehca_qp *my_qp,
 				struct ipz_queue *queue,
 				int q_type,
 				u64 expected_hret,
-				int nr_q_pages,
-				int wqe_size,
-				int nr_sges)
+				struct ehca_alloc_queue_parms *parms,
+				int wqe_size)
 {
-	int ret, cnt, ipz_rc;
+	int ret, cnt, ipz_rc, nr_q_pages;
 	void *vpage;
 	u64 rpage, h_ret;
 	struct ib_device *ib_dev = &shca->ib_device;
 	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
 
-	if (!nr_q_pages)
+	if (!parms->queue_size)
 		return 0;
 
-	ipz_rc = ipz_queue_ctor(queue, nr_q_pages, EHCA_PAGESIZE,
-				wqe_size, nr_sges);
+	if (parms->is_small) {
+		nr_q_pages = 1;
+		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
+					128 << parms->page_size,
+					wqe_size, parms->act_nr_sges, 1);
+	} else {
+		nr_q_pages = parms->queue_size;
+		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
+					EHCA_PAGESIZE, wqe_size,
+					parms->act_nr_sges, 0);
+	}
+
 	if (!ipz_rc) {
 		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%x",
 			 ipz_rc);
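
Note: after this hunk, init_qp_queue() allocates either exactly one sub-page region of 128 << parms->page_size bytes (512 or 1024 bytes for codes 2 or 3) for a small queue, or parms->queue_size full EHCA_PAGESIZE pages as before. A minimal usage sketch of the two ipz_queue_ctor() shapes, assuming the post-patch signature shown above (values illustrative):

	/* Small queue: one 512-byte region inside a PD-owned kernel page. */
	ipz_rc = ipz_queue_ctor(pd, queue, 1 /* nr_q_pages */,
				128 << 2 /* 512-byte small page */,
				wqe_size, parms->act_nr_sges, 1 /* is_small */);

	/* Regular queue: queue_size discrete pages of EHCA_PAGESIZE each. */
	ipz_rc = ipz_queue_ctor(pd, queue, parms->queue_size,
				EHCA_PAGESIZE, wqe_size,
				parms->act_nr_sges, 0 /* !is_small */);
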
@@ -323,7 +328,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
 		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
 						 my_qp->ipz_qp_handle,
 						 NULL, 0, q_type,
-						 rpage, 1,
+						 rpage, parms->is_small ? 0 : 1,
 						 my_qp->galpas.kernel);
 		if (cnt == (nr_q_pages - 1)) { /* last page! */
 			if (h_ret != expected_hret) {
@@ -354,19 +359,55 @@ static inline int init_qp_queue(struct ehca_shca *shca,
 	return 0;
 
 init_qp_queue1:
-	ipz_queue_dtor(queue);
+	ipz_queue_dtor(pd, queue);
 	return ret;
 }
 
+static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
+{
+	if (is_llqp)
+		return 128 << act_nr_sge;
+	else
+		return offsetof(struct ehca_wqe,
+				u.nud.sg_list[act_nr_sge]);
+}
+
+static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
+				       int req_nr_sge, int is_llqp)
+{
+	u32 wqe_size, q_size;
+	int act_nr_sge = req_nr_sge;
+
+	if (!is_llqp)
+		/* round up #SGEs so WQE size is a power of 2 */
+		for (act_nr_sge = 4; act_nr_sge <= 252;
+		     act_nr_sge = 4 + 2 * act_nr_sge)
+			if (act_nr_sge >= req_nr_sge)
+				break;
+
+	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
+	q_size = wqe_size * (queue->max_wr + 1);
+
+	if (q_size <= 512)
+		queue->page_size = 2;
+	else if (q_size <= 1024)
+		queue->page_size = 3;
+	else
+		queue->page_size = 0;
+
+	queue->is_small = (queue->page_size != 0);
+}
+
 /*
  * Create an ib_qp struct that is either a QP or an SRQ, depending on
  * the value of the is_srq parameter. If init_attr and srq_init_attr share
  * fields, the field out of init_attr is used.
  */
-struct ehca_qp *internal_create_qp(struct ib_pd *pd,
-				   struct ib_qp_init_attr *init_attr,
-				   struct ib_srq_init_attr *srq_init_attr,
-				   struct ib_udata *udata, int is_srq)
+static struct ehca_qp *internal_create_qp(
+	struct ib_pd *pd,
+	struct ib_qp_init_attr *init_attr,
+	struct ib_srq_init_attr *srq_init_attr,
+	struct ib_udata *udata, int is_srq)
 {
 	struct ehca_qp *my_qp;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
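
Note: the round-up loop in ehca_determine_small_queue() above steps act_nr_sge through 4, 12, 28, 60, 124, 252 (each value is 4 + 2 * the previous one) and stops at the first count that covers the request; per the in-code comment, these counts make the resulting non-LL WQE size a power of 2, so WQEs tile the 512/1024-byte small-queue buckets without waste. A standalone sketch of just that walk (hypothetical helper name, same constants as the patch):

	/* Round a requested SGE count up to the next value in the series
	 * 4, 12, 28, 60, 124, 252 (act_nr_sge = 4 + 2 * act_nr_sge).
	 * If req_nr_sge exceeds 252, the loop falls through with the final
	 * update applied, exactly as in ehca_determine_small_queue().
	 */
	static int round_up_sges(int req_nr_sge)
	{
		int act_nr_sge;

		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;
		return act_nr_sge;
	}
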
@@ -552,10 +593,20 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 	if (my_qp->recv_cq)
 		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
 
-	parms.max_send_wr = init_attr->cap.max_send_wr;
-	parms.max_recv_wr = init_attr->cap.max_recv_wr;
-	parms.max_send_sge = max_send_sge;
-	parms.max_recv_sge = max_recv_sge;
+	parms.squeue.max_wr = init_attr->cap.max_send_wr;
+	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
+	parms.squeue.max_sge = max_send_sge;
+	parms.rqueue.max_sge = max_recv_sge;
+
+	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
+	    && !(context && udata)) { /* no small QP support in userspace ATM */
+		ehca_determine_small_queue(
+			&parms.squeue, max_send_sge, is_llqp);
+		ehca_determine_small_queue(
+			&parms.rqueue, max_recv_sge, is_llqp);
+		parms.qp_storage =
+			(parms.squeue.is_small || parms.rqueue.is_small);
+	}
 
 	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
 	if (h_ret != H_SUCCESS) {
@@ -569,50 +620,33 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 	my_qp->ipz_qp_handle = parms.qp_handle;
 	my_qp->galpas = parms.galpas;
 
+	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
+	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
+
 	switch (qp_type) {
 	case IB_QPT_RC:
-		if (!is_llqp) {
-			swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
-				(parms.act_nr_send_sges)]);
-			rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
-				(parms.act_nr_recv_sges)]);
-		} else { /* for LLQP we need to use msg size, not wqe size */
-			swqe_size = ll_qp_msg_size(max_send_sge);
-			rwqe_size = ll_qp_msg_size(max_recv_sge);
-			parms.act_nr_send_sges = 1;
-			parms.act_nr_recv_sges = 1;
+		if (is_llqp) {
+			parms.squeue.act_nr_sges = 1;
+			parms.rqueue.act_nr_sges = 1;
 		}
 		break;
-	case IB_QPT_UC:
-		swqe_size = offsetof(struct ehca_wqe,
-				     u.nud.sg_list[parms.act_nr_send_sges]);
-		rwqe_size = offsetof(struct ehca_wqe,
-				     u.nud.sg_list[parms.act_nr_recv_sges]);
-		break;
-
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
 	case IB_QPT_SMI:
+		/* UD circumvention */
 		if (is_llqp) {
-			swqe_size = ll_qp_msg_size(parms.act_nr_send_sges);
-			rwqe_size = ll_qp_msg_size(parms.act_nr_recv_sges);
-			parms.act_nr_send_sges = 1;
-			parms.act_nr_recv_sges = 1;
+			parms.squeue.act_nr_sges = 1;
+			parms.rqueue.act_nr_sges = 1;
 		} else {
-			/* UD circumvention */
-			parms.act_nr_send_sges -= 2;
-			parms.act_nr_recv_sges -= 2;
-			swqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
-				parms.act_nr_send_sges]);
-			rwqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
-				parms.act_nr_recv_sges]);
+			parms.squeue.act_nr_sges -= 2;
+			parms.rqueue.act_nr_sges -= 2;
 		}
 
 		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
-			parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
-			parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
-			parms.act_nr_send_sges = init_attr->cap.max_send_sge;
-			parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
+			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
+			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
+			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
+			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
 			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
 		}
 
@@ -625,10 +659,9 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 	/* initialize r/squeue and register queue pages */
 	if (HAS_SQ(my_qp)) {
 		ret = init_qp_queue(
-			shca, my_qp, &my_qp->ipz_squeue, 0,
+			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
 			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
-			parms.nr_sq_pages, swqe_size,
-			parms.act_nr_send_sges);
+			&parms.squeue, swqe_size);
 		if (ret) {
 			ehca_err(pd->device, "Couldn't initialize squeue "
 				 "and pages ret=%x", ret);
@@ -638,9 +671,8 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 
 	if (HAS_RQ(my_qp)) {
 		ret = init_qp_queue(
-			shca, my_qp, &my_qp->ipz_rqueue, 1,
-			H_SUCCESS, parms.nr_rq_pages, rwqe_size,
-			parms.act_nr_recv_sges);
+			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
+			H_SUCCESS, &parms.rqueue, rwqe_size);
 		if (ret) {
 			ehca_err(pd->device, "Couldn't initialize rqueue "
 				 "and pages ret=%x", ret);
@@ -670,10 +702,10 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 	}
 
 	init_attr->cap.max_inline_data = 0; /* not supported yet */
-	init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
-	init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
-	init_attr->cap.max_send_sge = parms.act_nr_send_sges;
-	init_attr->cap.max_send_wr = parms.act_nr_send_wqes;
+	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
+	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
+	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
+	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
 	my_qp->init_attr = *init_attr;
 
 	/* NOTE: define_apq0() not supported yet */
@@ -707,6 +739,8 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 		resp.ext_type = my_qp->ext_type;
 		resp.qkey = my_qp->qkey;
 		resp.real_qp_num = my_qp->real_qp_num;
+		resp.ipz_rqueue.offset = my_qp->ipz_rqueue.offset;
+		resp.ipz_squeue.offset = my_qp->ipz_squeue.offset;
 		if (HAS_SQ(my_qp))
 			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
 		if (HAS_RQ(my_qp))
@@ -723,11 +757,11 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 
 create_qp_exit4:
 	if (HAS_RQ(my_qp))
-		ipz_queue_dtor(&my_qp->ipz_rqueue);
+		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit3:
 	if (HAS_SQ(my_qp))
-		ipz_queue_dtor(&my_qp->ipz_squeue);
+		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
 
 create_qp_exit2:
 	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
@@ -752,8 +786,8 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
 }
 
-int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-			struct ib_uobject *uobject);
+static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+			       struct ib_uobject *uobject);
 
 struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 			       struct ib_srq_init_attr *srq_init_attr,
@@ -1669,8 +1703,8 @@ query_srq_exit1:
 	return ret;
 }
 
-int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-			struct ib_uobject *uobject)
+static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+			       struct ib_uobject *uobject)
 {
 	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
@@ -1734,9 +1768,9 @@ int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	}
 
 	if (HAS_RQ(my_qp))
-		ipz_queue_dtor(&my_qp->ipz_rqueue);
+		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 	if (HAS_SQ(my_qp))
-		ipz_queue_dtor(&my_qp->ipz_squeue);
+		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
 	kmem_cache_free(qp_cache, my_qp);
 	return 0;
 }