author     Stefan Roscher <stefan.roscher@de.ibm.com>  2007-07-09 09:26:31 -0400
committer  Roland Dreier <rolandd@cisco.com>  2007-07-09 23:12:27 -0400
commit     472803dab844c0a8a5d757d4c67fa5e76013dcbd (patch)
tree       7c0bec5cb66cfd76ad025ca5d4d7a30f613eb7df /drivers/infiniband/hw
parent     a6a12947fbf4a1782535468d756b0d44babf9760 (diff)
IB/ehca: Support UD low-latency QPs
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c  84
1 file changed, 57 insertions(+), 27 deletions(-)
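For reference, the heart of this patch is the new ll_qp_msg_size() helper, which replaces the two static lookup tables da_rc_msg_size[] and da_ud_sq_msg_size[] with a single shift: the low-latency message size for nr_sge scatter/gather entries is 128 << nr_sge. A minimal standalone sketch (userspace C, not part of the patch) checks the arithmetic:

#include <stdio.h>

/* Same body as the helper the patch adds to ehca_qp.c */
static int ll_qp_msg_size(int nr_sge)
{
	return 128 << nr_sge;
}

int main(void)
{
	int nr_sge;

	/* The patch's new UD LLQP checks accept 1..5 SGEs */
	for (nr_sge = 1; nr_sge <= 5; nr_sge++)
		printf("nr_sge=%d -> msg size = %d bytes\n",
		       nr_sge, ll_qp_msg_size(nr_sge));
	return 0;
}

This prints 256, 512, 1024, 2048 and 4096 bytes, exactly the values the removed da_rc_msg_size[] table held at those indices; the old da_ud_sq_msg_size[] entries (384, 896, 1920, 3968) were not powers of two. Note also that for UD LLQPs the patch now sizes WQEs from parms.act_nr_send_sges/act_nr_recv_sges (the counts actually granted) rather than the requested max_send_sge/max_recv_sge.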
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 6f35f07dc02c..fa3e03050347 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -275,6 +275,11 @@ static inline void queue2resp(struct ipzu_queue_resp *resp,
 	resp->toggle_state = queue->toggle_state;
 }
 
+static inline int ll_qp_msg_size(int nr_sge)
+{
+	return 128 << nr_sge;
+}
+
 /*
  * init_qp_queue initializes/constructs r/squeue and registers queue pages.
  */
@@ -363,8 +368,6 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 				   struct ib_srq_init_attr *srq_init_attr,
 				   struct ib_udata *udata, int is_srq)
 {
-	static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
-	static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
 	struct ehca_qp *my_qp;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
@@ -396,6 +399,7 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
 	}
 	qp_type &= 0x1F;
+	init_attr->qp_type &= 0x1F;
 
 	/* handle SRQ base QPs */
 	if (init_attr->srq) {
@@ -435,23 +439,49 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (is_llqp && (qp_type != IB_QPT_RC && qp_type != IB_QPT_UD)) {
-		ehca_err(pd->device, "unsupported LL QP Type=%x", qp_type);
-		return ERR_PTR(-EINVAL);
-	} else if (is_llqp && qp_type == IB_QPT_RC &&
-		   (init_attr->cap.max_send_wr > 255 ||
-		    init_attr->cap.max_recv_wr > 255 )) {
-		ehca_err(pd->device, "Invalid Number of max_sq_wr=%x "
-			 "or max_rq_wr=%x for RC LLQP",
-			 init_attr->cap.max_send_wr,
-			 init_attr->cap.max_recv_wr);
-		return ERR_PTR(-EINVAL);
-	} else if (is_llqp && qp_type == IB_QPT_UD &&
-		   init_attr->cap.max_send_wr > 255) {
-		ehca_err(pd->device,
-			 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
-			 init_attr->cap.max_send_wr, qp_type);
-		return ERR_PTR(-EINVAL);
+	if (is_llqp) {
+		switch (qp_type) {
+		case IB_QPT_RC:
+			if ((init_attr->cap.max_send_wr > 255) ||
+			    (init_attr->cap.max_recv_wr > 255)) {
+				ehca_err(pd->device,
+					 "Invalid Number of max_sq_wr=%x "
+					 "or max_rq_wr=%x for RC LLQP",
+					 init_attr->cap.max_send_wr,
+					 init_attr->cap.max_recv_wr);
+				return ERR_PTR(-EINVAL);
+			}
+			break;
+		case IB_QPT_UD:
+			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
+				ehca_err(pd->device, "UD LLQP not supported "
+					 "by this adapter");
+				return ERR_PTR(-ENOSYS);
+			}
+			if (!(init_attr->cap.max_send_sge <= 5
+			    && init_attr->cap.max_send_sge >= 1
+			    && init_attr->cap.max_recv_sge <= 5
+			    && init_attr->cap.max_recv_sge >= 1)) {
+				ehca_err(pd->device,
+					 "Invalid Number of max_send_sge=%x "
+					 "or max_recv_sge=%x for UD LLQP",
+					 init_attr->cap.max_send_sge,
+					 init_attr->cap.max_recv_sge);
+				return ERR_PTR(-EINVAL);
+			} else if (init_attr->cap.max_send_wr > 255) {
+				ehca_err(pd->device,
+					 "Invalid Number of "
+					 "ax_send_wr=%x for UD QP_TYPE=%x",
+					 init_attr->cap.max_send_wr, qp_type);
+				return ERR_PTR(-EINVAL);
+			}
+			break;
+		default:
+			ehca_err(pd->device, "unsupported LL QP Type=%x",
+				 qp_type);
+			return ERR_PTR(-EINVAL);
+			break;
+		}
 	}
 
 	if (pd->uobject && udata)
@@ -509,7 +539,7 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 	/* UD_AV CIRCUMVENTION */
 	max_send_sge = init_attr->cap.max_send_sge;
 	max_recv_sge = init_attr->cap.max_recv_sge;
-	if (parms.servicetype == ST_UD) {
+	if (parms.servicetype == ST_UD && !is_llqp) {
 		max_send_sge += 2;
 		max_recv_sge += 2;
 	}
@@ -547,8 +577,8 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 		rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
 			(parms.act_nr_recv_sges)]);
 	} else { /* for LLQP we need to use msg size, not wqe size */
-		swqe_size = da_rc_msg_size[max_send_sge];
-		rwqe_size = da_rc_msg_size[max_recv_sge];
+		swqe_size = ll_qp_msg_size(max_send_sge);
+		rwqe_size = ll_qp_msg_size(max_recv_sge);
 		parms.act_nr_send_sges = 1;
 		parms.act_nr_recv_sges = 1;
 	}
@@ -563,15 +593,15 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
 	case IB_QPT_SMI:
-		/* UD circumvention */
-		parms.act_nr_recv_sges -= 2;
-		parms.act_nr_send_sges -= 2;
 		if (is_llqp) {
-			swqe_size = da_ud_sq_msg_size[max_send_sge];
-			rwqe_size = da_rc_msg_size[max_recv_sge];
+			swqe_size = ll_qp_msg_size(parms.act_nr_send_sges);
+			rwqe_size = ll_qp_msg_size(parms.act_nr_recv_sges);
 			parms.act_nr_send_sges = 1;
 			parms.act_nr_recv_sges = 1;
 		} else {
+			/* UD circumvention */
+			parms.act_nr_send_sges -= 2;
+			parms.act_nr_recv_sges -= 2;
 			swqe_size = offsetof(struct ehca_wqe,
 				u.ud_av.sg_list[parms.act_nr_send_sges]);
 			rwqe_size = offsetof(struct ehca_wqe,