| author | Steve Wise <swise@opengridcomputing.com> | 2010-09-17 16:40:15 -0400 |
|---|---|---|
| committer | Roland Dreier <rolandd@cisco.com> | 2010-09-28 13:53:50 -0400 |
| commit | 40dbf6ee381008e471d3c4a332971247b7799744 (patch) | |
| tree | 6249fb3fd9cca9e2e42c01a798ef21b4f5a1e328 /drivers/infiniband/hw/cxgb4/qp.c | |
| parent | 410ade4c26bdf256fea3246e968a12409eb08763 (diff) | |
RDMA/cxgb4: Fastreg NSMR fixes
- Remove dsgl support - doesn't work in T4.
- Wrap the immediate PBL as needed when building it in the wr (see the sketch after this message).
- Adjust max pbl depth allowed based on ulptx alignment requirements.
- Bump the slots per SQ to 5 to allow up to 128MB fast registers.
- Advertise fastreg support by default.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
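
The PBL wrap called out above is the core of the change: because the immediate PBL can now span the end of the send queue, build_fastreg() walks a ring rather than a flat buffer. Below is a minimal standalone sketch of that pattern, not driver code; the names `copy_pbl_wrapped`, `queue`, `queue_slots`, `pbl`, and `npages` are made up for illustration. It copies the page list into a ring of 64-bit slots, wraps the write pointer at the end of the queue, and zero-pads up to the 32-byte-rounded immediate length, mirroring the for/while loops in the new build_fastreg().

```c
/*
 * Illustrative sketch only (hypothetical names, not cxgb4 code): copy a
 * page list into a ring of 64-bit slots, wrapping at the end of the ring
 * and zero-padding out to the 32-byte-rounded immediate length -- the same
 * shape as the new build_fastreg() loops over sq->queue.
 */
#include <stdint.h>
#include <stddef.h>

#define ROUNDUP32(x) (((x) + 31UL) & ~31UL)

static void copy_pbl_wrapped(uint64_t *queue, size_t queue_slots,
                             uint64_t *start, const uint64_t *pbl,
                             unsigned int npages)
{
        uint64_t *p = start;            /* write pointer into the ring */
        long rem = ROUNDUP32(npages * sizeof(uint64_t));
        unsigned int i;

        for (i = 0; i < npages; i++) {
                *p = pbl[i];            /* real code byte-swaps with cpu_to_be64() */
                rem -= sizeof(*p);
                if (++p == &queue[queue_slots])
                        p = queue;      /* hit the end of the ring: wrap */
        }
        while (rem > 0) {               /* pad out the 32-byte-aligned tail */
                *p = 0;
                rem -= sizeof(*p);
                if (++p == &queue[queue_slots])
                        p = queue;
        }
}
```

As in the patch, `rem` never goes negative, since the immediate length is rounded up from the page-list size; the padding loop only runs when that rounding added slack.
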
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 52
1 file changed, 25 insertions(+), 27 deletions(-)
```diff
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ff04e5cc28ce..057cb2505ea1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -505,13 +505,15 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 	return 0;
 }
 
-static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
+			 struct ib_send_wr *wr, u8 *len16)
 {
 
 	struct fw_ri_immd *imdp;
 	__be64 *p;
 	int i;
 	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+	int rem;
 
 	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
 		return -EINVAL;
@@ -526,32 +528,28 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
 	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
 					0xffffffff);
-	if (pbllen > T4_MAX_FR_IMMD) {
-		struct c4iw_fr_page_list *c4pl =
-			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
-		struct fw_ri_dsgl *sglp;
-
-		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
-		sglp->op = FW_RI_DATA_DSGL;
-		sglp->r1 = 0;
-		sglp->nsge = cpu_to_be16(1);
-		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
-		sglp->len0 = cpu_to_be32(pbllen);
-
-		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
-	} else {
-		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
-		imdp->op = FW_RI_DATA_IMMD;
-		imdp->r1 = 0;
-		imdp->r2 = 0;
-		imdp->immdlen = cpu_to_be32(pbllen);
-		p = (__be64 *)(imdp + 1);
-		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
-			*p = cpu_to_be64(
-				(u64)wr->wr.fast_reg.page_list->page_list[i]);
-		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
-				      16);
+	WARN_ON(pbllen > T4_MAX_FR_IMMD);
+	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+	imdp->op = FW_RI_DATA_IMMD;
+	imdp->r1 = 0;
+	imdp->r2 = 0;
+	imdp->immdlen = cpu_to_be32(pbllen);
+	p = (__be64 *)(imdp + 1);
+	rem = pbllen;
+	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+		rem -= sizeof *p;
+		if (++p == (__be64 *)&sq->queue[sq->size])
+			p = (__be64 *)sq->queue;
+	}
+	BUG_ON(rem < 0);
+	while (rem) {
+		*p = 0;
+		rem -= sizeof *p;
+		if (++p == (__be64 *)&sq->queue[sq->size])
+			p = (__be64 *)sq->queue;
 	}
+	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
 	return 0;
 }
 
@@ -652,7 +650,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_FAST_REG_MR:
 			fw_opcode = FW_RI_FR_NSMR_WR;
 			swsqe->opcode = FW_RI_FAST_REGISTER;
-			err = build_fastreg(wqe, wr, &len16);
+			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_LOCAL_INV:
 			if (wr->send_flags & IB_SEND_FENCE)
```
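
For readers following the c4iw_post_send() hunk above, a hedged sketch of the consumer side: a kernel ULP of this era allocates a fast-reg MR and page list and posts an IB_WR_FAST_REG_MR work request carrying the wr.fast_reg fields that build_fastreg() reads. The helper name post_fastreg() and its qp/pages parameters are assumptions for illustration, not part of the patch.

```c
/*
 * Hedged example of the ULP side of a fast-register WR (illustrative
 * helper; not from this patch). Assumes mr came from ib_alloc_fast_reg_mr()
 * and frpl from ib_alloc_fast_reg_page_list() with room for npages entries.
 */
#include <rdma/ib_verbs.h>
#include <linux/string.h>

static int post_fastreg(struct ib_qp *qp, struct ib_mr *mr,
			struct ib_fast_reg_page_list *frpl,
			const u64 *pages, unsigned int npages,
			u64 iova, u32 len)
{
	struct ib_send_wr wr, *bad_wr;
	unsigned int i;

	if (npages > frpl->max_page_list_len)
		return -EINVAL;

	for (i = 0; i < npages; i++)
		frpl->page_list[i] = pages[i];	/* DMA addresses of the pages */

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.fast_reg.iova_start = iova;
	wr.wr.fast_reg.page_list = frpl;
	wr.wr.fast_reg.page_list_len = npages;
	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	wr.wr.fast_reg.length = len;
	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				      IB_ACCESS_REMOTE_READ |
				      IB_ACCESS_REMOTE_WRITE;
	wr.wr.fast_reg.rkey = mr->rkey;

	return ib_post_send(qp, &wr, &bad_wr);
}
```

With this patch the driver builds the entire PBL as immediate data in the SQ (wrapping it when needed) instead of pointing at it with a DSGL, which is why the DSGL branch and the c4iw_fr_page_list DMA address are gone from build_fastreg().
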