Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c  | 16
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  | 51
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 25
3 files changed, 40 insertions, 52 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c1..78152a8ad17d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
         int i;
         u8 status;
 
-        /* Make sure EQ size is aligned to a power of 2 size. */
-        for (i = 1; i < nent; i <<= 1)
-                ; /* nothing */
-        nent = i;
-
-        eq->dev = dev;
+        eq->dev  = dev;
+        eq->nent = roundup_pow_of_two(max(nent, 2));
 
         eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                 GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
                 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
         }
 
-        for (i = 0; i < nent; ++i)
+        for (i = 0; i < eq->nent; ++i)
                 set_eqe_hw(get_eqe(eq, i));
 
         eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
         if (err)
                 goto err_out_free_eq;
 
-        eq->nent = nent;
-
         memset(eq_context, 0, sizeof *eq_context);
         eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
                                         MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
         if (mthca_is_memfree(dev))
                 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
 
-        eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
+        eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
         if (mthca_is_memfree(dev)) {
                 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
         } else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
         dev->eq_table.arm_mask |= eq->eqn_mask;
 
         mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
-                  eq->eqn, nent);
+                  eq->eqn, eq->nent);
 
         return err;
 
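The mthca_eq.c change replaces the open-coded power-of-two loop with roundup_pow_of_two(max(nent, 2)), clamping the EQ to at least two entries and assigning eq->nent up front, so the value is already valid when the set_eqe_hw() loop and the ffs(eq->nent) log-size computation read it. A standalone sketch of what that rounding computes, in plain C rather than driver code:

#include <stdio.h>

/* Plain-C illustration of roundup_pow_of_two(max(nent, 2)); the kernel
 * helper produces the same result using fls(). */
static unsigned int demo_roundup_pow_of_two(unsigned int n)
{
        unsigned int r = 1;

        if (n < 2)              /* max(nent, 2): never build an EQ smaller than 2 entries */
                n = 2;
        while (r < n)           /* rounds up to the next power of 2, like the removed loop */
                r <<= 1;
        return r;
}

int main(void)
{
        /* e.g. nent = 1000 rounds up to 1024, so ffs(eq->nent) - 1 == 10 */
        printf("%u -> %u\n", 1000u, demo_roundup_pow_of_two(1000u));
        return 0;
}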
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e7..5fa00669f9b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
         wq->last_comp = wq->max - 1;
         wq->head      = 0;
         wq->tail      = 0;
-        wq->last      = NULL;
 }
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
         }
 
         if (attr_mask & IB_QP_TIMEOUT) {
-                qp_context->pri_path.ackto = attr->timeout;
+                qp_context->pri_path.ackto = attr->timeout << 3;
                 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
         }
 
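The attr->timeout << 3 change in mthca_modify_qp() suggests that the hardware keeps the 5-bit IB Local ACK Timeout in the upper bits of the QP context's ackto byte rather than at bit 0; the exact layout is an inference from the shift, not something the diff itself states. A minimal sketch of that assumed packing:

#include <stdint.h>

/* Assumed layout, inferred from the "<< 3" in the hunk above: the 5-bit
 * IB ACK timeout sits in bits 7:3 of the ackto byte of the QP context. */
static inline uint8_t pack_ackto(uint8_t ib_timeout)
{
        return (uint8_t)((ib_timeout & 0x1f) << 3);
}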
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
                 }
         }
 
+        qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+        qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
         return 0;
 }
 
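Initializing qp->sq.last and qp->rq.last to the final WQE of each ring (and srq->last likewise in mthca_srq.c below) keeps the previous-WQE pointer valid from the very first post, which is what lets the posting paths in the following hunks drop their if (prev_wqe) and if (likely(prev_wqe)) guards and chain every new WQE unconditionally. A self-contained sketch of that invariant, using hypothetical names rather than the mthca structures:

#include <stddef.h>

/* Hypothetical ring, not the mthca structures: "last" always points at a
 * real WQE slot, so the chaining code never needs a NULL check. */
struct demo_ring {
        char *buf;
        int   wqe_shift;        /* log2 of the WQE stride */
        int   max;              /* number of WQEs (a power of 2) */
        void *last;             /* most recently posted WQE */
};

static void *demo_wqe(struct demo_ring *r, int ind)
{
        return r->buf + ((size_t) ind << r->wqe_shift);
}

void demo_ring_init(struct demo_ring *r)
{
        /* mirrors qp->sq.last = get_send_wqe(qp, qp->sq.max - 1) */
        r->last = demo_wqe(r, r->max - 1);
}

void demo_post(struct demo_ring *r, int ind)
{
        void *prev = r->last;   /* always valid; no "if (prev)" special case */

        /* ... build the WQE at ind, then link prev's next segment to it ... */
        (void) prev;
        r->last = demo_wqe(r, ind);
}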
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         goto out;
                 }
 
-                if (prev_wqe) {
-                        ((struct mthca_next_seg *) prev_wqe)->nda_op =
-                                cpu_to_be32(((ind << qp->sq.wqe_shift) +
-                                             qp->send_wqe_offset) |
-                                            mthca_opcode[wr->opcode]);
-                        wmb();
-                        ((struct mthca_next_seg *) prev_wqe)->ee_nds =
-                                cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-                }
+                ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                        cpu_to_be32(((ind << qp->sq.wqe_shift) +
+                                     qp->send_wqe_offset) |
+                                    mthca_opcode[wr->opcode]);
+                wmb();
+                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                        cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
 
                 if (!size0) {
                         size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
                 qp->wrid[ind] = wr->wr_id;
 
-                if (likely(prev_wqe)) {
-                        ((struct mthca_next_seg *) prev_wqe)->nda_op =
-                                cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-                        wmb();
-                        ((struct mthca_next_seg *) prev_wqe)->ee_nds =
-                                cpu_to_be32(MTHCA_NEXT_DBD | size);
-                }
+                ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                        cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+                wmb();
+                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                        cpu_to_be32(MTHCA_NEXT_DBD | size);
 
                 if (!size0)
                         size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         goto out;
                 }
 
-                if (likely(prev_wqe)) {
-                        ((struct mthca_next_seg *) prev_wqe)->nda_op =
-                                cpu_to_be32(((ind << qp->sq.wqe_shift) +
-                                             qp->send_wqe_offset) |
-                                            mthca_opcode[wr->opcode]);
-                        wmb();
-                        ((struct mthca_next_seg *) prev_wqe)->ee_nds =
-                                cpu_to_be32(MTHCA_NEXT_DBD | size);
-                }
+                ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                        cpu_to_be32(((ind << qp->sq.wqe_shift) +
+                                     qp->send_wqe_offset) |
+                                    mthca_opcode[wr->opcode]);
+                wmb();
+                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                        cpu_to_be32(MTHCA_NEXT_DBD | size);
 
                 if (!size0) {
                         size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
         for (i = 0; i < 2; ++i)
                 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
 
+        mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
         mthca_alloc_cleanup(&dev->qp_table.alloc);
 }
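The one-line addition to mthca_cleanup_qp_table() releases the QP lookup array on teardown, matching the mthca_array_init() call that the init path presumably makes; without it the array's storage would leak when the driver is unloaded. A self-contained sketch of that init/cleanup symmetry, with hypothetical names standing in for the mthca_array helpers:

#include <stdlib.h>

/* Hypothetical table, not the mthca types: what init allocates,
 * cleanup must free. */
struct demo_qp_table {
        void **qp;      /* stands in for the mthca_array backing storage */
        int    num;
};

int demo_table_init(struct demo_qp_table *t, int num)
{
        t->qp  = calloc((size_t) num, sizeof(*t->qp));
        t->num = num;
        return t->qp ? 0 : -1;
}

void demo_table_cleanup(struct demo_qp_table *t)
{
        /* the counterpart the hunk adds: skipping this leaks t->qp
         * every time the driver is torn down */
        free(t->qp);
        t->qp = NULL;
}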
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef12..18998d48c53e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
         }
 
+        srq->last = get_wqe(srq, srq->max - 1);
+
         return 0;
 }
 
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
         srq->max     = attr->max_wr;
         srq->max_gs  = attr->max_sge;
-        srq->last    = NULL;
         srq->counter = 0;
 
         if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                         err = -ENOMEM;
                         *bad_wr = wr;
-                        return nreq;
+                        break;
                 }
 
                 wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         err = -EINVAL;
                         *bad_wr = wr;
                         srq->last = prev_wqe;
-                        return nreq;
+                        break;
                 }
 
                 for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         ((struct mthca_data_seg *) wqe)->addr = 0;
                 }
 
-                if (likely(prev_wqe)) {
-                        ((struct mthca_next_seg *) prev_wqe)->nda_op =
-                                cpu_to_be32((ind << srq->wqe_shift) | 1);
-                        wmb();
-                        ((struct mthca_next_seg *) prev_wqe)->ee_nds =
-                                cpu_to_be32(MTHCA_NEXT_DBD);
-                }
+                ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                        cpu_to_be32((ind << srq->wqe_shift) | 1);
+                wmb();
+                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                        cpu_to_be32(MTHCA_NEXT_DBD);
 
                 srq->wrid[ind]  = wr->wr_id;
                 srq->first_free = next_ind;
         }
 
-        return nreq;
-
         if (likely(nreq)) {
                 __be32 doorbell[2];
 
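In the SRQ receive-posting paths, error exits now break out of the work-request loop instead of returning immediately, so control still reaches the if (likely(nreq)) doorbell code and the WQEs written before the failure are actually handed to the hardware; the stray return nreq that had made that doorbell block unreachable is dropped as well. A self-contained sketch of the control flow, with hypothetical types and helpers rather than the mthca API:

#include <errno.h>

/* Hypothetical posting loop: stop queuing on error, but still ring the
 * doorbell for whatever was queued before the failure. */
struct demo_wr { struct demo_wr *next; };

static int demo_queue_full;                     /* stands in for the ring-full test */

static void demo_ring_doorbell(int nreq)
{
        (void) nreq;                            /* the real code writes a doorbell here */
}

int demo_post_list(struct demo_wr *wr, struct demo_wr **bad_wr)
{
        int err = 0, nreq = 0;

        for (; wr; wr = wr->next) {
                if (demo_queue_full) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;                  /* was "return nreq": the doorbell below never ran */
                }
                ++nreq;                         /* the real code builds a WQE in the ring here */
        }

        if (nreq)
                demo_ring_doorbell(nreq);       /* hardware still sees the WQEs queued before the error */

        return err;
}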
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                         err = -ENOMEM;
                         *bad_wr = wr;
-                        return nreq;
+                        break;
                 }
 
                 wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                 if (unlikely(wr->num_sge > srq->max_gs)) {
                         err = -EINVAL;
                         *bad_wr = wr;
-                        return nreq;
+                        break;
                 }
 
                 for (i = 0; i < wr->num_sge; ++i) {