author     Ralph Campbell <ralph.campbell@qlogic.com>    2007-03-15 17:44:51 -0400
committer  Roland Dreier <rolandd@cisco.com>             2007-04-18 23:20:55 -0400
commit     3859e39d75b72f35f7d38c618fbbacb39a440c22
tree       51d57723574395b54914c08260b9d0a8467a91b1  /drivers/infiniband/hw/ipath/ipath_qp.c
parent     7b21d26ddad6912bf345e8e88a51a5ce98a036ad
IB/ipath: Support larger IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MAX_QP_RD_ATOMIC
This patch adds support for multiple outstanding RDMA reads and atomics: the
requester can now issue more than one RDMA read or atomic operation before it
is required to see an ACK.
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
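
For context, here is a minimal sketch (not part of this patch) of how a kernel
verbs consumer might ask for the deeper limits this change enables. The
function name and the value 4 are illustrative assumptions; ib_modify_qp() and
the IB_QP_MAX_QP_RD_ATOMIC / IB_QP_MAX_DEST_RD_ATOMIC attribute bits are the
standard kernel verbs interface. A real INIT->RTR->RTS transition would carry
the full state-specific attribute mask, omitted here for brevity.

#include <rdma/ib_verbs.h>

/* Illustrative only: request up to 4 outstanding RDMA reads/atomics in each
 * direction on an RC QP.  With this patch, ipath accepts responder depths up
 * to IPATH_MAX_RDMA_ATOMIC instead of effectively 1. */
static int example_set_rd_atomic(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { };

	attr.max_rd_atomic      = 4;	/* outstanding as requester */
	attr.max_dest_rd_atomic = 4;	/* accepted as responder */

	return ib_modify_qp(qp, &attr,
			    IB_QP_MAX_QP_RD_ATOMIC |
			    IB_QP_MAX_DEST_RD_ATOMIC);
}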
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
 drivers/infiniband/hw/ipath/ipath_qp.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 64f07b19349..c122fea9145 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -320,7 +320,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
+	qp->s_busy = 0;
+	qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
@@ -333,7 +334,6 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 	qp->r_nak_state = 0;
 	qp->r_wrid_valid = 0;
 	qp->s_rnr_timeout = 0;
@@ -344,6 +344,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
 	qp->s_wait_credit = 0;
+	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+	qp->r_head_ack_queue = 0;
+	qp->s_tail_ack_queue = 0;
+	qp->s_num_rd_atomic = 0;
 	if (qp->r_rq.wq) {
 		qp->r_rq.wq->head = 0;
 		qp->r_rq.wq->tail = 0;
@@ -503,6 +507,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	    attr->path_mig_state != IB_MIG_REARM)
 		goto inval;
 
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
+			goto inval;
+
 	switch (new_state) {
 	case IB_QPS_RESET:
 		ipath_reset_qp(qp);
@@ -559,6 +567,12 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
 
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+		qp->s_max_rd_atomic = attr->max_rd_atomic;
+
 	qp->state = new_state;
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 
@@ -598,8 +612,8 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->alt_pkey_index = 0;
 	attr->en_sqd_async_notify = 0;
 	attr->sq_draining = 0;
-	attr->max_rd_atomic = 1;
-	attr->max_dest_rd_atomic = 1;
+	attr->max_rd_atomic = qp->s_max_rd_atomic;
+	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
 	attr->min_rnr_timer = qp->r_min_rnr_timer;
 	attr->port_num = 1;
 	attr->timeout = qp->timeout;
@@ -614,7 +628,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->recv_cq = qp->ibqp.recv_cq;
 	init_attr->srq = qp->ibqp.srq;
 	init_attr->cap = attr->cap;
-	if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
+	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
 		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
 	else
 		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -786,7 +800,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	qp->s_size = init_attr->cap.max_send_wr + 1;
 	qp->s_max_sge = init_attr->cap.max_send_sge;
 	if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
-		qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
+		qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
 	else
 		qp->s_flags = 0;
 	dev = to_idev(ibpd->device);
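
As a usage note, the ipath_query_qp() hunk above means the reported limits now
reflect what was stored by ipath_modify_qp() rather than a hard-coded 1. Below
is a minimal sketch, assuming a hypothetical consumer (the function name is an
illustration, not from this patch), of reading the values back through the
standard ib_query_qp() interface.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: report the negotiated RDMA read/atomic depths.  After
 * this patch they come from qp->s_max_rd_atomic and qp->r_max_rd_atomic
 * instead of being hard-coded to 1. */
static void example_show_rd_atomic(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	if (ib_query_qp(qp, &attr,
			IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			&init_attr))
		return;

	printk(KERN_INFO "requester depth %u, responder depth %u\n",
	       attr.max_rd_atomic, attr.max_dest_rd_atomic);
}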