author    Ralph Campbell <ralph.campbell@qlogic.com>    2007-07-25 14:08:28 -0400
committer Roland Dreier <rolandd@cisco.com>             2007-10-09 23:05:49 -0400
commit    4ee97180ac76deb5a715ac45b7d7516e6ee82ae7
tree      6683d1c34d3f36271a9d8275a645ce67222ffc56
parent    210d6ca3db058cd1d6e6fd235ee3e25d6ac221cd
IB/ipath: Change UD to queue work requests like RC & UC
The code to post UD sends tried to process work requests at the time ib_post_send() was called, without using a WQE queue. This was fine as long as hardware resources were available for sending a packet. This patch changes UD to be handled more like RC and UC, and shares more code.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
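For context, a minimal sketch of the queuing model this patch moves UD onto: rather than building the packet inside ib_post_send(), the work request is copied into a slot of the QP's circular software send queue and consumed later by the send tasklet. This is illustrative only; sketch_post_one_send is a hypothetical name, and the locking, SGE validation, and flow control of the patch's real ipath_post_one_send are omitted.

/*
 * Illustrative sketch (simplified, hypothetical name): queue one send
 * work request into the QP's circular software send queue instead of
 * processing it inline at post time.
 */
static int sketch_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	u32 next;
	int i;

	/* Reserve the slot after s_head; the ring is full when the
	 * next slot would collide with s_last (oldest pending WQE). */
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	wqe = get_swqe_ptr(qp, qp->s_head);	/* driver helper */
	wqe->wr = *wr;		/* real code also copies the SGEs */
	wqe->length = 0;
	for (i = 0; i < wr->num_sge; i++)
		wqe->length += wr->sg_list[i].length;
	wqe->ssn = qp->s_ssn++;

	/* Publish the WQE; the send tasklet consumes it later. */
	qp->s_head = next;
	return 0;
}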
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 1324b35ff1f8..a8c4a6b03d7a 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -338,6 +338,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->s_busy = 0;
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
+	qp->s_wqe = NULL;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -751,6 +752,9 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	switch (init_attr->qp_type) {
 	case IB_QPT_UC:
 	case IB_QPT_RC:
+	case IB_QPT_UD:
+	case IB_QPT_SMI:
+	case IB_QPT_GSI:
 		sz = sizeof(struct ipath_sge) *
 			init_attr->cap.max_send_sge +
 			sizeof(struct ipath_swqe);
@@ -759,10 +763,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 			ret = ERR_PTR(-ENOMEM);
 			goto bail;
 		}
-		/* FALLTHROUGH */
-	case IB_QPT_UD:
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
 		sz = sizeof(*qp);
 		if (init_attr->srq) {
 			struct ipath_srq *srq = to_isrq(init_attr->srq);
@@ -805,8 +805,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	spin_lock_init(&qp->r_rq.lock);
 	atomic_set(&qp->refcount, 0);
 	init_waitqueue_head(&qp->wait);
-	tasklet_init(&qp->s_task, ipath_do_ruc_send,
-		     (unsigned long)qp);
+	tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
 	INIT_LIST_HEAD(&qp->piowait);
 	INIT_LIST_HEAD(&qp->timerwait);
 	qp->state = IB_QPS_RESET;
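With all send-capable QP types sharing the WQE queue, a single tasklet handler can dispatch on QP type, which is why the tasklet is now initialized with ipath_do_send. A rough sketch of the idea (the real function lives outside this file; the send loop below simplifies its PIO-buffer and loopback handling, and those details are assumptions):

/* Sketch only: simplified from the shape of the post-patch send path. */
static void sketch_do_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	int (*make_req)(struct ipath_qp *qp);

	/* Choose the per-type packet builder; UD/SMI/GSI now queue
	 * WQEs just like RC and UC. */
	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = ipath_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = ipath_make_uc_req;
	else
		make_req = ipath_make_ud_req;

	do {
		/* If a header was built, hand the packet to the HW;
		 * stop if no PIO buffer is available and let the
		 * tasklet be rescheduled when buffers free up. */
		if (qp->s_hdrwords != 0) {
			if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					     qp->s_cur_sge, qp->s_cur_size))
				break;
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));	/* build the next packet from the queue */
}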