path: root/drivers/infiniband/hw/ipath/ipath_qp.c
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_qp.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c  180
1 file changed, 90 insertions(+), 90 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6058d70d7577..9f8855d970c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -188,8 +188,8 @@ static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
  * Allocate the next available QPN and put the QP into the hash table.
  * The hash table holds a reference to the QP.
  */
-int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
-		    enum ib_qp_type type)
+static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
+			   enum ib_qp_type type)
 {
 	unsigned long flags;
 	u32 qpn;
@@ -232,7 +232,7 @@ bail:
  * Remove the QP from the table so it can't be found asynchronously by
  * the receive interrupt routine.
  */
-void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
+static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 {
 	struct ipath_qp *q, **qpp;
 	unsigned long flags;
@@ -358,6 +358,65 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 }
 
 /**
+ * ipath_error_qp - put a QP into an error state
+ * @qp: the QP to put into an error state
+ *
+ * Flushes both send and receive work queues.
+ * QP r_rq.lock and s_lock should be held.
+ */
+
+static void ipath_error_qp(struct ipath_qp *qp)
+{
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ib_wc wc;
+
+	_VERBS_INFO("QP%d/%d in error state\n",
+		    qp->ibqp.qp_num, qp->remote_qpn);
+
+	spin_lock(&dev->pending_lock);
+	/* XXX What if its already removed by the timeout code? */
+	if (!list_empty(&qp->timerwait))
+		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
+	spin_unlock(&dev->pending_lock);
+
+	wc.status = IB_WC_WR_FLUSH_ERR;
+	wc.vendor_err = 0;
+	wc.byte_len = 0;
+	wc.imm_data = 0;
+	wc.qp_num = qp->ibqp.qp_num;
+	wc.src_qp = 0;
+	wc.wc_flags = 0;
+	wc.pkey_index = 0;
+	wc.slid = 0;
+	wc.sl = 0;
+	wc.dlid_path_bits = 0;
+	wc.port_num = 0;
+
+	while (qp->s_last != qp->s_head) {
+		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+		wc.wr_id = wqe->wr.wr_id;
+		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+		if (++qp->s_last >= qp->s_size)
+			qp->s_last = 0;
+		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+	}
+	qp->s_cur = qp->s_tail = qp->s_head;
+	qp->s_hdrwords = 0;
+	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+
+	wc.opcode = IB_WC_RECV;
+	while (qp->r_rq.tail != qp->r_rq.head) {
+		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
+		if (++qp->r_rq.tail >= qp->r_rq.size)
+			qp->r_rq.tail = 0;
+		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+	}
+}
+
+/**
  * ipath_modify_qp - modify the attributes of a queue pair
  * @ibqp: the queue pair who's attributes we're modifying
  * @attr: the new attributes
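The hunk above moves ipath_error_qp() ahead of ipath_modify_qp() and, together with the removal near the end of this diff, makes it static, so the definition precedes its use without needing a forward declaration; the relocated copy also adopts the list_empty()/list_del_init() style introduced elsewhere in this patch. The function drains both circular work queues, posting an IB_WC_WR_FLUSH_ERR completion for each outstanding entry while advancing the consumer index (s_last, r_rq.tail) until it meets the producer index (s_head, r_rq.head). A rough, self-contained user-space sketch of that drain loop follows, with a made-up ring type standing in for the driver's queues:

	#include <stdio.h>

	/* Toy ring buffer: 'head' is where the producer posts next,
	 * 'tail' is the oldest entry still outstanding. */
	#define QSIZE 8

	struct ring {
		unsigned head;
		unsigned tail;
		unsigned long wr_id[QSIZE];
	};

	static void flush_ring(struct ring *r)
	{
		while (r->tail != r->head) {
			/* Report the entry as flushed, then advance and
			 * wrap, exactly like "if (++s_last >= s_size)". */
			printf("flushed wr_id %lu\n", r->wr_id[r->tail]);
			if (++r->tail >= QSIZE)
				r->tail = 0;
		}
	}

	int main(void)
	{
		struct ring r = { .head = 2, .tail = 6 };

		for (unsigned i = 0; i < QSIZE; i++)
			r.wr_id[i] = 100 + i;
		flush_ring(&r);		/* drains entries 6, 7, 0, 1 */
		return 0;
	}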
@@ -368,6 +427,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		    int attr_mask)
 {
+	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_qp *qp = to_iqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	unsigned long flags;
@@ -384,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			    attr_mask))
 		goto inval;
 
+	if (attr_mask & IB_QP_AV)
+		if (attr->ah_attr.dlid == 0 ||
+		    attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+			goto inval;
+
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+			goto inval;
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER)
+		if (attr->min_rnr_timer > 31)
+			goto inval;
+
 	switch (new_state) {
 	case IB_QPS_RESET:
 		ipath_reset_qp(qp);
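These added checks (mirrored by removals in the next three hunks) hoist the dlid, pkey_index, and min_rnr_timer validation ahead of the switch on new_state, so an invalid attribute makes ipath_modify_qp() fail before it has reset or otherwise modified the QP rather than bailing out half-applied. A compilable toy sketch of the same validate-then-apply shape follows; the struct, mask bits, and the 0xc000 multicast-LID cutoff are illustrative only, not the driver's types:

	#include <stdbool.h>
	#include <stdio.h>

	struct qp_attrs {
		unsigned dlid;
		unsigned pkey_index;
		unsigned min_rnr_timer;
	};

	#define WANT_AV        0x1
	#define WANT_PKEY      0x2
	#define WANT_RNR_TIMER 0x4

	static bool modify_attrs(struct qp_attrs *qp, const struct qp_attrs *req,
				 unsigned mask, unsigned npkeys)
	{
		/* Phase 1: reject anything invalid before touching *qp. */
		if ((mask & WANT_AV) && (req->dlid == 0 || req->dlid >= 0xc000))
			return false;
		if ((mask & WANT_PKEY) && req->pkey_index >= npkeys)
			return false;
		if ((mask & WANT_RNR_TIMER) && req->min_rnr_timer > 31)
			return false;

		/* Phase 2: apply; no error exits from here on. */
		if (mask & WANT_AV)
			qp->dlid = req->dlid;
		if (mask & WANT_PKEY)
			qp->pkey_index = req->pkey_index;
		if (mask & WANT_RNR_TIMER)
			qp->min_rnr_timer = req->min_rnr_timer;
		return true;
	}

	int main(void)
	{
		struct qp_attrs qp = { 0 };
		struct qp_attrs req = { .dlid = 0, .pkey_index = 1 };

		/* dlid 0 fails validation, so qp is left untouched. */
		printf("ok=%d dlid=%u\n",
		       modify_attrs(&qp, &req, WANT_AV | WANT_PKEY, 4), qp.dlid);
		return 0;
	}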
@@ -398,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	}
 
-	if (attr_mask & IB_QP_PKEY_INDEX) {
-		struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
-			goto inval;
+	if (attr_mask & IB_QP_PKEY_INDEX)
 		qp->s_pkey_index = attr->pkey_index;
-	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
 		qp->remote_qpn = attr->dest_qp_num;
@@ -420,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_ACCESS_FLAGS)
 		qp->qp_access_flags = attr->qp_access_flags;
 
-	if (attr_mask & IB_QP_AV) {
-		if (attr->ah_attr.dlid == 0 ||
-		    attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
-			goto inval;
+	if (attr_mask & IB_QP_AV)
 		qp->remote_ah_attr = attr->ah_attr;
-	}
 
 	if (attr_mask & IB_QP_PATH_MTU)
 		qp->path_mtu = attr->path_mtu;
@@ -440,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
 	}
 
-	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-		if (attr->min_rnr_timer > 31)
-			goto inval;
+	if (attr_mask & IB_QP_MIN_RNR_TIMER)
 		qp->s_min_rnr_timer = attr->min_rnr_timer;
-	}
 
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
@@ -651,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 			     init_attr->qp_type == IB_QPT_RC ?
 			     ipath_do_rc_send : ipath_do_uc_send,
 			     (unsigned long)qp);
-		qp->piowait.next = LIST_POISON1;
-		qp->piowait.prev = LIST_POISON2;
-		qp->timerwait.next = LIST_POISON1;
-		qp->timerwait.prev = LIST_POISON2;
+		INIT_LIST_HEAD(&qp->piowait);
+		INIT_LIST_HEAD(&qp->timerwait);
 		qp->state = IB_QPS_RESET;
 		qp->s_wq = swq;
 		qp->s_size = init_attr->cap.max_send_wr + 1;
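Switching piowait and timerwait from the LIST_POISON sentinels to INIT_LIST_HEAD() is what lets the later hunks (in ipath_destroy_qp(), ipath_sqerror_qp(), and the relocated ipath_error_qp()) test membership with list_empty() and unlink with list_del_init(): a self-pointing node reads as "not queued" and returns to that state after removal, whereas plain list_del() leaves poisoned pointers that must be compared against LIST_POISON1 and cannot safely be unlinked twice. A minimal user-space sketch of the idiom, with a hand-rolled node type standing in for struct list_head:

	#include <stdio.h>

	/* Minimal stand-in for the kernel's struct list_head. */
	struct node {
		struct node *next, *prev;
	};

	static void init_node(struct node *n)		/* like INIT_LIST_HEAD() */
	{
		n->next = n->prev = n;
	}

	static int node_empty(const struct node *n)	/* like list_empty() */
	{
		return n->next == n;
	}

	static void add_node(struct node *n, struct node *head)	/* like list_add() */
	{
		n->next = head->next;
		n->prev = head;
		head->next->prev = n;
		head->next = n;
	}

	static void del_node_init(struct node *n)	/* like list_del_init() */
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
		init_node(n);	/* node is "empty" again, not poisoned */
	}

	int main(void)
	{
		struct node head, a;

		init_node(&head);
		init_node(&a);
		add_node(&a, &head);
		printf("queued:  empty=%d\n", node_empty(&a));	/* 0 */
		del_node_init(&a);
		printf("removed: empty=%d\n", node_empty(&a));	/* 1 */
		return 0;
	}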
@@ -675,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		ipath_reset_qp(qp);
 
 		/* Tell the core driver that the kernel SMA is present. */
-		if (qp->ibqp.qp_type == IB_QPT_SMI)
+		if (init_attr->qp_type == IB_QPT_SMI)
 			ipath_layer_set_verbs_flags(dev->dd,
 						    IPATH_VERBS_KERNEL_SMA);
 		break;
@@ -724,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 
 	/* Make sure the QP isn't on the timeout list. */
 	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
+	if (!list_empty(&qp->timerwait))
+		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 	/*
@@ -796,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 
 	spin_lock(&dev->pending_lock);
 	/* XXX What if its already removed by the timeout code? */
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
+	if (!list_empty(&qp->timerwait))
+		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
 	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
@@ -821,65 +880,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 }
 
 /**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
- *
- * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
- */
-
-void ipath_error_qp(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ib_wc wc;
-
-	_VERBS_INFO("QP%d/%d in error state\n",
-		    qp->ibqp.qp_num, qp->remote_qpn);
-
-	spin_lock(&dev->pending_lock);
-	/* XXX What if its already removed by the timeout code? */
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	wc.status = IB_WC_WR_FLUSH_ERR;
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
-	wc.qp_num = qp->ibqp.qp_num;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-
-	while (qp->s_last != qp->s_head) {
-		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-		wc.wr_id = wqe->wr.wr_id;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->s_hdrwords = 0;
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
-	wc.opcode = IB_WC_RECV;
-	while (qp->r_rq.tail != qp->r_rq.head) {
-		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
-		if (++qp->r_rq.tail >= qp->r_rq.size)
-			qp->r_rq.tail = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-	}
-}
-
-/**
  * ipath_get_credit - flush the send work queue of a QP
  * @qp: the qp who's send work queue to flush
  * @aeth: the Acknowledge Extended Transport Header