author     Bryan O'Sullivan <bos@pathscale.com>      2006-07-01 07:35:50 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-07-01 12:55:58 -0400
commit     ddd4bb22108417fdc5c35324bd13a3265581ae76 (patch)
tree       5cc35e54e8761af27746bae48ef66318237ad8a0 /drivers/infiniband/hw
parent     759d57686dab8169ca68bbf938ce8e965d1e107a (diff)
[PATCH] IB/ipath: share more common code between RC and UC protocols
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_qp.c     |    8
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_rc.c     |  204
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_ruc.c    |  245
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_uc.c     |  153
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_verbs.c  |    2
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_verbs.h  |   20
6 files changed, 270 insertions, 362 deletions
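
The change is easier to follow with the shape of the refactoring in mind: before this patch, RC and UC each had their own send tasklet (ipath_do_rc_send() and ipath_do_uc_send()) that duplicated the loopback check, header construction, and PIO send; afterwards a single ipath_do_ruc_send() owns that common path and only the request construction stays protocol specific (ipath_make_rc_req() / ipath_make_uc_req()). The following is a small standalone sketch of that dispatch pattern, not code from the driver; every type and name in it is a simplified stand-in.

/*
 * Standalone sketch (not ipath driver code) of the dispatch pattern this
 * patch introduces: one shared send routine for RC and UC queue pairs that
 * owns the common packet-construction path and calls a per-protocol
 * request builder.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

enum qp_type { QPT_RC, QPT_UC };

struct sketch_qp {
        enum qp_type type;
        u32 next_psn;                   /* next packet sequence number */
};

/* Each protocol supplies only its request builder; it returns 1 if a
 * packet was constructed and 0 if there is nothing to send. */
static int make_rc_req(struct sketch_qp *qp, u32 *bth0, u32 *bth2)
{
        *bth0 = 0x04u << 24;            /* pretend RC opcode */
        *bth2 = qp->next_psn++ & 0xffffff;
        return 1;
}

static int make_uc_req(struct sketch_qp *qp, u32 *bth0, u32 *bth2)
{
        *bth0 = 0x24u << 24;            /* pretend UC opcode */
        *bth2 = qp->next_psn++ & 0xffffff;
        return 1;
}

/* The shared path: everything except request construction is common. */
static void do_ruc_send(struct sketch_qp *qp)
{
        u32 bth0, bth2;
        int built = (qp->type == QPT_RC) ?
                make_rc_req(qp, &bth0, &bth2) :
                make_uc_req(qp, &bth0, &bth2);

        if (!built)
                return;                 /* nothing to send */

        /* ...common LRH/GRH/BTH construction and PIO send go here... */
        printf("send bth0=%#x bth2=%#x\n", bth0, bth2);
}

int main(void)
{
        struct sketch_qp rc = { QPT_RC, 0 };
        struct sketch_qp uc = { QPT_UC, 0 };

        do_ruc_send(&rc);
        do_ruc_send(&uc);
        return 0;
}
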
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 28fffbd4bee6..e1d31bccf3eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -709,9 +709,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
-		tasklet_init(&qp->s_task,
-			     init_attr->qp_type == IB_QPT_RC ?
-			     ipath_do_rc_send : ipath_do_uc_send,
+		tasklet_init(&qp->s_task, ipath_do_ruc_send,
 			     (unsigned long)qp);
 		INIT_LIST_HEAD(&qp->piowait);
 		INIT_LIST_HEAD(&qp->timerwait);
@@ -896,9 +894,9 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 	 * as many packets as we like. Otherwise, we have to
 	 * honor the credit field.
 	 */
-	if (credit == IPS_AETH_CREDIT_INVAL) {
+	if (credit == IPS_AETH_CREDIT_INVAL)
 		qp->s_lsn = (u32) -1;
-	} else if (qp->s_lsn != (u32) -1) {
+	else if (qp->s_lsn != (u32) -1) {
 		/* Compute new LSN (i.e., MSN + credit) */
 		credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
 		if (ipath_cmp24(credit, qp->s_lsn) > 0)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 33b8c78fcdcc..0ca89af3e10d 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -73,9 +73,9 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
  * Return bth0 if constructed; otherwise, return 0.
  * Note the QP s_lock must be held.
  */
-static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
-				     struct ipath_other_headers *ohdr,
-				     u32 pmtu)
+u32 ipath_make_rc_ack(struct ipath_qp *qp,
+		      struct ipath_other_headers *ohdr,
+		      u32 pmtu)
 {
 	struct ipath_sge_state *ss;
 	u32 hwords;
@@ -96,8 +96,7 @@ static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
 		if (len > pmtu) {
 			len = pmtu;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-		}
-		else
+		} else
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
 		qp->s_rdma_len -= len;
 		bth0 = qp->s_ack_state << 24;
@@ -177,9 +176,9 @@ static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
  * Return 1 if constructed; otherwise, return 0.
  * Note the QP s_lock must be held.
  */
-static inline int ipath_make_rc_req(struct ipath_qp *qp,
-				    struct ipath_other_headers *ohdr,
-				    u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp,
+		      struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_sge_state *ss;
@@ -497,160 +496,33 @@ done:
 	return 0;
 }
 
-static inline void ipath_make_rc_grh(struct ipath_qp *qp,
-				     struct ib_global_route *grh,
-				     u32 nwords)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
-	/* GRH header size in 32-bit words. */
-	qp->s_hdrwords += 10;
-	qp->s_hdr.u.l.grh.version_tclass_flow =
-		cpu_to_be32((6 << 28) |
-			    (grh->traffic_class << 20) |
-			    grh->flow_label);
-	qp->s_hdr.u.l.grh.paylen =
-		cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
-			     SIZE_OF_CRC) << 2);
-	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
-	qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-	qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
-	/* The SGID is 32-bit aligned. */
-	qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
-	qp->s_hdr.u.l.grh.sgid.global.interface_id =
-		ipath_layer_get_guid(dev->dd);
-	qp->s_hdr.u.l.grh.dgid = grh->dgid;
-}
-
 /**
- * ipath_do_rc_send - perform a send on an RC QP
- * @data: contains a pointer to the QP
+ * send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
  *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP s_lock, two threads could send
- * packets out of order.
+ * This is called from ipath_rc_rcv() and only uses the receive
+ * side QP state.
+ * Note that RDMA reads are handled in the send side QP state and tasklet.
  */
-void ipath_do_rc_send(unsigned long data)
-{
-	struct ipath_qp *qp = (struct ipath_qp *)data;
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	unsigned long flags;
-	u16 lrh0;
-	u32 nwords;
-	u32 extra_bytes;
-	u32 bth0;
-	u32 bth2;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-	struct ipath_other_headers *ohdr;
-
-	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
-		goto bail;
-
-	if (unlikely(qp->remote_ah_attr.dlid ==
-		     ipath_layer_get_lid(dev->dd))) {
-		struct ib_wc wc;
-
-		/*
-		 * Pass in an uninitialized ib_wc to be consistent with
-		 * other places where ipath_ruc_loopback() is called.
-		 */
-		ipath_ruc_loopback(qp, &wc);
-		goto clear;
-	}
-
-	ohdr = &qp->s_hdr.u.oth;
-	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
-
-again:
-	/* Check for a constructed packet to be sent. */
-	if (qp->s_hdrwords != 0) {
-		/*
-		 * If no PIO bufs are available, return. An interrupt will
-		 * call ipath_ib_piobufavail() when one is available.
-		 */
-		_VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
-		_VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
-			    qp->s_cur_sge->sg_list,
-			    qp->s_cur_sge->num_sge,
-			    qp->s_cur_sge->sge.vaddr,
-			    qp->s_cur_sge->sge.sge_length,
-			    qp->s_cur_sge->sge.length,
-			    qp->s_cur_sge->sge.m,
-			    qp->s_cur_sge->sge.n);
-		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
-				     (u32 *) &qp->s_hdr, qp->s_cur_size,
-				     qp->s_cur_sge)) {
-			ipath_no_bufs_available(qp, dev);
-			goto bail;
-		}
-		dev->n_unicast_xmit++;
-		/* Record that we sent the packet and s_hdr is empty. */
-		qp->s_hdrwords = 0;
-	}
-
-	/*
-	 * The lock is needed to synchronize between setting
-	 * qp->s_ack_state, resend timer, and post_send().
-	 */
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	/* Sending responses has higher priority over sending requests. */
-	if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
-		bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
-	else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
-		goto done;
-
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	/* Construct the header. */
-	extra_bytes = (4 - qp->s_cur_size) & 3;
-	nwords = (qp->s_cur_size + extra_bytes) >> 2;
-	lrh0 = IPS_LRH_BTH;
-	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
-		lrh0 = IPS_LRH_GRH;
-	}
-	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
-				       SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
-	bth0 |= extra_bytes << 20;
-	ohdr->bth[0] = cpu_to_be32(bth0);
-	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(bth2);
-
-	/* Check for more work to do. */
-	goto again;
-
-done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-clear:
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
-bail:
-	return;
-}
-
 static void send_rc_ack(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	u16 lrh0;
 	u32 bth0;
+	u32 hwords;
+	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
 
 	/* Construct the header. */
-	ohdr = &qp->s_hdr.u.oth;
+	ohdr = &hdr.u.oth;
 	lrh0 = IPS_LRH_BTH;
 	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
-	qp->s_hdrwords = 6;
+	hwords = 6;
 	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
-		ohdr = &qp->s_hdr.u.l.oth;
+		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
+					 &qp->remote_ah_attr.grh,
+					 hwords, 0);
+		ohdr = &hdr.u.l.oth;
 		lrh0 = IPS_LRH_GRH;
 	}
 	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
@@ -658,15 +530,14 @@ static void send_rc_ack(struct ipath_qp *qp)
 	if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
 		bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
 		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
-		qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
-	}
-	else
+		hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+	} else
 		bth0 |= OP(ACKNOWLEDGE) << 24;
 	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+	hdr.lrh[0] = cpu_to_be16(lrh0);
+	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+	hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
@@ -674,8 +545,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 	/*
 	 * If we can send the ACK, clear the ACK state.
 	 */
-	if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
-			     0, NULL) == 0) {
+	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
 		qp->s_ack_state = OP(ACKNOWLEDGE);
 		dev->n_rc_qacks++;
 		dev->n_unicast_xmit++;
@@ -805,7 +675,7 @@ bail:
  * @qp: the QP
  * @psn: the packet sequence number to restart at
  *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
  * for the given QP.
  * Called at interrupt level with the QP s_lock held.
  */
@@ -1231,18 +1101,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		 * ICRC (4).
 		 */
 		if (unlikely(tlen <= (hdrsize + pad + 8))) {
-			/*
-			 * XXX Need to generate an error CQ
-			 * entry.
-			 */
+			/* XXX Need to generate an error CQ entry. */
 			goto ack_done;
 		}
 		tlen -= hdrsize + pad + 8;
 		if (unlikely(tlen != qp->s_len)) {
-			/*
-			 * XXX Need to generate an error CQ
-			 * entry.
-			 */
+			/* XXX Need to generate an error CQ entry. */
 			goto ack_done;
 		}
 		if (!header_in_data)
@@ -1384,7 +1248,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	case OP(COMPARE_SWAP):
 	case OP(FETCH_ADD):
 		/*
-		 * Check for the PSN of the last atomic operations
+		 * Check for the PSN of the last atomic operation
 		 * performed and resend the result if found.
 		 */
 		if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
@@ -1454,11 +1318,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		} else
 			psn = be32_to_cpu(ohdr->bth[2]);
 	}
-	/*
-	 * The opcode is in the low byte when its in network order
-	 * (top byte when in host order).
-	 */
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
 
 	/*
 	 * Process responses (ACKs) before anything else. Note that the
@@ -1466,6 +1325,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	 * queue rather than the expected receive packet sequence number.
 	 * In other words, this QP is the requester.
 	 */
+	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
 	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 2714d6470c35..9a456a7ce352 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -32,6 +32,7 @@
  */
 
 #include "ipath_verbs.h"
+#include "ips_common.h"
 
 /*
  * Convert the AETH RNR timeout code into the number of milliseconds.
@@ -188,7 +189,6 @@ bail:
 /**
  * ipath_ruc_loopback - handle UC and RC lookback requests
  * @sqp: the loopback QP
- * @wc: the work completion entry
  *
  * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
  * forward a WQE addressed to the same HCA.
@@ -197,13 +197,14 @@ bail:
  * receive interrupts since this is a connected protocol and all packets
  * will pass through here.
  */
-void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
+static void ipath_ruc_loopback(struct ipath_qp *sqp)
 {
 	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
 	struct ipath_qp *qp;
 	struct ipath_swqe *wqe;
 	struct ipath_sge *sge;
 	unsigned long flags;
+	struct ib_wc wc;
 	u64 sdata;
 
 	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
@@ -234,8 +235,8 @@ again:
 	wqe = get_swqe_ptr(sqp, sqp->s_last);
 	spin_unlock_irqrestore(&sqp->s_lock, flags);
 
-	wc->wc_flags = 0;
-	wc->imm_data = 0;
+	wc.wc_flags = 0;
+	wc.imm_data = 0;
 
 	sqp->s_sge.sge = wqe->sg_list[0];
 	sqp->s_sge.sg_list = wqe->sg_list + 1;
@@ -243,8 +244,8 @@ again:
 	sqp->s_len = wqe->length;
 	switch (wqe->wr.opcode) {
 	case IB_WR_SEND_WITH_IMM:
-		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->imm_data = wqe->wr.imm_data;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.imm_data = wqe->wr.imm_data;
 		/* FALLTHROUGH */
 	case IB_WR_SEND:
 		spin_lock_irqsave(&qp->r_rq.lock, flags);
@@ -255,7 +256,7 @@ again:
 		if (qp->ibqp.qp_type == IB_QPT_UC)
 			goto send_comp;
 		if (sqp->s_rnr_retry == 0) {
-			wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
 			goto err;
 		}
 		if (sqp->s_rnr_retry_cnt < 7)
@@ -270,8 +271,8 @@ again:
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
-		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->imm_data = wqe->wr.imm_data;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.imm_data = wqe->wr.imm_data;
 		spin_lock_irqsave(&qp->r_rq.lock, flags);
 		if (!ipath_get_rwqe(qp, 1))
 			goto rnr_nak;
@@ -285,20 +286,20 @@ again:
 					    wqe->wr.wr.rdma.rkey,
 					    IB_ACCESS_REMOTE_WRITE))) {
 	acc_err:
-			wc->status = IB_WC_REM_ACCESS_ERR;
+			wc.status = IB_WC_REM_ACCESS_ERR;
 	err:
-			wc->wr_id = wqe->wr.wr_id;
-			wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-			wc->vendor_err = 0;
-			wc->byte_len = 0;
-			wc->qp_num = sqp->ibqp.qp_num;
-			wc->src_qp = sqp->remote_qpn;
-			wc->pkey_index = 0;
-			wc->slid = sqp->remote_ah_attr.dlid;
-			wc->sl = sqp->remote_ah_attr.sl;
-			wc->dlid_path_bits = 0;
-			wc->port_num = 0;
-			ipath_sqerror_qp(sqp, wc);
+			wc.wr_id = wqe->wr.wr_id;
+			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+			wc.vendor_err = 0;
+			wc.byte_len = 0;
+			wc.qp_num = sqp->ibqp.qp_num;
+			wc.src_qp = sqp->remote_qpn;
+			wc.pkey_index = 0;
+			wc.slid = sqp->remote_ah_attr.dlid;
+			wc.sl = sqp->remote_ah_attr.sl;
+			wc.dlid_path_bits = 0;
+			wc.port_num = 0;
+			ipath_sqerror_qp(sqp, &wc);
 			goto done;
 		}
 		break;
@@ -374,22 +375,22 @@ again:
 		goto send_comp;
 
 	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
 	else
-		wc->opcode = IB_WC_RECV;
-	wc->wr_id = qp->r_wr_id;
-	wc->status = IB_WC_SUCCESS;
-	wc->vendor_err = 0;
-	wc->byte_len = wqe->length;
-	wc->qp_num = qp->ibqp.qp_num;
-	wc->src_qp = qp->remote_qpn;
+		wc.opcode = IB_WC_RECV;
+	wc.wr_id = qp->r_wr_id;
+	wc.status = IB_WC_SUCCESS;
+	wc.vendor_err = 0;
+	wc.byte_len = wqe->length;
+	wc.qp_num = qp->ibqp.qp_num;
+	wc.src_qp = qp->remote_qpn;
 	/* XXX do we know which pkey matched? Only needed for GSI. */
-	wc->pkey_index = 0;
-	wc->slid = qp->remote_ah_attr.dlid;
-	wc->sl = qp->remote_ah_attr.sl;
-	wc->dlid_path_bits = 0;
+	wc.pkey_index = 0;
+	wc.slid = qp->remote_ah_attr.dlid;
+	wc.sl = qp->remote_ah_attr.sl;
+	wc.dlid_path_bits = 0;
 	/* Signal completion event if the solicited bit is set. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
+	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       wqe->wr.send_flags & IB_SEND_SOLICITED);
 
 send_comp:
@@ -397,19 +398,19 @@ send_comp:
 
 	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
 	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-		wc->wr_id = wqe->wr.wr_id;
-		wc->status = IB_WC_SUCCESS;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		wc->vendor_err = 0;
-		wc->byte_len = wqe->length;
-		wc->qp_num = sqp->ibqp.qp_num;
-		wc->src_qp = 0;
-		wc->pkey_index = 0;
-		wc->slid = 0;
-		wc->sl = 0;
-		wc->dlid_path_bits = 0;
-		wc->port_num = 0;
-		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), wc, 0);
+		wc.wr_id = wqe->wr.wr_id;
+		wc.status = IB_WC_SUCCESS;
+		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+		wc.vendor_err = 0;
+		wc.byte_len = wqe->length;
+		wc.qp_num = sqp->ibqp.qp_num;
+		wc.src_qp = 0;
+		wc.pkey_index = 0;
+		wc.slid = 0;
+		wc.sl = 0;
+		wc.dlid_path_bits = 0;
+		wc.port_num = 0;
+		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
 	}
 
 	/* Update s_last now that we are finished with the SWQE */
@@ -455,11 +456,11 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
 }
 
 /**
- * ipath_post_rc_send - post RC and UC sends
+ * ipath_post_ruc_send - post RC and UC sends
  * @qp: the QP to post on
  * @wr: the work request to send
  */
-int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
+int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 {
 	struct ipath_swqe *wqe;
 	unsigned long flags;
@@ -534,13 +535,149 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	qp->s_head = next;
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 
-	if (qp->ibqp.qp_type == IB_QPT_UC)
-		ipath_do_uc_send((unsigned long) qp);
-	else
-		ipath_do_rc_send((unsigned long) qp);
+	ipath_do_ruc_send((unsigned long) qp);
 
 	ret = 0;
 
 bail:
 	return ret;
 }
+
+/**
+ * ipath_make_grh - construct a GRH header
+ * @dev: a pointer to the ipath device
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
+		   struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+	hdr->version_tclass_flow =
+		cpu_to_be32((6 << 28) |
+			    (grh->traffic_class << 20) |
+			    grh->flow_label);
+	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
+	hdr->next_hdr = 0x1B;
+	hdr->hop_limit = grh->hop_limit;
+	/* The SGID is 32-bit aligned. */
+	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
+	hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
+	hdr->dgid = grh->dgid;
+
+	/* GRH header size in 32-bit words. */
+	return sizeof(struct ib_grh) / sizeof(u32);
+}
+
+/**
+ * ipath_do_ruc_send - perform a send on an RC or UC QP
+ * @data: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, after we drop the QP s_lock, two threads could send
+ * packets out of order.
+ */
+void ipath_do_ruc_send(unsigned long data)
+{
+	struct ipath_qp *qp = (struct ipath_qp *)data;
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	unsigned long flags;
+	u16 lrh0;
+	u32 nwords;
+	u32 extra_bytes;
+	u32 bth0;
+	u32 bth2;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	struct ipath_other_headers *ohdr;
+
+	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
+		goto bail;
+
+	if (unlikely(qp->remote_ah_attr.dlid ==
+		     ipath_layer_get_lid(dev->dd))) {
+		ipath_ruc_loopback(qp);
+		goto clear;
+	}
+
+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
+again:
+	/* Check for a constructed packet to be sent. */
+	if (qp->s_hdrwords != 0) {
+		/*
+		 * If no PIO bufs are available, return. An interrupt will
+		 * call ipath_ib_piobufavail() when one is available.
+		 */
+		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
+				     (u32 *) &qp->s_hdr, qp->s_cur_size,
+				     qp->s_cur_sge)) {
+			ipath_no_bufs_available(qp, dev);
+			goto bail;
+		}
+		dev->n_unicast_xmit++;
+		/* Record that we sent the packet and s_hdr is empty. */
+		qp->s_hdrwords = 0;
+	}
+
+	/*
+	 * The lock is needed to synchronize between setting
+	 * qp->s_ack_state, resend timer, and post_send().
+	 */
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	/* Sending responses has higher priority over sending requests. */
+	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
+	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
+		bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
+	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
+		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
+		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
+		/*
+		 * Clear the busy bit before unlocking to avoid races with
+		 * adding new work queue items and then failing to process
+		 * them.
+		 */
+		clear_bit(IPATH_S_BUSY, &qp->s_flags);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		goto bail;
+	}
+
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	/* Construct the header. */
+	extra_bytes = (4 - qp->s_cur_size) & 3;
+	nwords = (qp->s_cur_size + extra_bytes) >> 2;
+	lrh0 = IPS_LRH_BTH;
+	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
+						 &qp->remote_ah_attr.grh,
+						 qp->s_hdrwords, nwords);
+		lrh0 = IPS_LRH_GRH;
+	}
+	lrh0 |= qp->remote_ah_attr.sl << 4;
+	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
+				       SIZE_OF_CRC);
+	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+	bth0 |= extra_bytes << 20;
+	ohdr->bth[0] = cpu_to_be32(bth0);
+	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+	ohdr->bth[2] = cpu_to_be32(bth2);
+
+	/* Check for more work to do. */
+	goto again;
+
+clear:
+	clear_bit(IPATH_S_BUSY, &qp->s_flags);
+bail:
+	return;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index b64d9ddc075c..89b3e1a5e3e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -62,90 +62,40 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
 }
 
 /**
- * ipath_do_uc_send - do a send on a UC queue
- * @data: contains a pointer to the QP to send on
+ * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ * @bth0p: pointer to the BTH opcode word
+ * @bth2p: pointer to the BTH PSN word
  *
- * Process entries in the send work queue until the queue is exhausted.
- * Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP lock, two threads could send
- * packets out of order.
- * This is similar to ipath_do_rc_send() below except we don't have
- * timeouts or resends.
+ * Return 1 if constructed; otherwise, return 0.
+ * Note the QP s_lock must be held and interrupts disabled.
  */
-void ipath_do_uc_send(unsigned long data)
+int ipath_make_uc_req(struct ipath_qp *qp,
+		      struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p)
 {
-	struct ipath_qp *qp = (struct ipath_qp *)data;
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_swqe *wqe;
-	unsigned long flags;
-	u16 lrh0;
 	u32 hwords;
-	u32 nwords;
-	u32 extra_bytes;
 	u32 bth0;
-	u32 bth2;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	u32 len;
-	struct ipath_other_headers *ohdr;
 	struct ib_wc wc;
 
-	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
-		goto bail;
-
-	if (unlikely(qp->remote_ah_attr.dlid ==
-		     ipath_layer_get_lid(dev->dd))) {
-		/* Pass in an uninitialized ib_wc to save stack space. */
-		ipath_ruc_loopback(qp, &wc);
-		clear_bit(IPATH_S_BUSY, &qp->s_flags);
-		goto bail;
-	}
-
-	ohdr = &qp->s_hdr.u.oth;
-	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
-
-again:
-	/* Check for a constructed packet to be sent. */
-	if (qp->s_hdrwords != 0) {
-		/*
-		 * If no PIO bufs are available, return.
-		 * An interrupt will call ipath_ib_piobufavail()
-		 * when one is available.
-		 */
-		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
-				     (u32 *) &qp->s_hdr,
-				     qp->s_cur_size,
-				     qp->s_cur_sge)) {
-			ipath_no_bufs_available(qp, dev);
-			goto bail;
-		}
-		dev->n_unicast_xmit++;
-		/* Record that we sent the packet and s_hdr is empty. */
-		qp->s_hdrwords = 0;
-	}
-
-	lrh0 = IPS_LRH_BTH;
-	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
-	hwords = 5;
-
-	/*
-	 * The lock is needed to synchronize between
-	 * setting qp->s_ack_state and post_send().
-	 */
-	spin_lock_irqsave(&qp->s_lock, flags);
-
 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
 		goto done;
 
-	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+	hwords = 5;
+	bth0 = 0;
 
-	/* Send a request. */
+	/* Get the next send request. */
 	wqe = get_swqe_ptr(qp, qp->s_last);
 	switch (qp->s_state) {
 	default:
 		/*
-		 * Signal the completion of the last send (if there is
-		 * one).
+		 * Signal the completion of the last send
+		 * (if there is one).
 		 */
 		if (qp->s_last != qp->s_tail)
 			complete_last_send(qp, wqe, &wc);
@@ -258,61 +208,16 @@ again:
 		}
 		break;
 	}
-	bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
 	qp->s_len -= len;
-	bth0 |= qp->s_state << 24;
-
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	/* Construct the header. */
-	extra_bytes = (4 - len) & 3;
-	nwords = (len + extra_bytes) >> 2;
-	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		/* Header size in 32-bit words. */
-		hwords += 10;
-		lrh0 = IPS_LRH_GRH;
-		qp->s_hdr.u.l.grh.version_tclass_flow =
-			cpu_to_be32((6 << 28) |
-				    (qp->remote_ah_attr.grh.traffic_class
-				     << 20) |
-				    qp->remote_ah_attr.grh.flow_label);
-		qp->s_hdr.u.l.grh.paylen =
-			cpu_to_be16(((hwords - 12) + nwords +
-				     SIZE_OF_CRC) << 2);
-		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
-		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-		qp->s_hdr.u.l.grh.hop_limit =
-			qp->remote_ah_attr.grh.hop_limit;
-		/* The SGID is 32-bit aligned. */
-		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
-			dev->gid_prefix;
-		qp->s_hdr.u.l.grh.sgid.global.interface_id =
-			ipath_layer_get_guid(dev->dd);
-		qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
-	}
 	qp->s_hdrwords = hwords;
 	qp->s_cur_sge = &qp->s_sge;
 	qp->s_cur_size = len;
-	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	/* DEST LID */
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-	bth0 |= extra_bytes << 20;
-	ohdr->bth[0] = cpu_to_be32(bth0);
-	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(bth2);
-
-	/* Check for more work to do. */
-	goto again;
+	*bth0p = bth0 | (qp->s_state << 24);
+	*bth2p = qp->s_next_psn++ & IPS_PSN_MASK;
+	return 1;
 
 done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
-
-bail:
-	return;
+	return 0;
 }
 
 /**
@@ -536,12 +441,13 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			if (qp->r_len != 0) {
 				u32 rkey = be32_to_cpu(reth->rkey);
 				u64 vaddr = be64_to_cpu(reth->vaddr);
+				int ok;
 
 				/* Check rkey */
-				if (unlikely(!ipath_rkey_ok(
-					     dev, &qp->r_sge, qp->r_len,
-					     vaddr, rkey,
-					     IB_ACCESS_REMOTE_WRITE))) {
+				ok = ipath_rkey_ok(dev, &qp->r_sge, qp->r_len,
+						   vaddr, rkey,
+						   IB_ACCESS_REMOTE_WRITE);
+				if (unlikely(!ok)) {
 					dev->n_pkt_drops++;
 					goto done;
 				}
@@ -559,8 +465,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		}
 		if (opcode == OP(RDMA_WRITE_ONLY))
 			goto rdma_last;
-		else if (opcode ==
-			 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
 			goto rdma_last_imm;
 		/* FALLTHROUGH */
 	case OP(RDMA_WRITE_MIDDLE):
@@ -593,9 +498,9 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			dev->n_pkt_drops++;
 			goto done;
 		}
-		if (qp->r_reuse_sge) {
+		if (qp->r_reuse_sge)
 			qp->r_reuse_sge = 0;
-		} else if (!ipath_get_rwqe(qp, 1)) {
+		else if (!ipath_get_rwqe(qp, 1)) {
 			dev->n_pkt_drops++;
 			goto done;
 		}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 683db8ad28d3..6af5402fe447 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -194,7 +194,7 @@ static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (qp->ibqp.qp_type) {
 		case IB_QPT_UC:
 		case IB_QPT_RC:
-			err = ipath_post_rc_send(qp, wr);
+			err = ipath_post_ruc_send(qp, wr);
 			break;
 
 		case IB_QPT_SMI:
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 2b2311abf05a..b461316ca223 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -581,10 +581,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
-void ipath_do_rc_send(unsigned long data);
-
-void ipath_do_uc_send(unsigned long data);
-
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -597,7 +593,7 @@ void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
 
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
 
-int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
+int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
 
 void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
@@ -679,7 +675,19 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp);
 
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
 
-void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc);
+u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
+		   struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void ipath_do_ruc_send(unsigned long data);
+
+u32 ipath_make_rc_ack(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
+		      u32 pmtu);
+
+int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p);
+
+int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p);
 
 extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
 
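A quick worked check of the new ipath_make_grh() payload-length expression, reading only the code above (not quoting the IB spec): hwords counts the 2-word LRH plus whatever header words are already placed after it, and SIZE_OF_CRC appears to be one 32-bit word for the ICRC, so for the ACK path in send_rc_ack() (hwords = 6 for LRH+BTH+AETH, nwords = 0) the GRH paylen comes out to (6 - 2 + 0 + 1) << 2 = 20 bytes, i.e. the BTH, AETH, and ICRC that follow the GRH.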