Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c'):

 drivers/infiniband/hw/ipath/ipath_rc.c | 67 ++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 44 insertions(+), 23 deletions(-)
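In brief, the patch does two things: it follows the core verbs change that moved immediate data into the `ex` union of struct ib_send_wr (four `wqe->wr.imm_data` -> `wqe->wr.ex.imm_data` substitutions in ipath_make_rc_req()), and it reworks send_rc_ack() to build the RC ACK on the stack and write it directly into a chip PIO buffer, queueing the ACK for the send tasklet only when no PIO buffer is free.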
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 40f3e37d7adc..c405dfba5531 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/io.h>
+
 #include "ipath_verbs.h"
 #include "ipath_kernel.h"
 
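(<linux/io.h> is pulled in for writeq(), __raw_writel() and __iowrite32_copy(), which the reworked send_rc_ack() below now uses to write the ACK straight into the memory-mapped PIO buffer.)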
@@ -306,7 +308,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
 		else {
 			qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 		}
 		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -344,7 +346,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
 			qp->s_state =
 				OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
 			/* Immediate data comes after RETH */
-			ohdr->u.rc.imm_data = wqe->wr.imm_data;
+			ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 				bth0 |= 1 << 23;
@@ -488,7 +490,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
 		else {
 			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 		}
 		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -524,7 +526,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
 		else {
 			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
 			/* Immediate data comes after the BTH */
-			ohdr->u.imm_data = wqe->wr.imm_data;
+			ohdr->u.imm_data = wqe->wr.ex.imm_data;
 			hwords += 1;
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 				bth0 |= 1 << 23;
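An aside on the imm_data hunks above: the field moved because the core verbs header wrapped immediate data in a union named `ex`, letting the same slot carry an invalidate rkey for send-with-invalidate work requests. A simplified sketch of the affected part of struct ib_send_wr from include/rdma/ib_verbs.h around this kernel version (field set abbreviated):

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64                     wr_id;
	struct ib_sge          *sg_list;
	int                     num_sge;
	enum ib_wr_opcode       opcode;
	int                     send_flags;
	union {
		__be32          imm_data;        /* used by the hunks above */
		u32             invalidate_rkey; /* for send-with-invalidate */
	} ex;
	/* opcode-specific members (wr.rdma, wr.atomic, wr.ud, ...) omitted */
};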
@@ -585,19 +587,39 @@ bail:
 static void send_rc_ack(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_devdata *dd;
 	u16 lrh0;
 	u32 bth0;
 	u32 hwords;
+	u32 __iomem *piobuf;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&qp->s_lock, flags);
+
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
 	    qp->s_ack_state != OP(ACKNOWLEDGE))
 		goto queue_ack;
 
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	dd = dev->dd;
+	piobuf = ipath_getpiobuf(dd, 0, NULL);
+	if (!piobuf) {
+		/*
+		 * We are out of PIO buffers at the moment.
+		 * Pass responsibility for sending the ACK to the
+		 * send tasklet so that when a PIO buffer becomes
+		 * available, the ACK is sent ahead of other outgoing
+		 * packets.
+		 */
+		spin_lock_irqsave(&qp->s_lock, flags);
+		goto queue_ack;
+	}
+
 	/* Construct the header. */
 	ohdr = &hdr.u.oth;
 	lrh0 = IPATH_LRH_BTH;
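Note the revised lock discipline here: the ACK-state test previously ran without s_lock, which was only taken under the queue_ack: label. It is now held across the test, released for the common direct-PIO path, and re-taken before jumping to queue_ack: when ipath_getpiobuf() returns no buffer; the matching spin_lock_irqsave() removal under queue_ack: shows up in the final hunk.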
@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 		lrh0 = IPATH_LRH_GRH;
 	}
 	/* read pkey_index w/o lock (its atomic) */
-	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
+	bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
 		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
 	if (qp->r_nak_state)
 		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
 	hdr.lrh[0] = cpu_to_be16(lrh0);
 	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
 
-	/*
-	 * If we can send the ACK, clear the ACK state.
-	 */
-	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
-		dev->n_unicast_xmit++;
-		goto done;
-	}
+	writeq(hwords + 1, piobuf);
 
-	/*
-	 * We are out of PIO buffers at the moment.
-	 * Pass responsibility for sending the ACK to the
-	 * send tasklet so that when a PIO buffer becomes
-	 * available, the ACK is sent ahead of other outgoing
-	 * packets.
-	 */
-	dev->n_rc_qacks++;
+	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+		u32 *hdrp = (u32 *) &hdr;
+
+		ipath_flush_wc();
+		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
+		ipath_flush_wc();
+		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+	} else
+		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+	ipath_flush_wc();
+
+	dev->n_unicast_xmit++;
+	goto done;
 
 queue_ack:
-	spin_lock_irqsave(&qp->s_lock, flags);
 	dev->n_rc_qacks++;
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
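The flush choreography in the final hunk is the interesting part: on combinations flagged IPATH_PIO_FLUSH_WC, the PIO buffer is mapped write-combining and the adapter treats the last header word as the trigger to launch the packet, so the code flushes after writing the PBC length word, copies all header words except the last, flushes again, and only then writes the trigger word. A rough, self-contained sketch of that pattern follows; wc_flush() and the two-word PBC layout are stand-ins for the driver's ipath_flush_wc() and PBC format, not its exact API:

#include <linux/io.h>

/* Stand-in for ipath_flush_wc(): force any write-combining store
 * buffers out to the device before continuing. */
static inline void wc_flush(void)
{
	wmb();
}

/* Sketch: push one packet of 'nwords' 32-bit words through a
 * write-combining mapped PIO buffer whose first 64 bits are a
 * length-bearing control word (PBC). */
static void pio_send_sketch(u64 __iomem *pio, const u32 *pkt, u32 nwords)
{
	u32 __iomem *buf = (u32 __iomem *) pio;

	writeq(nwords + 1, pio);		/* PBC: length goes first */
	wc_flush();				/* PBC must land before data */

	__iowrite32_copy(buf + 2, pkt, nwords - 1);
	wc_flush();				/* body before the trigger word */

	__raw_writel(pkt[nwords - 1], buf + nwords + 1);
	wc_flush();				/* kick the final word out now */
}

The else branch in the diff (no IPATH_PIO_FLUSH_WC) skips the intermediate flushes and copies all hwords in one go, relying on the single trailing ipath_flush_wc().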