author     Ralph Campbell <ralph.campbell@qlogic.com>   2008-04-17 00:09:27 -0400
committer  Roland Dreier <rolandd@cisco.com>            2008-04-17 00:09:27 -0400
commit     d98b1937768c9f4e4420bd25406e5f0304d224bb (patch)
tree       306b10016a5ada414efa187ba071808a304be8bd /drivers/infiniband/hw
parent     c4b4d16e090e1b68d1d4d20a28757070982b9725 (diff)
IB/ipath: Use PIO buffer for RC ACKs
Construct RC ACKs in send_rc_ack() and write them directly to a PIO buffer when one is available, instead of going through the generic verbs send path. This reduces the latency for RC ACKs.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
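
In outline, the new fast path in send_rc_ack() works as sketched below. This is a condensed reading of the hunks that follow, not part of the patch itself: header construction, the counters, and the queue_ack fallback target are elided, and the annotations in the comments (for example, that the first quadword written is the buffer's length/PBC word) are editorial, so the fragment is not meant to compile on its own.

    spin_lock_irqsave(&qp->s_lock, flags);
    if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
        (qp->s_flags & IPATH_S_ACK_PENDING) ||
        qp->s_ack_state != OP(ACKNOWLEDGE))
            goto queue_ack;                  /* defer to the send tasklet, as before */
    spin_unlock_irqrestore(&qp->s_lock, flags);

    /* Try to grab a PIO buffer; if none is free, fall back to queueing. */
    piobuf = ipath_getpiobuf(dd, 0, NULL);
    if (!piobuf) {
            spin_lock_irqsave(&qp->s_lock, flags);
            goto queue_ack;
    }

    /* ... build the LRH/BTH/AETH headers in hdr, as before ... */

    writeq(hwords + 1, piobuf);              /* first qword: packet length (PBC) */
    __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
    ipath_flush_wc();                        /* push the write-combined stores to the chip */

If the QP still owes a response for an RDMA read or atomic, or no PIO buffer is free, the ACK is queued with IPATH_S_ACK_PENDING set so the send tasklet emits it ahead of other outgoing packets, exactly as before. On hardware flagged with IPATH_PIO_FLUSH_WC, the last header word is written separately with __raw_writel() after an extra ipath_flush_wc(), as the final hunk shows.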
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c  57
1 file changed, 39 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 40f3e37d7adc..f765d48464ab 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/io.h>
+
 #include "ipath_verbs.h"
 #include "ipath_kernel.h"
 
@@ -585,19 +587,39 @@ bail:
 static void send_rc_ack(struct ipath_qp *qp)
 {
         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+        struct ipath_devdata *dd;
         u16 lrh0;
         u32 bth0;
         u32 hwords;
+        u32 __iomem *piobuf;
         struct ipath_ib_header hdr;
         struct ipath_other_headers *ohdr;
         unsigned long flags;
 
+        spin_lock_irqsave(&qp->s_lock, flags);
+
         /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
         if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
             (qp->s_flags & IPATH_S_ACK_PENDING) ||
             qp->s_ack_state != OP(ACKNOWLEDGE))
                 goto queue_ack;
 
+        spin_unlock_irqrestore(&qp->s_lock, flags);
+
+        dd = dev->dd;
+        piobuf = ipath_getpiobuf(dd, 0, NULL);
+        if (!piobuf) {
+                /*
+                 * We are out of PIO buffers at the moment.
+                 * Pass responsibility for sending the ACK to the
+                 * send tasklet so that when a PIO buffer becomes
+                 * available, the ACK is sent ahead of other outgoing
+                 * packets.
+                 */
+                spin_lock_irqsave(&qp->s_lock, flags);
+                goto queue_ack;
+        }
+
         /* Construct the header. */
         ohdr = &hdr.u.oth;
         lrh0 = IPATH_LRH_BTH;
@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
                 lrh0 = IPATH_LRH_GRH;
         }
         /* read pkey_index w/o lock (its atomic) */
-        bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
+        bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
                 (OP(ACKNOWLEDGE) << 24) | (1 << 22);
         if (qp->r_nak_state)
                 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
         hdr.lrh[0] = cpu_to_be16(lrh0);
         hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
         hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-        hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+        hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
         ohdr->bth[0] = cpu_to_be32(bth0);
         ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
         ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
 
-        /*
-         * If we can send the ACK, clear the ACK state.
-         */
-        if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
-                dev->n_unicast_xmit++;
-                goto done;
-        }
+        writeq(hwords + 1, piobuf);
 
-        /*
-         * We are out of PIO buffers at the moment.
-         * Pass responsibility for sending the ACK to the
-         * send tasklet so that when a PIO buffer becomes
-         * available, the ACK is sent ahead of other outgoing
-         * packets.
-         */
-        dev->n_rc_qacks++;
+        if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+                u32 *hdrp = (u32 *) &hdr;
+
+                ipath_flush_wc();
+                __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
+                ipath_flush_wc();
+                __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+        } else
+                __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+        ipath_flush_wc();
+
+        dev->n_unicast_xmit++;
+        goto done;
 
 queue_ack:
-        spin_lock_irqsave(&qp->s_lock, flags);
         dev->n_rc_qacks++;
         qp->s_flags |= IPATH_S_ACK_PENDING;
         qp->s_nak_state = qp->r_nak_state;