author    Roland Dreier <rolandd@cisco.com>    2007-07-18 15:55:42 -0400
committer Roland Dreier <rolandd@cisco.com>    2007-07-18 15:55:42 -0400
commit    400ddc11eb01a8d04c580892fde3adbd45ebdc9e (patch)
tree      d2056196297d40238318421a342379762d0dfd22 /drivers/infiniband
parent    0fbfa6a9062c71b62ec216c0294b676b76e41661 (diff)
IB/mthca: Factor out setting WQE remote address and atomic segment entries
Factor code to set remote address and atomic segment entries out of the
work request posting functions into inline functions set_raddr_seg()
and set_atomic_seg().  This doesn't change the generated code in any
significant way, and makes the source easier on the eyes.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
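The refactoring is purely mechanical: the three open-coded stores into a
remote-address segment (and the swap/compare stores into an atomic segment)
move into inline helpers, so each call site shrinks to a single call plus a
pointer advance. For reference, here is a minimal, self-contained userspace
sketch of that pattern; struct raddr_seg, the buffer, and the htobe*() calls
are illustrative stand-ins for the real mthca definitions and the kernel's
cpu_to_be*() macros, not the driver code itself (which follows in the diff).

/*
 * Minimal sketch of the "factor segment setup into an inline helper"
 * pattern.  Types and helpers here are hypothetical stand-ins, not the
 * actual mthca definitions.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a hardware "remote address" WQE segment. */
struct raddr_seg {
	uint64_t raddr;    /* remote address, big-endian on the wire */
	uint32_t rkey;     /* remote key, big-endian on the wire */
	uint32_t reserved; /* must be zero */
};

/*
 * Factored-out helper: fill the segment in one place instead of
 * open-coding the three assignments at every call site.
 */
static inline void set_raddr_seg(struct raddr_seg *rseg,
				 uint64_t remote_addr, uint32_t rkey)
{
	rseg->raddr    = htobe64(remote_addr);
	rseg->rkey     = htobe32(rkey);
	rseg->reserved = 0;
}

int main(void)
{
	unsigned char wqe[64];	/* pretend work-queue entry buffer */
	unsigned char *p = wqe;
	struct raddr_seg seg;

	/* Call site stays short: one call, a copy, and a pointer advance. */
	set_raddr_seg(&seg, 0x1000ULL, 0x42);
	memcpy(p, &seg, sizeof seg);
	p += sizeof seg;

	printf("wrote %zu-byte raddr segment\n", sizeof seg);
	return 0;
}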
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  99
1 file changed, 40 insertions, 59 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2548250a822d..43d4d771f2d2 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1578,6 +1578,27 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
 	return cur + nreq >= wq->max;
 }
 
+static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
+					  u64 remote_addr, u32 rkey)
+{
+	rseg->raddr    = cpu_to_be64(remote_addr);
+	rseg->rkey     = cpu_to_be32(rkey);
+	rseg->reserved = 0;
+}
+
+static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
+					   struct ib_send_wr *wr)
+{
+	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare  = 0;
+	}
+
+}
+
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			  struct ib_send_wr **bad_wr)
 {
@@ -1642,25 +1663,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			((struct mthca_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.atomic.remote_addr);
-			((struct mthca_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.atomic.rkey);
-			((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+			set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+				      wr->wr.atomic.rkey);
 			wqe += sizeof (struct mthca_raddr_seg);
 
-			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-				((struct mthca_atomic_seg *) wqe)->swap_add =
-					cpu_to_be64(wr->wr.atomic.swap);
-				((struct mthca_atomic_seg *) wqe)->compare =
-					cpu_to_be64(wr->wr.atomic.compare_add);
-			} else {
-				((struct mthca_atomic_seg *) wqe)->swap_add =
-					cpu_to_be64(wr->wr.atomic.compare_add);
-				((struct mthca_atomic_seg *) wqe)->compare = 0;
-			}
-
+			set_atomic_seg(wqe, wr);
 			wqe += sizeof (struct mthca_atomic_seg);
 			size += (sizeof (struct mthca_raddr_seg) +
 				 sizeof (struct mthca_atomic_seg)) / 16;
@@ -1669,12 +1676,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 		case IB_WR_RDMA_READ:
-			((struct mthca_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.rdma.remote_addr);
-			((struct mthca_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.rdma.rkey);
-			((struct mthca_raddr_seg *) wqe)->reserved = 0;
-			wqe += sizeof (struct mthca_raddr_seg);
+			set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+				      wr->wr.rdma.rkey);
+			wqe  += sizeof (struct mthca_raddr_seg);
 			size += sizeof (struct mthca_raddr_seg) / 16;
 			break;
 
@@ -1689,12 +1693,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
-			((struct mthca_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.rdma.remote_addr);
-			((struct mthca_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.rdma.rkey);
-			((struct mthca_raddr_seg *) wqe)->reserved = 0;
-			wqe += sizeof (struct mthca_raddr_seg);
+			set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+				      wr->wr.rdma.rkey);
+			wqe  += sizeof (struct mthca_raddr_seg);
 			size += sizeof (struct mthca_raddr_seg) / 16;
 			break;
 
@@ -2019,26 +2020,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			((struct mthca_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.atomic.remote_addr);
-			((struct mthca_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.atomic.rkey);
-			((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+			set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+				      wr->wr.atomic.rkey);
 			wqe += sizeof (struct mthca_raddr_seg);
 
-			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-				((struct mthca_atomic_seg *) wqe)->swap_add =
-					cpu_to_be64(wr->wr.atomic.swap);
-				((struct mthca_atomic_seg *) wqe)->compare =
-					cpu_to_be64(wr->wr.atomic.compare_add);
-			} else {
-				((struct mthca_atomic_seg *) wqe)->swap_add =
-					cpu_to_be64(wr->wr.atomic.compare_add);
-				((struct mthca_atomic_seg *) wqe)->compare = 0;
-			}
-
-			wqe += sizeof (struct mthca_atomic_seg);
+			set_atomic_seg(wqe, wr);
+			wqe += sizeof (struct mthca_atomic_seg);
 			size += (sizeof (struct mthca_raddr_seg) +
 				 sizeof (struct mthca_atomic_seg)) / 16;
 			break;
@@ -2046,12 +2033,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
-			((struct mthca_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.rdma.remote_addr);
-			((struct mthca_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.rdma.rkey);
-			((struct mthca_raddr_seg *) wqe)->reserved = 0;
-			wqe += sizeof (struct mthca_raddr_seg);
+			set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+				      wr->wr.rdma.rkey);
+			wqe  += sizeof (struct mthca_raddr_seg);
 			size += sizeof (struct mthca_raddr_seg) / 16;
 			break;
 
@@ -2066,12 +2050,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
-			((struct mthca_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.rdma.remote_addr);
-			((struct mthca_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.rdma.rkey);
-			((struct mthca_raddr_seg *) wqe)->reserved = 0;
-			wqe += sizeof (struct mthca_raddr_seg);
+			set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+				      wr->wr.rdma.rkey);
+			wqe  += sizeof (struct mthca_raddr_seg);
 			size += sizeof (struct mthca_raddr_seg) / 16;
 			break;
 