Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  221
1 file changed, 101 insertions(+), 120 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 11f1d99db40b..df01b2026a64 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1578,6 +1578,45 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
 	return cur + nreq >= wq->max;
 }
 
+static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
+					  u64 remote_addr, u32 rkey)
+{
+	rseg->raddr = cpu_to_be64(remote_addr);
+	rseg->rkey = cpu_to_be32(rkey);
+	rseg->reserved = 0;
+}
+
+static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
+					   struct ib_send_wr *wr)
+{
+	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare = 0;
+	}
+
+}
+
+static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
+			     struct ib_send_wr *wr)
+{
+	useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
+	useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
+	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+
+}
+
+static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
+			     struct ib_send_wr *wr)
+{
+	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
+	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+}
+
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			  struct ib_send_wr **bad_wr)
 {
@@ -1590,8 +1629,15 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	int nreq;
 	int i;
 	int size;
-	int size0 = 0;
-	u32 f0 = 0;
+	/*
+	 * f0 and size0 are only used if nreq != 0, and they will
+	 * always be initialized the first time through the main loop
+	 * before nreq is incremented.  So nreq cannot become non-zero
+	 * without initializing f0 and size0, and they are in fact
+	 * never used uninitialized.
+	 */
+	int uninitialized_var(size0);
+	u32 uninitialized_var(f0);
 	int ind;
 	u8 op0 = 0;
 
@@ -1636,25 +1682,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mthca_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mthca_atomic_seg *) wqe)->compare = 0;
-				}
-
+				set_atomic_seg(wqe, wr);
 				wqe += sizeof (struct mthca_atomic_seg);
 				size += (sizeof (struct mthca_raddr_seg) +
 					 sizeof (struct mthca_atomic_seg)) / 16;
@@ -1663,12 +1695,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
 			case IB_WR_RDMA_READ:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -1683,12 +1712,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -1700,16 +1726,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case UD:
-			((struct mthca_tavor_ud_seg *) wqe)->lkey =
-				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
-			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
-				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
-			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
-				cpu_to_be32(wr->wr.ud.remote_qpn);
-			((struct mthca_tavor_ud_seg *) wqe)->qkey =
-				cpu_to_be32(wr->wr.ud.remote_qkey);
-
-			wqe += sizeof (struct mthca_tavor_ud_seg);
+			set_tavor_ud_seg(wqe, wr);
+			wqe += sizeof (struct mthca_tavor_ud_seg);
 			size += sizeof (struct mthca_tavor_ud_seg) / 16;
 			break;
 
@@ -1734,13 +1752,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
-			wqe += sizeof (struct mthca_data_seg);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
+			wqe += sizeof (struct mthca_data_seg);
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
@@ -1768,11 +1781,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					    mthca_opcode[wr->opcode]);
 		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
-			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
 				    ((wr->send_flags & IB_SEND_FENCE) ?
 				     MTHCA_NEXT_FENCE : 0));
 
-		if (!size0) {
+		if (!nreq) {
 			size0 = size;
 			op0 = mthca_opcode[wr->opcode];
 			f0 = wr->send_flags & IB_SEND_FENCE ?
@@ -1822,7 +1835,14 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	int nreq;
 	int i;
 	int size;
-	int size0 = 0;
+	/*
+	 * size0 is only used if nreq != 0, and it will always be
+	 * initialized the first time through the main loop before
+	 * nreq is incremented.  So nreq cannot become non-zero
+	 * without initializing size0, and it is in fact never used
+	 * uninitialized.
+	 */
+	int uninitialized_var(size0);
 	int ind;
 	void *wqe;
 	void *prev_wqe;
@@ -1863,13 +1883,8 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
-			wqe += sizeof (struct mthca_data_seg);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
+			wqe += sizeof (struct mthca_data_seg);
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
@@ -1881,7 +1896,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
-		if (!size0)
+		if (!nreq)
 			size0 = size;
 
 		++ind;
@@ -1903,7 +1918,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 			qp->rq.next_ind = ind;
 			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-			size0 = 0;
 		}
 	}
 
@@ -1945,8 +1959,15 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	int nreq;
 	int i;
 	int size;
-	int size0 = 0;
-	u32 f0 = 0;
+	/*
+	 * f0 and size0 are only used if nreq != 0, and they will
+	 * always be initialized the first time through the main loop
+	 * before nreq is incremented.  So nreq cannot become non-zero
+	 * without initializing f0 and size0, and they are in fact
+	 * never used uninitialized.
+	 */
+	int uninitialized_var(size0);
+	u32 uninitialized_var(f0);
 	int ind;
 	u8 op0 = 0;
 
@@ -1966,7 +1987,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
 
 			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
-			size0 = 0;
 
 			/*
 			 * Make sure that descriptors are written before
@@ -2017,26 +2037,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mthca_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mthca_atomic_seg *) wqe)->compare = 0;
-				}
-
-				wqe += sizeof (struct mthca_atomic_seg);
+				set_atomic_seg(wqe, wr);
+				wqe += sizeof (struct mthca_atomic_seg);
 				size += (sizeof (struct mthca_raddr_seg) +
 					 sizeof (struct mthca_atomic_seg)) / 16;
 				break;
@@ -2044,12 +2050,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -2064,12 +2067,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-				wqe += sizeof (struct mthca_raddr_seg);
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
+				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
 
@@ -2081,14 +2081,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case UD:
-			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
-			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
-			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
-				cpu_to_be32(wr->wr.ud.remote_qpn);
-			((struct mthca_arbel_ud_seg *) wqe)->qkey =
-				cpu_to_be32(wr->wr.ud.remote_qkey);
-
-			wqe += sizeof (struct mthca_arbel_ud_seg);
+			set_arbel_ud_seg(wqe, wr);
+			wqe += sizeof (struct mthca_arbel_ud_seg);
 			size += sizeof (struct mthca_arbel_ud_seg) / 16;
 			break;
 
@@ -2113,13 +2107,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
-			wqe += sizeof (struct mthca_data_seg);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
+			wqe += sizeof (struct mthca_data_seg);
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
@@ -2151,7 +2140,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				    ((wr->send_flags & IB_SEND_FENCE) ?
 				     MTHCA_NEXT_FENCE : 0));
 
-		if (!size0) {
+		if (!nreq) {
 			size0 = size;
 			op0 = mthca_opcode[wr->opcode];
 			f0 = wr->send_flags & IB_SEND_FENCE ?
@@ -2241,20 +2230,12 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
-			((struct mthca_data_seg *) wqe)->byte_count =
-				cpu_to_be32(wr->sg_list[i].length);
-			((struct mthca_data_seg *) wqe)->lkey =
-				cpu_to_be32(wr->sg_list[i].lkey);
-			((struct mthca_data_seg *) wqe)->addr =
-				cpu_to_be64(wr->sg_list[i].addr);
+			mthca_set_data_seg(wqe, wr->sg_list + i);
 			wqe += sizeof (struct mthca_data_seg);
 		}
 
-		if (i < qp->rq.max_gs) {
-			((struct mthca_data_seg *) wqe)->byte_count = 0;
-			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
-			((struct mthca_data_seg *) wqe)->addr = 0;
-		}
+		if (i < qp->rq.max_gs)
+			mthca_set_data_seg_inval(wqe);
 
 		qp->wrid[ind] = wr->wr_id;
 
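
Note: the new code calls mthca_set_data_seg() and mthca_set_data_seg_inval(), which this patch does not define in mthca_qp.c; they come from a shared header (presumably mthca_wqe.h, added in the same series). A minimal sketch of what they must do, reconstructed from the open-coded stores removed above; the header location and exact signatures are assumptions, not part of this diff:

/* Hypothetical reconstruction -- not part of this patch.  Fills one
 * scatter/gather entry of a WQE, byte-swapping to the big-endian
 * layout the HCA expects, exactly like the removed open-coded stores. */
static __always_inline void mthca_set_data_seg(struct mthca_data_seg *dseg,
					       struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}

/* Hypothetical reconstruction -- terminates a receive WQE's scatter
 * list with a zero-length entry carrying the invalid lkey, replacing
 * the removed open-coded "if (i < qp->rq.max_gs)" block. */
static __always_inline void mthca_set_data_seg_inval(struct mthca_data_seg *dseg)
{
	dseg->byte_count = 0;
	dseg->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	dseg->addr = 0;
}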
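
Note: uninitialized_var() was the kernel's annotation (then in linux/compiler-gcc.h, since removed from mainline) for suppressing a bogus gcc "may be used uninitialized" warning without paying for a zero-initialization. Quoted from memory, so treat as a sketch:

/* Self-assignment convinces gcc the variable is initialized while
 * generating no code. */
#define uninitialized_var(x) x = x

/* With the macro, the declarations in this patch expand to
 *
 *	int size0 = size0;
 *	u32 f0 = f0;
 *
 * so size0 is no longer guaranteed to start at 0.  That is why the
 * first-iteration tests switch from "if (!size0)" to "if (!nreq)",
 * and why the "size0 = 0" resets after each doorbell ring become
 * unnecessary.  The comment blocks added above each declaration carry
 * the proof that the suppressed warning really is a false positive. */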