author    Doug Ledford <dledford@redhat.com>    2015-10-28 22:23:34 -0400
committer Doug Ledford <dledford@redhat.com>    2015-10-28 22:23:34 -0400
commit    63e8790d39a2d7c9a0ebeab987a6033d184bc6ba
tree      9436939401b222d344f66e2bda59b445d5b9189f /drivers/infiniband/hw/mthca
parent    95893dde99d9d14f8a6ac99ea3103792a8da5f25
parent    eb14ab3ba14081e403be93dc6396627567fadf60
Merge branch 'wr-cleanup' into k.o/for-4.4
Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  84
1 file changed, 42 insertions(+), 42 deletions(-)
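This merge pulls in the wr-cleanup series, which replaces the old union inside struct ib_send_wr (wr->wr.ud, wr->wr.rdma, wr->wr.atomic) with per-operation work-request types. Each typed request embeds the generic struct ib_send_wr as its first member, and drivers recover the typed view with small container_of() helpers. An abridged sketch of the pattern follows; the full definitions live in include/rdma/ib_verbs.h, and the field lists here are trimmed for brevity:

struct ib_ud_wr {
        struct ib_send_wr wr;           /* generic part, embedded first */
        struct ib_ah *ah;               /* address handle for the UD send */
        u32 remote_qpn;
        u32 remote_qkey;
        u16 pkey_index;                 /* valid for GSI only */
};

/* Downcast from the generic WR back to the typed wrapper. */
static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

rdma_wr() and atomic_wr() follow the same container_of() pattern for struct ib_rdma_wr (remote_addr, rkey) and struct ib_atomic_wr (remote_addr, compare_add, swap, rkey), which is what the converted mthca paths below rely on.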
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index e354b2f04ad9..35fe506e2cfa 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1476,7 +1476,7 @@ void mthca_free_qp(struct mthca_dev *dev,
 
 /* Create UD header for an MLX send and build a data segment for it */
 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
-                            int ind, struct ib_send_wr *wr,
+                            int ind, struct ib_ud_wr *wr,
                             struct mthca_mlx_seg *mlx,
                             struct mthca_data_seg *data)
 {
@@ -1485,10 +1485,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         u16 pkey;
 
         ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
-                          mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
+                          mthca_ah_grh_present(to_mah(wr->ah)), 0,
                           &sqp->ud_header);
 
-        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
+        err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
         if (err)
                 return err;
         mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
@@ -1499,7 +1499,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         mlx->rlid = sqp->ud_header.lrh.destination_lid;
         mlx->vcrc = 0;
 
-        switch (wr->opcode) {
+        switch (wr->wr.opcode) {
         case IB_WR_SEND:
                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
                 sqp->ud_header.immediate_present = 0;
@@ -1507,7 +1507,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         case IB_WR_SEND_WITH_IMM:
                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                 sqp->ud_header.immediate_present = 1;
-                sqp->ud_header.immediate_data = wr->ex.imm_data;
+                sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
                 break;
         default:
                 return -EINVAL;
@@ -1516,18 +1516,18 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
                 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
-        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+        sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
         if (!sqp->qp.ibqp.qp_num)
                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
                                    sqp->pkey_index, &pkey);
         else
                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
-                                   wr->wr.ud.pkey_index, &pkey);
+                                   wr->pkey_index, &pkey);
         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
-        sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
-                                               sqp->qkey : wr->wr.ud.remote_qkey);
+        sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+                                               sqp->qkey : wr->remote_qkey);
         sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
 
         header_size = ib_ud_header_pack(&sqp->ud_header,
@@ -1569,34 +1569,34 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
 }
 
 static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
-                                           struct ib_send_wr *wr)
+                                           struct ib_atomic_wr *wr)
 {
-        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-                aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-                aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+        if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+                aseg->swap_add = cpu_to_be64(wr->swap);
+                aseg->compare = cpu_to_be64(wr->compare_add);
         } else {
-                aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+                aseg->swap_add = cpu_to_be64(wr->compare_add);
                 aseg->compare = 0;
         }
 
 }
 
 static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
-                             struct ib_send_wr *wr)
+                             struct ib_ud_wr *wr)
 {
-        useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
-        useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
-        useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-        useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+        useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
+        useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
+        useg->dqpn = cpu_to_be32(wr->remote_qpn);
+        useg->qkey = cpu_to_be32(wr->remote_qkey);
 
 }
 
 static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
-                             struct ib_send_wr *wr)
+                             struct ib_ud_wr *wr)
 {
-        memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
-        useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-        useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+        memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
+        useg->dqpn = cpu_to_be32(wr->remote_qpn);
+        useg->qkey = cpu_to_be32(wr->remote_qkey);
 }
 
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1664,11 +1664,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_ATOMIC_CMP_AND_SWP:
                         case IB_WR_ATOMIC_FETCH_AND_ADD:
-                                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-                                              wr->wr.atomic.rkey);
+                                set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+                                              atomic_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
 
-                                set_atomic_seg(wqe, wr);
+                                set_atomic_seg(wqe, atomic_wr(wr));
                                 wqe += sizeof (struct mthca_atomic_seg);
                                 size += (sizeof (struct mthca_raddr_seg) +
                                          sizeof (struct mthca_atomic_seg)) / 16;
@@ -1677,8 +1677,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
                         case IB_WR_RDMA_READ:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -1694,8 +1694,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -1708,13 +1708,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         break;
 
                 case UD:
-                        set_tavor_ud_seg(wqe, wr);
+                        set_tavor_ud_seg(wqe, ud_wr(wr));
                         wqe += sizeof (struct mthca_tavor_ud_seg);
                         size += sizeof (struct mthca_tavor_ud_seg) / 16;
                         break;
 
                 case MLX:
-                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                        err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
                                                wqe - sizeof (struct mthca_next_seg),
                                                wqe);
                         if (err) {
@@ -2005,11 +2005,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_ATOMIC_CMP_AND_SWP:
                         case IB_WR_ATOMIC_FETCH_AND_ADD:
-                                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-                                              wr->wr.atomic.rkey);
+                                set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+                                              atomic_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
 
-                                set_atomic_seg(wqe, wr);
+                                set_atomic_seg(wqe, atomic_wr(wr));
                                 wqe += sizeof (struct mthca_atomic_seg);
                                 size += (sizeof (struct mthca_raddr_seg) +
                                          sizeof (struct mthca_atomic_seg)) / 16;
@@ -2018,8 +2018,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         case IB_WR_RDMA_READ:
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -2035,8 +2035,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -2049,13 +2049,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         break;
 
                 case UD:
-                        set_arbel_ud_seg(wqe, wr);
+                        set_arbel_ud_seg(wqe, ud_wr(wr));
                         wqe += sizeof (struct mthca_arbel_ud_seg);
                         size += sizeof (struct mthca_arbel_ud_seg) / 16;
                         break;
 
                 case MLX:
-                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                        err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
                                                wqe - sizeof (struct mthca_next_seg),
                                                wqe);
                         if (err) {
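For context, a minimal caller-side sketch of posting a UD send after the split; qp, ah, sge, remote_qpn and remote_qkey are illustrative placeholders, not code from this commit:

struct ib_ud_wr ud = {
        .wr = {
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
                .sg_list    = &sge,     /* caller-provided scatter/gather entry */
                .num_sge    = 1,
        },
        .ah          = ah,              /* caller-provided address handle */
        .remote_qpn  = remote_qpn,
        .remote_qkey = remote_qkey,
};
struct ib_send_wr *bad_wr;

/* The provider sees only &ud.wr; mthca's post-send paths above recover
 * the UD fields with ud_wr() before building the hardware WQE. */
int ret = ib_post_send(qp, &ud.wr, &bad_wr);

Embedding the generic header first keeps ib_post_send()'s signature unchanged while letting each operation carry only the fields it actually needs.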