diff options
author | Saeed Mahameed <saeedm@mellanox.com> | 2016-07-18 18:17:59 -0400 |
---|---|---|
committer | Leon Romanovsky <leon@kernel.org> | 2016-08-17 10:45:57 -0400 |
commit | 09a7d9eca1a6cf5eb4f9abfdf8914db9dbd96f08 (patch) | |
tree | 9ef5978e16db806d1d23dcd1a3f3ab60d5daa5fe | |
parent | ec22eb53106be1472ba6573dc900943f52f8fd1e (diff) |
{net,IB}/mlx5: QP/XRCD commands via mlx5 ifc
Remove old representation of manually created QP/XRCD commands layout
and use mlx5_ifc canonical structures and defines.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
-rw-r--r-- | drivers/infiniband/hw/mlx5/qp.c | 154 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 14 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/qp.c | 167 | ||||
-rw-r--r-- | include/linux/mlx5/mlx5_ifc.h | 5 | ||||
-rw-r--r-- | include/linux/mlx5/qp.h | 108 |
5 files changed, 165 insertions, 283 deletions
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 21ab0e26fa71..d22492ff863e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -726,7 +726,7 @@ err_umem: | |||
726 | static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, | 726 | static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, |
727 | struct mlx5_ib_qp *qp, struct ib_udata *udata, | 727 | struct mlx5_ib_qp *qp, struct ib_udata *udata, |
728 | struct ib_qp_init_attr *attr, | 728 | struct ib_qp_init_attr *attr, |
729 | struct mlx5_create_qp_mbox_in **in, | 729 | u32 **in, |
730 | struct mlx5_ib_create_qp_resp *resp, int *inlen, | 730 | struct mlx5_ib_create_qp_resp *resp, int *inlen, |
731 | struct mlx5_ib_qp_base *base) | 731 | struct mlx5_ib_qp_base *base) |
732 | { | 732 | { |
@@ -739,6 +739,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
739 | u32 offset = 0; | 739 | u32 offset = 0; |
740 | int uuarn; | 740 | int uuarn; |
741 | int ncont = 0; | 741 | int ncont = 0; |
742 | __be64 *pas; | ||
743 | void *qpc; | ||
742 | int err; | 744 | int err; |
743 | 745 | ||
744 | err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); | 746 | err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); |
@@ -795,20 +797,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
795 | ubuffer->umem = NULL; | 797 | ubuffer->umem = NULL; |
796 | } | 798 | } |
797 | 799 | ||
798 | *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; | 800 | *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + |
801 | MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; | ||
799 | *in = mlx5_vzalloc(*inlen); | 802 | *in = mlx5_vzalloc(*inlen); |
800 | if (!*in) { | 803 | if (!*in) { |
801 | err = -ENOMEM; | 804 | err = -ENOMEM; |
802 | goto err_umem; | 805 | goto err_umem; |
803 | } | 806 | } |
807 | |||
808 | pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); | ||
804 | if (ubuffer->umem) | 809 | if (ubuffer->umem) |
805 | mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, | 810 | mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0); |
806 | (*in)->pas, 0); | 811 | |
807 | (*in)->ctx.log_pg_sz_remote_qpn = | 812 | qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); |
808 | cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); | 813 | |
809 | (*in)->ctx.params2 = cpu_to_be32(offset << 6); | 814 | MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); |
815 | MLX5_SET(qpc, qpc, page_offset, offset); | ||
810 | 816 | ||
811 | (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); | 817 | MLX5_SET(qpc, qpc, uar_page, uar_index); |
812 | resp->uuar_index = uuarn; | 818 | resp->uuar_index = uuarn; |
813 | qp->uuarn = uuarn; | 819 | qp->uuarn = uuarn; |
814 | 820 | ||
@@ -857,12 +863,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, | |||
857 | static int create_kernel_qp(struct mlx5_ib_dev *dev, | 863 | static int create_kernel_qp(struct mlx5_ib_dev *dev, |
858 | struct ib_qp_init_attr *init_attr, | 864 | struct ib_qp_init_attr *init_attr, |
859 | struct mlx5_ib_qp *qp, | 865 | struct mlx5_ib_qp *qp, |
860 | struct mlx5_create_qp_mbox_in **in, int *inlen, | 866 | u32 **in, int *inlen, |
861 | struct mlx5_ib_qp_base *base) | 867 | struct mlx5_ib_qp_base *base) |
862 | { | 868 | { |
863 | enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; | 869 | enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; |
864 | struct mlx5_uuar_info *uuari; | 870 | struct mlx5_uuar_info *uuari; |
865 | int uar_index; | 871 | int uar_index; |
872 | void *qpc; | ||
866 | int uuarn; | 873 | int uuarn; |
867 | int err; | 874 | int err; |
868 | 875 | ||
@@ -902,25 +909,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, | |||
902 | } | 909 | } |
903 | 910 | ||
904 | qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); | 911 | qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); |
905 | *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; | 912 | *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + |
913 | MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; | ||
906 | *in = mlx5_vzalloc(*inlen); | 914 | *in = mlx5_vzalloc(*inlen); |
907 | if (!*in) { | 915 | if (!*in) { |
908 | err = -ENOMEM; | 916 | err = -ENOMEM; |
909 | goto err_buf; | 917 | goto err_buf; |
910 | } | 918 | } |
911 | (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); | 919 | |
912 | (*in)->ctx.log_pg_sz_remote_qpn = | 920 | qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); |
913 | cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); | 921 | MLX5_SET(qpc, qpc, uar_page, uar_index); |
922 | MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); | ||
923 | |||
914 | /* Set "fast registration enabled" for all kernel QPs */ | 924 | /* Set "fast registration enabled" for all kernel QPs */ |
915 | (*in)->ctx.params1 |= cpu_to_be32(1 << 11); | 925 | MLX5_SET(qpc, qpc, fre, 1); |
916 | (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); | 926 | MLX5_SET(qpc, qpc, rlky, 1); |
917 | 927 | ||
918 | if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { | 928 | if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { |
919 | (*in)->ctx.deth_sqpn = cpu_to_be32(1); | 929 | MLX5_SET(qpc, qpc, deth_sqpn, 1); |
920 | qp->flags |= MLX5_IB_QP_SQPN_QP1; | 930 | qp->flags |= MLX5_IB_QP_SQPN_QP1; |
921 | } | 931 | } |
922 | 932 | ||
923 | mlx5_fill_page_array(&qp->buf, (*in)->pas); | 933 | mlx5_fill_page_array(&qp->buf, |
934 | (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas)); | ||
924 | 935 | ||
925 | err = mlx5_db_alloc(dev->mdev, &qp->db); | 936 | err = mlx5_db_alloc(dev->mdev, &qp->db); |
926 | if (err) { | 937 | if (err) { |
@@ -974,15 +985,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) | |||
974 | free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); | 985 | free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); |
975 | } | 986 | } |
976 | 987 | ||
977 | static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) | 988 | static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) |
978 | { | 989 | { |
979 | if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || | 990 | if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || |
980 | (attr->qp_type == IB_QPT_XRC_INI)) | 991 | (attr->qp_type == IB_QPT_XRC_INI)) |
981 | return cpu_to_be32(MLX5_SRQ_RQ); | 992 | return MLX5_SRQ_RQ; |
982 | else if (!qp->has_rq) | 993 | else if (!qp->has_rq) |
983 | return cpu_to_be32(MLX5_ZERO_LEN_RQ); | 994 | return MLX5_ZERO_LEN_RQ; |
984 | else | 995 | else |
985 | return cpu_to_be32(MLX5_NON_ZERO_RQ); | 996 | return MLX5_NON_ZERO_RQ; |
986 | } | 997 | } |
987 | 998 | ||
988 | static int is_connected(enum ib_qp_type qp_type) | 999 | static int is_connected(enum ib_qp_type qp_type) |
@@ -1191,7 +1202,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, | |||
1191 | } | 1202 | } |
1192 | 1203 | ||
1193 | static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 1204 | static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
1194 | struct mlx5_create_qp_mbox_in *in, | 1205 | u32 *in, |
1195 | struct ib_pd *pd) | 1206 | struct ib_pd *pd) |
1196 | { | 1207 | { |
1197 | struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; | 1208 | struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; |
@@ -1461,18 +1472,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1461 | struct ib_udata *udata, struct mlx5_ib_qp *qp) | 1472 | struct ib_udata *udata, struct mlx5_ib_qp *qp) |
1462 | { | 1473 | { |
1463 | struct mlx5_ib_resources *devr = &dev->devr; | 1474 | struct mlx5_ib_resources *devr = &dev->devr; |
1475 | int inlen = MLX5_ST_SZ_BYTES(create_qp_in); | ||
1464 | struct mlx5_core_dev *mdev = dev->mdev; | 1476 | struct mlx5_core_dev *mdev = dev->mdev; |
1465 | struct mlx5_ib_qp_base *base; | ||
1466 | struct mlx5_ib_create_qp_resp resp; | 1477 | struct mlx5_ib_create_qp_resp resp; |
1467 | struct mlx5_create_qp_mbox_in *in; | ||
1468 | struct mlx5_ib_create_qp ucmd; | ||
1469 | struct mlx5_ib_cq *send_cq; | 1478 | struct mlx5_ib_cq *send_cq; |
1470 | struct mlx5_ib_cq *recv_cq; | 1479 | struct mlx5_ib_cq *recv_cq; |
1471 | unsigned long flags; | 1480 | unsigned long flags; |
1472 | int inlen = sizeof(*in); | ||
1473 | int err; | ||
1474 | u32 uidx = MLX5_IB_DEFAULT_UIDX; | 1481 | u32 uidx = MLX5_IB_DEFAULT_UIDX; |
1482 | struct mlx5_ib_create_qp ucmd; | ||
1483 | struct mlx5_ib_qp_base *base; | ||
1475 | void *qpc; | 1484 | void *qpc; |
1485 | u32 *in; | ||
1486 | int err; | ||
1476 | 1487 | ||
1477 | base = init_attr->qp_type == IB_QPT_RAW_PACKET ? | 1488 | base = init_attr->qp_type == IB_QPT_RAW_PACKET ? |
1478 | &qp->raw_packet_qp.rq.base : | 1489 | &qp->raw_packet_qp.rq.base : |
@@ -1600,7 +1611,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1600 | if (err) | 1611 | if (err) |
1601 | return err; | 1612 | return err; |
1602 | } else { | 1613 | } else { |
1603 | in = mlx5_vzalloc(sizeof(*in)); | 1614 | in = mlx5_vzalloc(inlen); |
1604 | if (!in) | 1615 | if (!in) |
1605 | return -ENOMEM; | 1616 | return -ENOMEM; |
1606 | 1617 | ||
@@ -1610,26 +1621,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1610 | if (is_sqp(init_attr->qp_type)) | 1621 | if (is_sqp(init_attr->qp_type)) |
1611 | qp->port = init_attr->port_num; | 1622 | qp->port = init_attr->port_num; |
1612 | 1623 | ||
1613 | in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 | | 1624 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); |
1614 | MLX5_QP_PM_MIGRATED << 11); | 1625 | |
1626 | MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); | ||
1627 | MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); | ||
1615 | 1628 | ||
1616 | if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) | 1629 | if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) |
1617 | in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn); | 1630 | MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn); |
1618 | else | 1631 | else |
1619 | in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE); | 1632 | MLX5_SET(qpc, qpc, latency_sensitive, 1); |
1633 | |||
1620 | 1634 | ||
1621 | if (qp->wq_sig) | 1635 | if (qp->wq_sig) |
1622 | in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); | 1636 | MLX5_SET(qpc, qpc, wq_signature, 1); |
1623 | 1637 | ||
1624 | if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) | 1638 | if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) |
1625 | in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST); | 1639 | MLX5_SET(qpc, qpc, block_lb_mc, 1); |
1626 | 1640 | ||
1627 | if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) | 1641 | if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) |
1628 | in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER); | 1642 | MLX5_SET(qpc, qpc, cd_master, 1); |
1629 | if (qp->flags & MLX5_IB_QP_MANAGED_SEND) | 1643 | if (qp->flags & MLX5_IB_QP_MANAGED_SEND) |
1630 | in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND); | 1644 | MLX5_SET(qpc, qpc, cd_slave_send, 1); |
1631 | if (qp->flags & MLX5_IB_QP_MANAGED_RECV) | 1645 | if (qp->flags & MLX5_IB_QP_MANAGED_RECV) |
1632 | in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV); | 1646 | MLX5_SET(qpc, qpc, cd_slave_receive, 1); |
1633 | 1647 | ||
1634 | if (qp->scat_cqe && is_connected(init_attr->qp_type)) { | 1648 | if (qp->scat_cqe && is_connected(init_attr->qp_type)) { |
1635 | int rcqe_sz; | 1649 | int rcqe_sz; |
@@ -1639,71 +1653,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1639 | scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); | 1653 | scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); |
1640 | 1654 | ||
1641 | if (rcqe_sz == 128) | 1655 | if (rcqe_sz == 128) |
1642 | in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE; | 1656 | MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); |
1643 | else | 1657 | else |
1644 | in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; | 1658 | MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE); |
1645 | 1659 | ||
1646 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { | 1660 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { |
1647 | if (scqe_sz == 128) | 1661 | if (scqe_sz == 128) |
1648 | in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; | 1662 | MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE); |
1649 | else | 1663 | else |
1650 | in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; | 1664 | MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); |
1651 | } | 1665 | } |
1652 | } | 1666 | } |
1653 | 1667 | ||
1654 | if (qp->rq.wqe_cnt) { | 1668 | if (qp->rq.wqe_cnt) { |
1655 | in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); | 1669 | MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); |
1656 | in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; | 1670 | MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); |
1657 | } | 1671 | } |
1658 | 1672 | ||
1659 | in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); | 1673 | MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); |
1660 | 1674 | ||
1661 | if (qp->sq.wqe_cnt) | 1675 | if (qp->sq.wqe_cnt) |
1662 | in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); | 1676 | MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); |
1663 | else | 1677 | else |
1664 | in->ctx.sq_crq_size |= cpu_to_be16(0x8000); | 1678 | MLX5_SET(qpc, qpc, no_sq, 1); |
1665 | 1679 | ||
1666 | /* Set default resources */ | 1680 | /* Set default resources */ |
1667 | switch (init_attr->qp_type) { | 1681 | switch (init_attr->qp_type) { |
1668 | case IB_QPT_XRC_TGT: | 1682 | case IB_QPT_XRC_TGT: |
1669 | in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); | 1683 | MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); |
1670 | in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); | 1684 | MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); |
1671 | in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); | 1685 | MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); |
1672 | in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); | 1686 | MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn); |
1673 | break; | 1687 | break; |
1674 | case IB_QPT_XRC_INI: | 1688 | case IB_QPT_XRC_INI: |
1675 | in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); | 1689 | MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); |
1676 | in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); | 1690 | MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); |
1677 | in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); | 1691 | MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); |
1678 | break; | 1692 | break; |
1679 | default: | 1693 | default: |
1680 | if (init_attr->srq) { | 1694 | if (init_attr->srq) { |
1681 | in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); | 1695 | MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn); |
1682 | in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); | 1696 | MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); |
1683 | } else { | 1697 | } else { |
1684 | in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); | 1698 | MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); |
1685 | in->ctx.rq_type_srqn |= | 1699 | MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); |
1686 | cpu_to_be32(to_msrq(devr->s1)->msrq.srqn); | ||
1687 | } | 1700 | } |
1688 | } | 1701 | } |
1689 | 1702 | ||
1690 | if (init_attr->send_cq) | 1703 | if (init_attr->send_cq) |
1691 | in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); | 1704 | MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); |
1692 | 1705 | ||
1693 | if (init_attr->recv_cq) | 1706 | if (init_attr->recv_cq) |
1694 | in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); | 1707 | MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); |
1695 | 1708 | ||
1696 | in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); | 1709 | MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); |
1697 | 1710 | ||
1698 | if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) { | 1711 | /* 0xffffff means we ask to work with cqe version 0 */ |
1699 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); | 1712 | if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) |
1700 | /* 0xffffff means we ask to work with cqe version 0 */ | ||
1701 | MLX5_SET(qpc, qpc, user_index, uidx); | 1713 | MLX5_SET(qpc, qpc, user_index, uidx); |
1702 | } | 1714 | |
1703 | /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ | 1715 | /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ |
1704 | if (init_attr->qp_type == IB_QPT_UD && | 1716 | if (init_attr->qp_type == IB_QPT_UD && |
1705 | (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) { | 1717 | (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) { |
1706 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); | ||
1707 | MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); | 1718 | MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); |
1708 | qp->flags |= MLX5_IB_QP_LSO; | 1719 | qp->flags |= MLX5_IB_QP_LSO; |
1709 | } | 1720 | } |
@@ -4320,21 +4331,24 @@ static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev, | |||
4320 | static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 4331 | static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
4321 | struct ib_qp_attr *qp_attr) | 4332 | struct ib_qp_attr *qp_attr) |
4322 | { | 4333 | { |
4323 | struct mlx5_query_qp_mbox_out *outb; | 4334 | int outlen = MLX5_ST_SZ_BYTES(query_qp_out); |
4324 | struct mlx5_qp_context *context; | 4335 | struct mlx5_qp_context *context; |
4325 | int mlx5_state; | 4336 | int mlx5_state; |
4337 | u32 *outb; | ||
4326 | int err = 0; | 4338 | int err = 0; |
4327 | 4339 | ||
4328 | outb = kzalloc(sizeof(*outb), GFP_KERNEL); | 4340 | outb = kzalloc(outlen, GFP_KERNEL); |
4329 | if (!outb) | 4341 | if (!outb) |
4330 | return -ENOMEM; | 4342 | return -ENOMEM; |
4331 | 4343 | ||
4332 | context = &outb->ctx; | ||
4333 | err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, | 4344 | err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, |
4334 | sizeof(*outb)); | 4345 | outlen); |
4335 | if (err) | 4346 | if (err) |
4336 | goto out; | 4347 | goto out; |
4337 | 4348 | ||
4349 | /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ | ||
4350 | context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc); | ||
4351 | |||
4338 | mlx5_state = be32_to_cpu(context->flags) >> 28; | 4352 | mlx5_state = be32_to_cpu(context->flags) >> 28; |
4339 | 4353 | ||
4340 | qp->state = to_ib_qp_state(mlx5_state); | 4354 | qp->state = to_ib_qp_state(mlx5_state); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index b7484e4128c8..e94a9532e218 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | |||
@@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) | |||
277 | static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | 277 | static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, |
278 | int index, int *is_str) | 278 | int index, int *is_str) |
279 | { | 279 | { |
280 | struct mlx5_query_qp_mbox_out *out; | 280 | int outlen = MLX5_ST_SZ_BYTES(query_qp_out); |
281 | struct mlx5_qp_context *ctx; | 281 | struct mlx5_qp_context *ctx; |
282 | u64 param = 0; | 282 | u64 param = 0; |
283 | u32 *out; | ||
283 | int err; | 284 | int err; |
284 | int no_sq; | 285 | int no_sq; |
285 | 286 | ||
286 | out = kzalloc(sizeof(*out), GFP_KERNEL); | 287 | out = kzalloc(outlen, GFP_KERNEL); |
287 | if (!out) | 288 | if (!out) |
288 | return param; | 289 | return param; |
289 | 290 | ||
290 | err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); | 291 | err = mlx5_core_qp_query(dev, qp, out, outlen); |
291 | if (err) { | 292 | if (err) { |
292 | mlx5_core_warn(dev, "failed to query qp\n"); | 293 | mlx5_core_warn(dev, "failed to query qp err=%d\n", err); |
293 | goto out; | 294 | goto out; |
294 | } | 295 | } |
295 | 296 | ||
296 | *is_str = 0; | 297 | *is_str = 0; |
297 | ctx = &out->ctx; | 298 | |
299 | /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ | ||
300 | ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc); | ||
301 | |||
298 | switch (index) { | 302 | switch (index) { |
299 | case QP_PID: | 303 | case QP_PID: |
300 | param = qp->pid; | 304 | param = qp->pid; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b82d65802d96..36d240c9d15f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
@@ -271,30 +271,21 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev, | |||
271 | 271 | ||
272 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 272 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
273 | struct mlx5_core_qp *qp, | 273 | struct mlx5_core_qp *qp, |
274 | struct mlx5_create_qp_mbox_in *in, | 274 | u32 *in, int inlen) |
275 | int inlen) | ||
276 | { | 275 | { |
277 | struct mlx5_create_qp_mbox_out out; | 276 | u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0}; |
278 | struct mlx5_destroy_qp_mbox_in din; | 277 | u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)]; |
279 | struct mlx5_destroy_qp_mbox_out dout; | 278 | u32 din[MLX5_ST_SZ_DW(destroy_qp_in)]; |
280 | int err; | 279 | int err; |
281 | 280 | ||
282 | memset(&out, 0, sizeof(out)); | 281 | MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); |
283 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); | ||
284 | 282 | ||
285 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | 283 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
286 | if (err) { | 284 | err = err ? : mlx5_cmd_status_to_err_v2(out); |
287 | mlx5_core_warn(dev, "ret %d\n", err); | 285 | if (err) |
288 | return err; | 286 | return err; |
289 | } | ||
290 | |||
291 | if (out.hdr.status) { | ||
292 | mlx5_core_warn(dev, "current num of QPs 0x%x\n", | ||
293 | atomic_read(&dev->num_qps)); | ||
294 | return mlx5_cmd_status_to_err(&out.hdr); | ||
295 | } | ||
296 | 287 | ||
297 | qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; | 288 | qp->qpn = MLX5_GET(create_qp_out, out, qpn); |
298 | mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); | 289 | mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); |
299 | 290 | ||
300 | err = create_qprqsq_common(dev, qp, MLX5_RES_QP); | 291 | err = create_qprqsq_common(dev, qp, MLX5_RES_QP); |
@@ -311,12 +302,12 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, | |||
311 | return 0; | 302 | return 0; |
312 | 303 | ||
313 | err_cmd: | 304 | err_cmd: |
314 | memset(&din, 0, sizeof(din)); | 305 | memset(din, 0, sizeof(din)); |
315 | memset(&dout, 0, sizeof(dout)); | 306 | memset(dout, 0, sizeof(dout)); |
316 | din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); | 307 | MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); |
317 | din.qpn = cpu_to_be32(qp->qpn); | 308 | MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); |
318 | mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); | 309 | mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); |
319 | 310 | mlx5_cmd_status_to_err_v2(dout); | |
320 | return err; | 311 | return err; |
321 | } | 312 | } |
322 | EXPORT_SYMBOL_GPL(mlx5_core_create_qp); | 313 | EXPORT_SYMBOL_GPL(mlx5_core_create_qp); |
@@ -324,25 +315,21 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp); | |||
324 | int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, | 315 | int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, |
325 | struct mlx5_core_qp *qp) | 316 | struct mlx5_core_qp *qp) |
326 | { | 317 | { |
327 | struct mlx5_destroy_qp_mbox_in in; | 318 | u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0}; |
328 | struct mlx5_destroy_qp_mbox_out out; | 319 | u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0}; |
329 | int err; | 320 | int err; |
330 | 321 | ||
331 | mlx5_debug_qp_remove(dev, qp); | 322 | mlx5_debug_qp_remove(dev, qp); |
332 | 323 | ||
333 | destroy_qprqsq_common(dev, qp); | 324 | destroy_qprqsq_common(dev, qp); |
334 | 325 | ||
335 | memset(&in, 0, sizeof(in)); | 326 | MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); |
336 | memset(&out, 0, sizeof(out)); | 327 | MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); |
337 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); | 328 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
338 | in.qpn = cpu_to_be32(qp->qpn); | 329 | err = err ? : mlx5_cmd_status_to_err_v2(out); |
339 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
340 | if (err) | 330 | if (err) |
341 | return err; | 331 | return err; |
342 | 332 | ||
343 | if (out.hdr.status) | ||
344 | return mlx5_cmd_status_to_err(&out.hdr); | ||
345 | |||
346 | atomic_dec(&dev->num_qps); | 333 | atomic_dec(&dev->num_qps); |
347 | return 0; | 334 | return 0; |
348 | } | 335 | } |
@@ -382,66 +369,44 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) | |||
382 | } | 369 | } |
383 | 370 | ||
384 | int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | 371 | int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, |
385 | struct mlx5_query_qp_mbox_out *out, int outlen) | 372 | u32 *out, int outlen) |
386 | { | 373 | { |
387 | struct mlx5_query_qp_mbox_in in; | 374 | u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0}; |
388 | int err; | 375 | int err; |
389 | 376 | ||
390 | memset(&in, 0, sizeof(in)); | 377 | MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); |
391 | memset(out, 0, outlen); | 378 | MLX5_SET(query_qp_in, in, qpn, qp->qpn); |
392 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); | ||
393 | in.qpn = cpu_to_be32(qp->qpn); | ||
394 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | ||
395 | if (err) | ||
396 | return err; | ||
397 | 379 | ||
398 | if (out->hdr.status) | 380 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); |
399 | return mlx5_cmd_status_to_err(&out->hdr); | 381 | return err ? : mlx5_cmd_status_to_err_v2(out); |
400 | |||
401 | return err; | ||
402 | } | 382 | } |
403 | EXPORT_SYMBOL_GPL(mlx5_core_qp_query); | 383 | EXPORT_SYMBOL_GPL(mlx5_core_qp_query); |
404 | 384 | ||
405 | int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) | 385 | int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) |
406 | { | 386 | { |
407 | struct mlx5_alloc_xrcd_mbox_in in; | 387 | u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0}; |
408 | struct mlx5_alloc_xrcd_mbox_out out; | 388 | u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0}; |
409 | int err; | 389 | int err; |
410 | 390 | ||
411 | memset(&in, 0, sizeof(in)); | 391 | MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD); |
412 | memset(&out, 0, sizeof(out)); | 392 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
413 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); | 393 | err = err ? : mlx5_cmd_status_to_err_v2(out); |
414 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | 394 | if (!err) |
415 | if (err) | 395 | *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd); |
416 | return err; | ||
417 | |||
418 | if (out.hdr.status) | ||
419 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
420 | else | ||
421 | *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; | ||
422 | |||
423 | return err; | 396 | return err; |
424 | } | 397 | } |
425 | EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); | 398 | EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); |
426 | 399 | ||
427 | int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) | 400 | int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) |
428 | { | 401 | { |
429 | struct mlx5_dealloc_xrcd_mbox_in in; | 402 | u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0}; |
430 | struct mlx5_dealloc_xrcd_mbox_out out; | 403 | u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0}; |
431 | int err; | 404 | int err; |
432 | 405 | ||
433 | memset(&in, 0, sizeof(in)); | 406 | MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD); |
434 | memset(&out, 0, sizeof(out)); | 407 | MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn); |
435 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); | 408 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
436 | in.xrcdn = cpu_to_be32(xrcdn); | 409 | return err ? : mlx5_cmd_status_to_err_v2(out); |
437 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
438 | if (err) | ||
439 | return err; | ||
440 | |||
441 | if (out.hdr.status) | ||
442 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
443 | |||
444 | return err; | ||
445 | } | 410 | } |
446 | EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); | 411 | EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); |
447 | 412 | ||
@@ -449,28 +414,26 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); | |||
449 | int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, | 414 | int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, |
450 | u8 flags, int error) | 415 | u8 flags, int error) |
451 | { | 416 | { |
452 | struct mlx5_page_fault_resume_mbox_in in; | 417 | u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; |
453 | struct mlx5_page_fault_resume_mbox_out out; | 418 | u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; |
454 | int err; | 419 | int err; |
455 | 420 | ||
456 | memset(&in, 0, sizeof(in)); | 421 | MLX5_SET(page_fault_resume_in, in, opcode, |
457 | memset(&out, 0, sizeof(out)); | 422 | MLX5_CMD_OP_PAGE_FAULT_RESUME); |
458 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); | ||
459 | in.hdr.opmod = 0; | ||
460 | flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR | | ||
461 | MLX5_PAGE_FAULT_RESUME_WRITE | | ||
462 | MLX5_PAGE_FAULT_RESUME_RDMA); | ||
463 | flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0); | ||
464 | in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | | ||
465 | (flags << MLX5_QPN_BITS)); | ||
466 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
467 | if (err) | ||
468 | return err; | ||
469 | 423 | ||
470 | if (out.hdr.status) | 424 | MLX5_SET(page_fault_resume_in, in, qpn, qpn); |
471 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
472 | 425 | ||
473 | return err; | 426 | if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) |
427 | MLX5_SET(page_fault_resume_in, in, req_res, 1); | ||
428 | if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) | ||
429 | MLX5_SET(page_fault_resume_in, in, read_write, 1); | ||
430 | if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) | ||
431 | MLX5_SET(page_fault_resume_in, in, rdma, 1); | ||
432 | if (error) | ||
433 | MLX5_SET(page_fault_resume_in, in, error, 1); | ||
434 | |||
435 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); | ||
436 | return err ? : mlx5_cmd_status_to_err_v2(out); | ||
474 | } | 437 | } |
475 | EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); | 438 | EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); |
476 | #endif | 439 | #endif |
@@ -541,13 +504,10 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); | |||
541 | 504 | ||
542 | int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) | 505 | int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) |
543 | { | 506 | { |
544 | u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]; | 507 | u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; |
545 | u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)]; | 508 | u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; |
546 | int err; | 509 | int err; |
547 | 510 | ||
548 | memset(in, 0, sizeof(in)); | ||
549 | memset(out, 0, sizeof(out)); | ||
550 | |||
551 | MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); | 511 | MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); |
552 | err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); | 512 | err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); |
553 | if (!err) | 513 | if (!err) |
@@ -559,11 +519,8 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter); | |||
559 | 519 | ||
560 | int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) | 520 | int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) |
561 | { | 521 | { |
562 | u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]; | 522 | u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0}; |
563 | u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)]; | 523 | u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0}; |
564 | |||
565 | memset(in, 0, sizeof(in)); | ||
566 | memset(out, 0, sizeof(out)); | ||
567 | 524 | ||
568 | MLX5_SET(dealloc_q_counter_in, in, opcode, | 525 | MLX5_SET(dealloc_q_counter_in, in, opcode, |
569 | MLX5_CMD_OP_DEALLOC_Q_COUNTER); | 526 | MLX5_CMD_OP_DEALLOC_Q_COUNTER); |
@@ -576,9 +533,7 @@ EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter); | |||
576 | int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, | 533 | int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, |
577 | int reset, void *out, int out_size) | 534 | int reset, void *out, int out_size) |
578 | { | 535 | { |
579 | u32 in[MLX5_ST_SZ_DW(query_q_counter_in)]; | 536 | u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0}; |
580 | |||
581 | memset(in, 0, sizeof(in)); | ||
582 | 537 | ||
583 | MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); | 538 | MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); |
584 | MLX5_SET(query_q_counter_in, in, clear, reset); | 539 | MLX5_SET(query_q_counter_in, in, clear, reset); |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 2a39a06dbad4..cb94ac5b8420 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -1966,7 +1966,10 @@ struct mlx5_ifc_qpc_bits { | |||
1966 | u8 reserved_at_3e0[0x8]; | 1966 | u8 reserved_at_3e0[0x8]; |
1967 | u8 cqn_snd[0x18]; | 1967 | u8 cqn_snd[0x18]; |
1968 | 1968 | ||
1969 | u8 reserved_at_400[0x40]; | 1969 | u8 reserved_at_400[0x8]; |
1970 | u8 deth_sqpn[0x18]; | ||
1971 | |||
1972 | u8 reserved_at_420[0x20]; | ||
1970 | 1973 | ||
1971 | u8 reserved_at_440[0x8]; | 1974 | u8 reserved_at_440[0x8]; |
1972 | u8 last_acked_psn[0x18]; | 1975 | u8 last_acked_psn[0x18]; |
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 7879bf411891..16e1efecaf66 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
@@ -123,12 +123,13 @@ enum { | |||
123 | }; | 123 | }; |
124 | 124 | ||
125 | enum { | 125 | enum { |
126 | MLX5_NON_ZERO_RQ = 0 << 24, | 126 | MLX5_NON_ZERO_RQ = 0x0, |
127 | MLX5_SRQ_RQ = 1 << 24, | 127 | MLX5_SRQ_RQ = 0x1, |
128 | MLX5_CRQ_RQ = 2 << 24, | 128 | MLX5_CRQ_RQ = 0x2, |
129 | MLX5_ZERO_LEN_RQ = 3 << 24 | 129 | MLX5_ZERO_LEN_RQ = 0x3 |
130 | }; | 130 | }; |
131 | 131 | ||
132 | /* TODO REM */ | ||
132 | enum { | 133 | enum { |
133 | /* params1 */ | 134 | /* params1 */ |
134 | MLX5_QP_BIT_SRE = 1 << 15, | 135 | MLX5_QP_BIT_SRE = 1 << 15, |
@@ -178,12 +179,6 @@ enum { | |||
178 | }; | 179 | }; |
179 | 180 | ||
180 | enum { | 181 | enum { |
181 | MLX5_QP_LAT_SENSITIVE = 1 << 28, | ||
182 | MLX5_QP_BLOCK_MCAST = 1 << 30, | ||
183 | MLX5_QP_ENABLE_SIG = 1 << 31, | ||
184 | }; | ||
185 | |||
186 | enum { | ||
187 | MLX5_RCV_DBR = 0, | 182 | MLX5_RCV_DBR = 0, |
188 | MLX5_SND_DBR = 1, | 183 | MLX5_SND_DBR = 1, |
189 | }; | 184 | }; |
@@ -525,34 +520,6 @@ struct mlx5_qp_context { | |||
525 | u8 rsvd1[24]; | 520 | u8 rsvd1[24]; |
526 | }; | 521 | }; |
527 | 522 | ||
528 | struct mlx5_create_qp_mbox_in { | ||
529 | struct mlx5_inbox_hdr hdr; | ||
530 | __be32 input_qpn; | ||
531 | u8 rsvd0[4]; | ||
532 | __be32 opt_param_mask; | ||
533 | u8 rsvd1[4]; | ||
534 | struct mlx5_qp_context ctx; | ||
535 | u8 rsvd3[16]; | ||
536 | __be64 pas[0]; | ||
537 | }; | ||
538 | |||
539 | struct mlx5_create_qp_mbox_out { | ||
540 | struct mlx5_outbox_hdr hdr; | ||
541 | __be32 qpn; | ||
542 | u8 rsvd0[4]; | ||
543 | }; | ||
544 | |||
545 | struct mlx5_destroy_qp_mbox_in { | ||
546 | struct mlx5_inbox_hdr hdr; | ||
547 | __be32 qpn; | ||
548 | u8 rsvd0[4]; | ||
549 | }; | ||
550 | |||
551 | struct mlx5_destroy_qp_mbox_out { | ||
552 | struct mlx5_outbox_hdr hdr; | ||
553 | u8 rsvd0[8]; | ||
554 | }; | ||
555 | |||
556 | struct mlx5_modify_qp_mbox_in { | 523 | struct mlx5_modify_qp_mbox_in { |
557 | struct mlx5_inbox_hdr hdr; | 524 | struct mlx5_inbox_hdr hdr; |
558 | __be32 qpn; | 525 | __be32 qpn; |
@@ -568,56 +535,6 @@ struct mlx5_modify_qp_mbox_out { | |||
568 | u8 rsvd0[8]; | 535 | u8 rsvd0[8]; |
569 | }; | 536 | }; |
570 | 537 | ||
571 | struct mlx5_query_qp_mbox_in { | ||
572 | struct mlx5_inbox_hdr hdr; | ||
573 | __be32 qpn; | ||
574 | u8 rsvd[4]; | ||
575 | }; | ||
576 | |||
577 | struct mlx5_query_qp_mbox_out { | ||
578 | struct mlx5_outbox_hdr hdr; | ||
579 | u8 rsvd1[8]; | ||
580 | __be32 optparam; | ||
581 | u8 rsvd0[4]; | ||
582 | struct mlx5_qp_context ctx; | ||
583 | u8 rsvd2[16]; | ||
584 | __be64 pas[0]; | ||
585 | }; | ||
586 | |||
587 | struct mlx5_conf_sqp_mbox_in { | ||
588 | struct mlx5_inbox_hdr hdr; | ||
589 | __be32 qpn; | ||
590 | u8 rsvd[3]; | ||
591 | u8 type; | ||
592 | }; | ||
593 | |||
594 | struct mlx5_conf_sqp_mbox_out { | ||
595 | struct mlx5_outbox_hdr hdr; | ||
596 | u8 rsvd[8]; | ||
597 | }; | ||
598 | |||
599 | struct mlx5_alloc_xrcd_mbox_in { | ||
600 | struct mlx5_inbox_hdr hdr; | ||
601 | u8 rsvd[8]; | ||
602 | }; | ||
603 | |||
604 | struct mlx5_alloc_xrcd_mbox_out { | ||
605 | struct mlx5_outbox_hdr hdr; | ||
606 | __be32 xrcdn; | ||
607 | u8 rsvd[4]; | ||
608 | }; | ||
609 | |||
610 | struct mlx5_dealloc_xrcd_mbox_in { | ||
611 | struct mlx5_inbox_hdr hdr; | ||
612 | __be32 xrcdn; | ||
613 | u8 rsvd[4]; | ||
614 | }; | ||
615 | |||
616 | struct mlx5_dealloc_xrcd_mbox_out { | ||
617 | struct mlx5_outbox_hdr hdr; | ||
618 | u8 rsvd[8]; | ||
619 | }; | ||
620 | |||
621 | static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) | 538 | static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) |
622 | { | 539 | { |
623 | return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); | 540 | return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); |
@@ -628,20 +545,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, | |||
628 | return radix_tree_lookup(&dev->priv.mkey_table.tree, key); | 545 | return radix_tree_lookup(&dev->priv.mkey_table.tree, key); |
629 | } | 546 | } |
630 | 547 | ||
631 | struct mlx5_page_fault_resume_mbox_in { | ||
632 | struct mlx5_inbox_hdr hdr; | ||
633 | __be32 flags_qpn; | ||
634 | u8 reserved[4]; | ||
635 | }; | ||
636 | |||
637 | struct mlx5_page_fault_resume_mbox_out { | ||
638 | struct mlx5_outbox_hdr hdr; | ||
639 | u8 rsvd[8]; | ||
640 | }; | ||
641 | |||
642 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 548 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
643 | struct mlx5_core_qp *qp, | 549 | struct mlx5_core_qp *qp, |
644 | struct mlx5_create_qp_mbox_in *in, | 550 | u32 *in, |
645 | int inlen); | 551 | int inlen); |
646 | int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, | 552 | int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, |
647 | struct mlx5_modify_qp_mbox_in *in, int sqd_event, | 553 | struct mlx5_modify_qp_mbox_in *in, int sqd_event, |
@@ -649,7 +555,7 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, | |||
649 | int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, | 555 | int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, |
650 | struct mlx5_core_qp *qp); | 556 | struct mlx5_core_qp *qp); |
651 | int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | 557 | int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, |
652 | struct mlx5_query_qp_mbox_out *out, int outlen); | 558 | u32 *out, int outlen); |
653 | 559 | ||
654 | int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); | 560 | int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); |
655 | int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); | 561 | int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); |