author	majd@mellanox.com <majd@mellanox.com>	2016-01-14 12:13:08 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-01-21 12:01:09 -0500
commit	ad5f8e964cd0a88c746577aab2c6ea26e3b26673 (patch)
tree	ab859c819d4b7b5c7e20fd256c60764021c087c9
parent	427c1e7bcd7e5cd62160fcda0ce215ebbe0da3a1 (diff)
IB/mlx5: Expose Raw Packet QP to user space consumers
Added Raw Packet QP modify functionality, which enables user space
consumers to use it.

Since a Raw Packet QP is built of SQ and RQ sub-objects, its state
changes are implemented by changing the state of the sub-objects.

Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
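For context, here is a minimal sketch (not part of the patch) of how a user space consumer might drive an IBV_QPT_RAW_PACKET QP through its states with plain libibverbs once this modify support is in place. The helper name raw_packet_qp_to_rts and the exact attribute masks are assumptions based on the usual Raw Packet QP requirements, not something this commit defines; each ibv_modify_qp() call eventually reaches __mlx5_ib_modify_qp(), which the diff below teaches to translate the requested transition into SQ and RQ sub-object states (see modify_raw_packet_qp()).

#include <string.h>
#include <stdint.h>
#include <infiniband/verbs.h>

/*
 * Sketch only: move a Raw Packet QP from RESET to RTS.  The attribute
 * masks assume the usual IBV_QPT_RAW_PACKET rules (port for INIT, state
 * alone for RTR and RTS); error handling is reduced to returning the
 * errno value from ibv_modify_qp().
 */
static int raw_packet_qp_to_rts(struct ibv_qp *qp, uint8_t port_num)
{
	struct ibv_qp_attr attr;
	int err;

	/* RESET -> INIT: a Raw Packet QP only needs the port number. */
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IBV_QPS_INIT;
	attr.port_num = port_num;
	err = ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);
	if (err)
		return err;

	/* INIT -> RTR: no address vector is needed for raw Ethernet. */
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IBV_QPS_RTR;
	err = ibv_modify_qp(qp, &attr, IBV_QP_STATE);
	if (err)
		return err;

	/* RTR -> RTS: the QP can now post sends. */
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IBV_QPS_RTS;
	return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}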
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	139
1 file changed, 127 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 26e461b6a7b9..8fb9c27485e1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1511,6 +1511,9 @@ static void get_cqs(struct mlx5_ib_qp *qp,
 	}
 }
 
+static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+				u16 operation);
+
 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
 	struct mlx5_ib_cq *send_cq, *recv_cq;
@@ -1527,9 +1530,16 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 		return;
 
 	if (qp->state != IB_QPS_RESET) {
-		mlx5_ib_qp_disable_pagefaults(qp);
-		if (mlx5_core_qp_modify(dev->mdev, MLX5_CMD_OP_2RST_QP,
-					in, 0, &base->mqp))
+		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
+			mlx5_ib_qp_disable_pagefaults(qp);
+			err = mlx5_core_qp_modify(dev->mdev,
+						  MLX5_CMD_OP_2RST_QP, in, 0,
+						  &base->mqp);
+		} else {
+			err = modify_raw_packet_qp(dev, qp,
+						   MLX5_CMD_OP_2RST_QP);
+		}
+		if (err)
 			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
 				     base->mqp.qpn);
 	}
@@ -1984,6 +1994,110 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
 	return result;
 }
 
+static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
+				   struct mlx5_ib_rq *rq, int new_state)
+{
+	void *in;
+	void *rqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+	MLX5_SET(rqc, rqc, state, new_state);
+
+	err = mlx5_core_modify_rq(dev, rq->base.mqp.qpn, in, inlen);
+	if (err)
+		goto out;
+
+	rq->state = new_state;
+
+out:
+	kvfree(in);
+	return err;
+}
+
+static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
+				   struct mlx5_ib_sq *sq, int new_state)
+{
+	void *in;
+	void *sqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_sq_in, in, sq_state, sq->state);
+
+	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+	MLX5_SET(sqc, sqc, state, new_state);
+
+	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
+	if (err)
+		goto out;
+
+	sq->state = new_state;
+
+out:
+	kvfree(in);
+	return err;
+}
+
+static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+				u16 operation)
+{
+	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+	int rq_state;
+	int sq_state;
+	int err;
+
+	switch (operation) {
+	case MLX5_CMD_OP_RST2INIT_QP:
+		rq_state = MLX5_RQC_STATE_RDY;
+		sq_state = MLX5_SQC_STATE_RDY;
+		break;
+	case MLX5_CMD_OP_2ERR_QP:
+		rq_state = MLX5_RQC_STATE_ERR;
+		sq_state = MLX5_SQC_STATE_ERR;
+		break;
+	case MLX5_CMD_OP_2RST_QP:
+		rq_state = MLX5_RQC_STATE_RST;
+		sq_state = MLX5_SQC_STATE_RST;
+		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+	case MLX5_CMD_OP_INIT2RTR_QP:
+	case MLX5_CMD_OP_RTR2RTS_QP:
+	case MLX5_CMD_OP_RTS2RTS_QP:
+		/* Nothing to do here... */
+		return 0;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (qp->rq.wqe_cnt) {
+		err = modify_raw_packet_qp_rq(dev->mdev, rq, rq_state);
+		if (err)
+			return err;
+	}
+
+	if (qp->sq.wqe_cnt)
+		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+
+	return 0;
+}
+
 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			       const struct ib_qp_attr *attr, int attr_mask,
 			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -2181,7 +2295,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	 * again to RTS, and may cause the driver and the device to get out of
 	 * sync. */
 	if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
-	    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
+	    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) &&
+	    (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
 		mlx5_ib_qp_disable_pagefaults(qp);
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
@@ -2192,12 +2307,17 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
 	in->optparam = cpu_to_be32(optpar);
-	err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event,
-				  &base->mqp);
+
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+		err = modify_raw_packet_qp(dev, qp, op);
+	else
+		err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event,
+					  &base->mqp);
 	if (err)
 		goto out;
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT &&
+	    (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
 		mlx5_ib_qp_enable_pagefaults(qp);
 
 	qp->state = new_state;
@@ -2256,11 +2376,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
 	}
 
-	if (ibqp->qp_type == IB_QPT_RAW_PACKET) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
 	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
 	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
 				ll))