author	Yishai Hadas <yishaih@mellanox.com>	2016-05-23 08:20:56 -0400
committer	Doug Ledford <dledford@redhat.com>	2016-06-23 11:02:44 -0400
commit	28d6137008b2aa09e35750c604394e363dbfca94 (patch)
tree	e8c7bb82541ab073768071ca2c6d9acdce1be9f6 /drivers/infiniband/hw/mlx5/qp.c
parent	c70285f880e88cb4f73effb722065a182ba5936f (diff)
IB/mlx5: Add RSS QP support
Add support for Raw Ethernet RX HASH QP. Currently, creation and
destruction of such a QP are supported. This QP is implemented as
a simple TIR object which points to the receive RQ indirection table.
The given hashing configuration is used to configure the TIR and by
that it chooses the right RQ from the RQ indirection table.

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Matan Barak <matanb@mellanox.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	200
1 file changed, 200 insertions(+), 0 deletions(-)
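A note on the uAPI this patch consumes: the checks in create_rss_raw_qp_tir() below (required_cmd_sz ending at reserved1, the zeroed reserved fields, the Toeplitz key length test) imply a specific layout for the command userspace hands in through udata. The sketch below reconstructs that layout from the fields the code reads; field order, sizes, and comments are inferences, and the mlx5 uapi header introduced by this series remains the authoritative definition.

/*
 * Sketch of the udata command parsed by create_rss_raw_qp_tir(),
 * reconstructed from the validation code; ordering and sizes are
 * inferred, not copied from the uapi header.
 */
struct mlx5_ib_create_qp_rss {
	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8  rx_hash_function;    /* enum mlx5_rx_hash_function_flags */
	__u8  rx_key_len;          /* must match the TIR Toeplitz key size */
	__u8  reserved[6];         /* must be zero (memchr_inv() check) */
	__u8  rx_hash_key[128];    /* valid only for Toeplitz */
	__u32 comp_mask;           /* must be zero for now */
	__u32 reserved1;           /* must be zero; required_cmd_sz ends here */
};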
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1c0e332146b5..f9df4b527a11 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1266,6 +1266,187 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
 	rq->doorbell = &qp->db;
 }
 
+static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+}
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+				 struct ib_pd *pd,
+				 struct ib_qp_init_attr *init_attr,
+				 struct ib_udata *udata)
+{
+	struct ib_uobject *uobj = pd->uobject;
+	struct ib_ucontext *ucontext = uobj->context;
+	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
+	struct mlx5_ib_create_qp_resp resp = {};
+	int inlen;
+	int err;
+	u32 *in;
+	void *tirc;
+	void *hfso;
+	u32 selected_fields = 0;
+	size_t min_resp_len;
+	u32 tdn = mucontext->tdn;
+	struct mlx5_ib_create_qp_rss ucmd = {};
+	size_t required_cmd_sz;
+
+	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+		return -EOPNOTSUPP;
+
+	if (init_attr->create_flags || init_attr->send_cq)
+		return -EINVAL;
+
+	min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+	if (udata->outlen < min_resp_len)
+		return -EINVAL;
+
+	required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+	if (udata->inlen < required_cmd_sz) {
+		mlx5_ib_dbg(dev, "invalid inlen\n");
+		return -EINVAL;
+	}
+
+	if (udata->inlen > sizeof(ucmd) &&
+	    !ib_is_udata_cleared(udata, sizeof(ucmd),
+				 udata->inlen - sizeof(ucmd))) {
+		mlx5_ib_dbg(dev, "inlen is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+		mlx5_ib_dbg(dev, "copy failed\n");
+		return -EFAULT;
+	}
+
+	if (ucmd.comp_mask) {
+		mlx5_ib_dbg(dev, "invalid comp mask\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
+		mlx5_ib_dbg(dev, "invalid reserved\n");
+		return -EOPNOTSUPP;
+	}
+
+	err = ib_copy_to_udata(udata, &resp, min_resp_len);
+	if (err) {
+		mlx5_ib_dbg(dev, "copy failed\n");
+		return -EINVAL;
+	}
+
+	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+	MLX5_SET(tirc, tirc, disp_type,
+		 MLX5_TIRC_DISP_TYPE_INDIRECT);
+	MLX5_SET(tirc, tirc, indirect_table,
+		 init_attr->rwq_ind_tbl->ind_tbl_num);
+	MLX5_SET(tirc, tirc, transport_domain, tdn);
+
+	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+	switch (ucmd.rx_hash_function) {
+	case MLX5_RX_HASH_FUNC_TOEPLITZ:
+	{
+		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+
+		if (len != ucmd.rx_key_len) {
+			err = -EINVAL;
+			goto err;
+		}
+
+		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+		memcpy(rss_key, ucmd.rx_hash_key, len);
+		break;
+	}
+	default:
+		err = -EOPNOTSUPP;
+		goto err;
+	}
+
+	if (!ucmd.rx_hash_fields_mask) {
+		/* special case when this TIR serves as steering entry without hashing */
+		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
+			goto create_tir;
+		err = -EINVAL;
+		goto err;
+	}
+
+	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
+	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+
+	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
+	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
+	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+
+	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
+
+	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
+
+	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+
+	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
+
+create_tir:
+	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+
+	if (err)
+		goto err;
+
+	kvfree(in);
+	/* qpn is reserved for that QP */
+	qp->trans_qp.base.mqp.qpn = 0;
+	return 0;
+
+err:
+	kvfree(in);
+	return err;
+}
+
 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1292,6 +1473,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
+	if (init_attr->rwq_ind_tbl) {
+		if (!udata)
+			return -ENOSYS;
+
+		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
+		return err;
+	}
+
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
 		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
@@ -1644,6 +1833,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 	struct mlx5_modify_qp_mbox_in *in;
 	int err;
 
+	if (qp->ibqp.rwq_ind_tbl) {
+		destroy_rss_raw_qp_tir(dev, qp);
+		return;
+	}
+
 	base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
 	       &qp->raw_packet_qp.rq.base :
 	       &qp->trans_qp.base;
@@ -2504,6 +2698,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	int port;
 	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 
+	if (ibqp->rwq_ind_tbl)
+		return -ENOSYS;
+
 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
 		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
 
@@ -4119,6 +4316,9 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	int err = 0;
 	u8 raw_packet_qp_state;
 
+	if (ibqp->rwq_ind_tbl)
+		return -ENOSYS;
+
 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
 		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
 					    qp_init_attr);
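To illustrate how the rx_hash_fields_mask rules enforced in create_rss_raw_qp_tir() compose (IPv4 and IPv6 bits are mutually exclusive, as are TCP and UDP bits), here is a hypothetical userspace helper that builds a valid 4-tuple IPv4/TCP configuration against the command layout sketched earlier. The helper name is illustrative and not part of the patch; the MLX5_RX_HASH_* enum values are taken to come from the companion uAPI patch in this series.

#include <string.h>

/*
 * Hypothetical helper: fill a valid IPv4/TCP 4-tuple RSS command.
 * The caller must pass the exact TIR Toeplitz key length (checked
 * against MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key) in the kernel),
 * which also fits in rx_hash_key[].
 */
static void fill_rss_cmd_ipv4_tcp(struct mlx5_ib_create_qp_rss *ucmd,
				  const __u8 *key, __u8 key_len)
{
	memset(ucmd, 0, sizeof(*ucmd));	/* reserved fields must stay zero */
	ucmd->rx_hash_function = MLX5_RX_HASH_FUNC_TOEPLITZ;
	ucmd->rx_key_len = key_len;	/* must equal the TIR key field size */
	memcpy(ucmd->rx_hash_key, key, key_len);
	/*
	 * 4-tuple hash over src/dst IPv4 and src/dst TCP port. Adding any
	 * IPV6 or PORT_UDP bit to this mask would trip the mutual-exclusion
	 * checks in the kernel and fail with -EINVAL.
	 */
	ucmd->rx_hash_fields_mask = MLX5_RX_HASH_SRC_IPV4 |
				    MLX5_RX_HASH_DST_IPV4 |
				    MLX5_RX_HASH_SRC_PORT_TCP |
				    MLX5_RX_HASH_DST_PORT_TCP;
}

Note that a command with rx_hash_fields_mask == 0 is only accepted when the indirection table has a single entry (log_ind_tbl_size == 0), matching the "steering entry without hashing" special case in the patch.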