diff options
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r-- | drivers/infiniband/hw/mlx4/Kconfig | 1 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/main.c | 6 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/mlx4_ib.h | 4 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/qp.c | 137 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/srq.c | 18 |
5 files changed, 164 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig index b8912cdb9663..4175a4bd0c78 100644 --- a/drivers/infiniband/hw/mlx4/Kconfig +++ b/drivers/infiniband/hw/mlx4/Kconfig | |||
@@ -1,6 +1,5 @@ | |||
1 | config MLX4_INFINIBAND | 1 | config MLX4_INFINIBAND |
2 | tristate "Mellanox ConnectX HCA support" | 2 | tristate "Mellanox ConnectX HCA support" |
3 | depends on INFINIBAND | ||
4 | select MLX4_CORE | 3 | select MLX4_CORE |
5 | ---help--- | 4 | ---help--- |
6 | This driver provides low-level InfiniBand support for | 5 | This driver provides low-level InfiniBand support for |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c591616dccde..dde8fe9af47e 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -169,7 +169,7 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | |||
169 | props->phys_state = out_mad->data[33] >> 4; | 169 | props->phys_state = out_mad->data[33] >> 4; |
170 | props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); | 170 | props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); |
171 | props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; | 171 | props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; |
172 | props->max_msg_sz = 0x80000000; | 172 | props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; |
173 | props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; | 173 | props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; |
174 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); | 174 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); |
175 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); | 175 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); |
@@ -523,11 +523,13 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
523 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | | 523 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | |
524 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | 524 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | |
525 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | 525 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | |
526 | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | | ||
526 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | 527 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | |
527 | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | | 528 | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | |
528 | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | | 529 | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | |
529 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | | 530 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | |
530 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | | 531 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | |
532 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | ||
531 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); | 533 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); |
532 | 534 | ||
533 | ibdev->ib_dev.query_device = mlx4_ib_query_device; | 535 | ibdev->ib_dev.query_device = mlx4_ib_query_device; |
@@ -546,10 +548,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
546 | ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah; | 548 | ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah; |
547 | ibdev->ib_dev.create_srq = mlx4_ib_create_srq; | 549 | ibdev->ib_dev.create_srq = mlx4_ib_create_srq; |
548 | ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq; | 550 | ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq; |
551 | ibdev->ib_dev.query_srq = mlx4_ib_query_srq; | ||
549 | ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq; | 552 | ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq; |
550 | ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv; | 553 | ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv; |
551 | ibdev->ib_dev.create_qp = mlx4_ib_create_qp; | 554 | ibdev->ib_dev.create_qp = mlx4_ib_create_qp; |
552 | ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp; | 555 | ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp; |
556 | ibdev->ib_dev.query_qp = mlx4_ib_query_qp; | ||
553 | ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp; | 557 | ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp; |
554 | ibdev->ib_dev.post_send = mlx4_ib_post_send; | 558 | ibdev->ib_dev.post_send = mlx4_ib_post_send; |
555 | ibdev->ib_dev.post_recv = mlx4_ib_post_recv; | 559 | ibdev->ib_dev.post_recv = mlx4_ib_post_recv; |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 24ccadd6e4f8..705ff2fa237e 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include <linux/compiler.h> | 36 | #include <linux/compiler.h> |
37 | #include <linux/list.h> | 37 | #include <linux/list.h> |
38 | #include <linux/mutex.h> | ||
38 | 39 | ||
39 | #include <rdma/ib_verbs.h> | 40 | #include <rdma/ib_verbs.h> |
40 | #include <rdma/ib_umem.h> | 41 | #include <rdma/ib_umem.h> |
@@ -255,6 +256,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
255 | struct ib_udata *udata); | 256 | struct ib_udata *udata); |
256 | int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 257 | int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
257 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | 258 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
259 | int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); | ||
258 | int mlx4_ib_destroy_srq(struct ib_srq *srq); | 260 | int mlx4_ib_destroy_srq(struct ib_srq *srq); |
259 | void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index); | 261 | void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index); |
260 | int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | 262 | int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, |
@@ -266,6 +268,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
266 | int mlx4_ib_destroy_qp(struct ib_qp *qp); | 268 | int mlx4_ib_destroy_qp(struct ib_qp *qp); |
267 | int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 269 | int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
268 | int attr_mask, struct ib_udata *udata); | 270 | int attr_mask, struct ib_udata *udata); |
271 | int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, | ||
272 | struct ib_qp_init_attr *qp_init_attr); | ||
269 | int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 273 | int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
270 | struct ib_send_wr **bad_wr); | 274 | struct ib_send_wr **bad_wr); |
271 | int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | 275 | int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 28a08bdd1800..40042184ad58 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1455,3 +1455,140 @@ out: | |||
1455 | 1455 | ||
1456 | return err; | 1456 | return err; |
1457 | } | 1457 | } |
1458 | |||
1459 | static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state) | ||
1460 | { | ||
1461 | switch (mlx4_state) { | ||
1462 | case MLX4_QP_STATE_RST: return IB_QPS_RESET; | ||
1463 | case MLX4_QP_STATE_INIT: return IB_QPS_INIT; | ||
1464 | case MLX4_QP_STATE_RTR: return IB_QPS_RTR; | ||
1465 | case MLX4_QP_STATE_RTS: return IB_QPS_RTS; | ||
1466 | case MLX4_QP_STATE_SQ_DRAINING: | ||
1467 | case MLX4_QP_STATE_SQD: return IB_QPS_SQD; | ||
1468 | case MLX4_QP_STATE_SQER: return IB_QPS_SQE; | ||
1469 | case MLX4_QP_STATE_ERR: return IB_QPS_ERR; | ||
1470 | default: return -1; | ||
1471 | } | ||
1472 | } | ||
1473 | |||
1474 | static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state) | ||
1475 | { | ||
1476 | switch (mlx4_mig_state) { | ||
1477 | case MLX4_QP_PM_ARMED: return IB_MIG_ARMED; | ||
1478 | case MLX4_QP_PM_REARM: return IB_MIG_REARM; | ||
1479 | case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED; | ||
1480 | default: return -1; | ||
1481 | } | ||
1482 | } | ||
1483 | |||
1484 | static int to_ib_qp_access_flags(int mlx4_flags) | ||
1485 | { | ||
1486 | int ib_flags = 0; | ||
1487 | |||
1488 | if (mlx4_flags & MLX4_QP_BIT_RRE) | ||
1489 | ib_flags |= IB_ACCESS_REMOTE_READ; | ||
1490 | if (mlx4_flags & MLX4_QP_BIT_RWE) | ||
1491 | ib_flags |= IB_ACCESS_REMOTE_WRITE; | ||
1492 | if (mlx4_flags & MLX4_QP_BIT_RAE) | ||
1493 | ib_flags |= IB_ACCESS_REMOTE_ATOMIC; | ||
1494 | |||
1495 | return ib_flags; | ||
1496 | } | ||
1497 | |||
1498 | static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr, | ||
1499 | struct mlx4_qp_path *path) | ||
1500 | { | ||
1501 | memset(ib_ah_attr, 0, sizeof *path); | ||
1502 | ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; | ||
1503 | |||
1504 | if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) | ||
1505 | return; | ||
1506 | |||
1507 | ib_ah_attr->dlid = be16_to_cpu(path->rlid); | ||
1508 | ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf; | ||
1509 | ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f; | ||
1510 | ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; | ||
1511 | ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0; | ||
1512 | if (ib_ah_attr->ah_flags) { | ||
1513 | ib_ah_attr->grh.sgid_index = path->mgid_index; | ||
1514 | ib_ah_attr->grh.hop_limit = path->hop_limit; | ||
1515 | ib_ah_attr->grh.traffic_class = | ||
1516 | (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; | ||
1517 | ib_ah_attr->grh.flow_label = | ||
1518 | be32_to_cpu(path->tclass_flowlabel) & 0xffffff; | ||
1519 | memcpy(ib_ah_attr->grh.dgid.raw, | ||
1520 | path->rgid, sizeof ib_ah_attr->grh.dgid.raw); | ||
1521 | } | ||
1522 | } | ||
1523 | |||
/*
 * mlx4_ib_query_qp - report current attributes of a QP back to the caller.
 *
 * Returns 0 on success, or -EINVAL if the firmware query fails.  For a QP
 * in RESET the firmware is not consulted; the software state is reported
 * directly.  Capability limits are filled in for kernel QPs only, since
 * userspace QPs own their work queues.
 *
 * NOTE(review): nothing serializes this against a concurrent modify-QP;
 * qp->state and the queried context could be momentarily inconsistent —
 * confirm whether qp->mutex should be held here.
 */
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err;

	/* A reset QP has no firmware context worth querying */
	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err)
		return -EINVAL;

	/* Hardware state lives in the top nibble of the flags word */
	mlx4_state = be32_to_cpu(context.flags) >> 28;

	/* NOTE(review): to_ib_qp_state() can return -1 for an unknown
	 * hardware state; that value is propagated unchecked. */
	qp_attr->qp_state	     = to_ib_qp_state(mlx4_state);
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	/* Address vectors only exist for connected QP types */
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	/* Bit 6 of sched_queue selects the physical port (0 -> 1, 1 -> 2) */
	qp_attr->port_num   = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	/* RDMA/atomic depths are stored as log2 in 3-bit fields */
	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	/* Only kernel QPs know their queue geometry; userspace QPs leave
	 * the cap fields untouched here.
	 * NOTE(review): for userspace QPs qp_init_attr->cap is never
	 * filled in on this path — confirm callers tolerate that. */
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr     = qp->sq.wqe_cnt;
		qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
		qp_attr->cap.max_send_sge    = qp->sq.max_gs;
		qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
		/* Inline capacity is what's left of a send WQE after the
		 * per-QP-type overhead and the inline segment header */
		qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
			send_wqe_overhead(qp->ibqp.qp_type) -
			sizeof (struct mlx4_wqe_inline_seg);
		qp_init_attr->cap	     = qp_attr->cap;
	}

	return 0;
}
1594 | |||
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 12fac1c8989d..408748fb5285 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -240,6 +240,24 @@ int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |||
240 | return 0; | 240 | return 0; |
241 | } | 241 | } |
242 | 242 | ||
243 | int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | ||
244 | { | ||
245 | struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); | ||
246 | struct mlx4_ib_srq *srq = to_msrq(ibsrq); | ||
247 | int ret; | ||
248 | int limit_watermark; | ||
249 | |||
250 | ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark); | ||
251 | if (ret) | ||
252 | return ret; | ||
253 | |||
254 | srq_attr->srq_limit = be16_to_cpu(limit_watermark); | ||
255 | srq_attr->max_wr = srq->msrq.max - 1; | ||
256 | srq_attr->max_sge = srq->msrq.max_gs; | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | |||
243 | int mlx4_ib_destroy_srq(struct ib_srq *srq) | 261 | int mlx4_ib_destroy_srq(struct ib_srq *srq) |
244 | { | 262 | { |
245 | struct mlx4_ib_dev *dev = to_mdev(srq->device); | 263 | struct mlx4_ib_dev *dev = to_mdev(srq->device); |