author     Jack Morgenstein <jackm@dev.mellanox.co.il>   2007-06-21 05:27:47 -0400
committer  Roland Dreier <rolandd@cisco.com>             2007-07-12 18:41:00 -0400
commit     6a775e2ba4f7635849ade628e64723ab2beef0bc (patch)
tree       3c80d06d5f2f7135c81dddc2dcd6e536f9ecf22d /drivers/infiniband/hw
parent     6164c8cd1333403a28202f7c7e64ff9086d8f1aa (diff)
IB/mlx4: Implement query QP
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
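
The new query_qp method lets in-kernel consumers read a QP's current attributes back from the hardware through the core verbs layer, which dispatches ib_query_qp() to mlx4_ib_query_qp(). A minimal caller sketch follows; the helper name example_check_qp_state is hypothetical and not part of this patch:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: verify a QP has reached RTS before posting work.
 * ib_query_qp() invokes the driver's query_qp method, which on mlx4
 * hardware is the mlx4_ib_query_qp() added by this patch. */
static int example_check_qp_state(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;
	int ret;

	/* Only the state is needed here; the driver still fills the other
	 * attributes from the firmware QP context. */
	ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
	if (ret)
		return ret;

	return attr.qp_state == IB_QPS_RTS ? 0 : -EINVAL;
}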
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c    |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c      | 137
3 files changed, 141 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2fc8ccebaac1..6b9870a50bea 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -523,6 +523,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
+		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
@@ -550,6 +551,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
 	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
 	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
+	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
 	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
 	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
 	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 40b83914b7b2..d6dc57c5ccca 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -267,6 +267,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 int mlx4_ib_destroy_qp(struct ib_qp *qp);
 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata);
+int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+		     struct ib_qp_init_attr *qp_init_attr);
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr);
 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 28a08bdd1800..40042184ad58 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1455,3 +1455,140 @@ out:
 
 	return err;
 }
+
+static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
+{
+	switch (mlx4_state) {
+	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
+	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
+	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
+	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
+	case MLX4_QP_STATE_SQ_DRAINING:
+	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
+	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
+	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
+	default:                     return -1;
+	}
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
+{
+	switch (mlx4_mig_state) {
+	case MLX4_QP_PM_ARMED:    return IB_MIG_ARMED;
+	case MLX4_QP_PM_REARM:    return IB_MIG_REARM;
+	case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
+	default:                  return -1;
+	}
+}
+
+static int to_ib_qp_access_flags(int mlx4_flags)
+{
+	int ib_flags = 0;
+
+	if (mlx4_flags & MLX4_QP_BIT_RRE)
+		ib_flags |= IB_ACCESS_REMOTE_READ;
+	if (mlx4_flags & MLX4_QP_BIT_RWE)
+		ib_flags |= IB_ACCESS_REMOTE_WRITE;
+	if (mlx4_flags & MLX4_QP_BIT_RAE)
+		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+	return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+			  struct mlx4_qp_path *path)
+{
+	memset(ib_ah_attr, 0, sizeof *path);
+	ib_ah_attr->port_num      = path->sched_queue & 0x40 ? 2 : 1;
+
+	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+		return;
+
+	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
+	ib_ah_attr->sl            = (path->sched_queue >> 2) & 0xf;
+	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
+	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
+	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+	if (ib_ah_attr->ah_flags) {
+		ib_ah_attr->grh.sgid_index = path->mgid_index;
+		ib_ah_attr->grh.hop_limit  = path->hop_limit;
+		ib_ah_attr->grh.traffic_class =
+			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
+		ib_ah_attr->grh.flow_label =
+			be32_to_cpu(path->tclass_flowlabel) & 0xffffff;
+		memcpy(ib_ah_attr->grh.dgid.raw,
+			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+	}
+}
+
+int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+		     struct ib_qp_init_attr *qp_init_attr)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	struct mlx4_qp_context context;
+	int mlx4_state;
+	int err;
+
+	if (qp->state == IB_QPS_RESET) {
+		qp_attr->qp_state = IB_QPS_RESET;
+		goto done;
+	}
+
+	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
+	if (err)
+		return -EINVAL;
+
+	mlx4_state = be32_to_cpu(context.flags) >> 28;
+
+	qp_attr->qp_state        = to_ib_qp_state(mlx4_state);
+	qp_attr->path_mtu        = context.mtu_msgmax >> 5;
+	qp_attr->path_mig_state  =
+		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
+	qp_attr->qkey            = be32_to_cpu(context.qkey);
+	qp_attr->rq_psn          = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
+	qp_attr->sq_psn          = be32_to_cpu(context.next_send_psn) & 0xffffff;
+	qp_attr->dest_qp_num     = be32_to_cpu(context.remote_qpn) & 0xffffff;
+	qp_attr->qp_access_flags =
+		to_ib_qp_access_flags(be32_to_cpu(context.params2));
+
+	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+		to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
+		to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
+		qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
+	}
+
+	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
+	qp_attr->port_num   = context.pri_path.sched_queue & 0x40 ? 2 : 1;
+
+	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
+
+	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
+
+	qp_attr->max_dest_rd_atomic =
+		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
+	qp_attr->min_rnr_timer      =
+		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
+	qp_attr->timeout            = context.pri_path.ackto >> 3;
+	qp_attr->retry_cnt          = (be32_to_cpu(context.params1) >> 16) & 0x7;
+	qp_attr->rnr_retry          = (be32_to_cpu(context.params1) >> 13) & 0x7;
+	qp_attr->alt_timeout        = context.alt_path.ackto >> 3;
+
+done:
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	if (!ibqp->uobject) {
+		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+		qp_attr->cap.max_recv_wr  = qp->rq.wqe_cnt;
+		qp_attr->cap.max_send_sge = qp->sq.max_gs;
+		qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+		qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
+			send_wqe_overhead(qp->ibqp.qp_type) -
+			sizeof (struct mlx4_wqe_inline_seg);
+		qp_init_attr->cap = qp_attr->cap;
+	}
+
+	return 0;
+}
+
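
Because the patch also sets IB_USER_VERBS_CMD_QUERY_QP in the uverbs command mask, userspace can now reach the same path through libibverbs. A rough usage sketch, assuming a QP already created with ibv_create_qp(); the helper name print_qp_info is illustrative only:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Illustrative only: query an existing QP via libibverbs and print a couple
 * of the attributes that mlx4_ib_query_qp() now reports from the firmware
 * QP context. */
static int print_qp_info(struct ibv_qp *qp)
{
	struct ibv_qp_attr attr;
	struct ibv_qp_init_attr init_attr;
	int ret;

	ret = ibv_query_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PATH_MTU, &init_attr);
	if (ret)
		return ret;

	printf("QP 0x%x: state %d, path MTU enum %d\n",
	       qp->qp_num, attr.qp_state, attr.path_mtu);
	return 0;
}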