author     Roland Dreier <rolandd@cisco.com>   2006-02-03 17:53:28 -0500
committer  Roland Dreier <rolandd@cisco.com>   2006-03-20 13:08:12 -0500
commit     3fa1fa3e809dc009a080ca9f052cee2e17836c63
tree       471bc486f70e8cd4d3950b0ff256f12db057b951 /drivers/infiniband
parent     5e9f71a16c56af7e2ff8b7ea251e0fd90b5de560
IB/mthca: Generate SQ drained events when requested
Add low-level driver support to ib_mthca so that consumers can request
that a "send queue drained" event be generated when a transition to the
SQD state completes.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
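
For context, a consumer asks for this notification through the standard
ib_modify_qp() verb by setting IB_QP_EN_SQD_ASYNC_NOTIFY in the attribute
mask together with en_sqd_async_notify. The sketch below is illustrative
only and not part of this patch; the function name and the assumption that
"qp" is an already-connected QP owned by the caller are hypothetical.

#include <rdma/ib_verbs.h>

/* Illustrative sketch, not part of this patch: move an RTS QP to SQD
 * and ask for an asynchronous "send queue drained" notification.
 * "qp" is assumed to be a connected struct ib_qp owned by the caller. */
static int request_sq_drain(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_state            = IB_QPS_SQD,
		.en_sqd_async_notify = 1,	/* generate the async event */
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY);
}

Once the transition completes, the core reports it to the QP's asynchronous
event handler as IB_EVENT_SQ_DRAINED (see the sketch after the diff).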
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_cmd.c |  3
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_qp.c  | 14
2 files changed, 15 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 7150fd143bff..9e7baa8b57a9 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1638,7 +1638,8 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
 		}
 
 	} else
-		err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
+		err = mthca_cmd(dev, mailbox->dma,
+				optmask | (!!is_ee << 24) | num,
 				op_mod, op[trans], CMD_TIME_CLASS_C, status);
 
 	if (my_mailbox)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bdba39a8d9f9..97f5303d2c02 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -415,6 +415,12 @@ static const struct {
 		},
 		[IB_QPS_SQD] = {
 			.trans = MTHCA_TRANS_RTS2SQD,
+			.opt_param = {
+				[UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
+				[UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
+				[RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
+				[MLX] = IB_QP_EN_SQD_ASYNC_NOTIFY
+			}
 		},
 	},
 	[IB_QPS_SQD] = {
@@ -577,6 +583,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *qp_context;
 	u32 req_param, opt_param;
+	u32 sqd_event = 0;
 	u8 status;
 	int err;
 
@@ -841,8 +848,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_context->srqn = cpu_to_be32(1 << 24 |
 					       to_msrq(ibqp->srq)->srqn);
 
+	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
+	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
+	    attr->en_sqd_async_notify)
+		sqd_event = 1 << 31;
+
 	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
-			      qp->qpn, 0, mailbox, 0, &status);
+			      qp->qpn, 0, mailbox, sqd_event, &status);
 	if (status) {
 		mthca_warn(dev, "modify QP %d returned status %02x.\n",
 			   state_table[cur_state][new_state].trans, status);
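
When the firmware finishes draining the send queue, the resulting
asynchronous event reaches the consumer through the QP's event handler as
IB_EVENT_SQ_DRAINED. The handler below is a hypothetical sketch of that
receiving side, not part of this patch; the handler name is an assumption,
and it would be installed via the event_handler field of
struct ib_qp_init_attr when the QP is created.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical consumer-side handler, not part of this patch: installed
 * as .event_handler in struct ib_qp_init_attr at QP creation time. */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	if (event->event == IB_EVENT_SQ_DRAINED)
		printk(KERN_INFO "QP 0x%x: send queue drained\n",
		       event->element.qp->qp_num);
}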