author     Eli Cohen <eli@mellanox.co.il>        2006-02-13 19:40:21 -0500
committer  Roland Dreier <rolandd@cisco.com>     2006-03-20 13:08:15 -0500
commit     8ebe5077e37a0cb0da527e397460188e6bfdd3ee (patch)
tree       401491fb7a94516dfbeeee027d8eeb294117cb5b /drivers/infiniband/hw
parent     8bdb0e8632e0f5061bd18b6934346cb609490135 (diff)
IB/mthca: Support for query QP and SRQ
Implement the query_qp and query_srq methods in mthca.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
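For context, the new methods are reached through the standard kernel verbs wrappers. The following is a minimal, hypothetical sketch (not part of this patch) of a consumer calling ib_query_qp() and ib_query_srq(), assuming "qp" and "srq" were created earlier; the core layer dispatches these calls to the driver's query_qp/query_srq methods, i.e. mthca_query_qp() and mthca_query_srq() here.

    /*
     * Hedged sketch, not part of this patch: how a kernel consumer would
     * exercise the new mthca methods through the core verbs layer.
     * "qp" and "srq" are assumed to be objects created earlier.
     */
    #include <rdma/ib_verbs.h>

    static void example_query(struct ib_qp *qp, struct ib_srq *srq)
    {
            struct ib_qp_attr qp_attr;
            struct ib_qp_init_attr init_attr;
            struct ib_srq_attr srq_attr;

            /* Dispatches to the driver's query_qp method (mthca_query_qp) */
            if (!ib_query_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_PATH_MTU, &init_attr))
                    printk(KERN_INFO "QP state %d, path MTU %d\n",
                           qp_attr.qp_state, qp_attr.path_mtu);

            /* Dispatches to the driver's query_srq method (mthca_query_srq) */
            if (!ib_query_srq(srq, &srq_attr))
                    printk(KERN_INFO "SRQ max_wr %u, limit %u\n",
                           srq_attr.max_wr, srq_attr.srq_limit);
    }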
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c       |   7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h       |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |   3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |   6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        | 135
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c       |  32
6 files changed, 184 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index acd00831ef08..890c060ff4d1 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1560,6 +1560,13 @@ int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                              CMD_TIME_CLASS_A, status);
 }
 
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+                    struct mthca_mailbox *mailbox, u8 *status)
+{
+        return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+                             CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
 {
         return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 5156701ae52c..e4ec35c40dd3 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -305,6 +305,8 @@ int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                     int srq_num, u8 *status);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                     int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+                    struct mthca_mailbox *mailbox, u8 *status);
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
 int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
                     enum ib_qp_state next, u32 num, int is_ee,
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index d827558c27be..2f4500f85ae0 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -479,6 +479,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
 int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                      enum ib_srq_attr_mask attr_mask);
+int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                      enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -489,6 +490,8 @@ int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                     enum ib_event_type event_type);
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                   struct ib_qp_init_attr *qp_init_attr);
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                           struct ib_send_wr **bad_wr);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1fa1b55ffffe..084bea592df5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1264,12 +1264,14 @@ int mthca_register_device(struct mthca_dev *dev)
                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)    |
                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)   |
                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)    |
+                (1ull << IB_USER_VERBS_CMD_QUERY_QP)     |
                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)    |
                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)   |
                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)   |
                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)   |
+                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)    |
                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
         dev->ib_dev.node_type     = IB_NODE_CA;
         dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
@@ -1291,7 +1293,8 @@ int mthca_register_device(struct mthca_dev *dev)
 
         if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                 dev->ib_dev.create_srq  = mthca_create_srq;
                 dev->ib_dev.modify_srq  = mthca_modify_srq;
+                dev->ib_dev.query_srq   = mthca_query_srq;
                 dev->ib_dev.destroy_srq = mthca_destroy_srq;
 
                 if (mthca_is_memfree(dev))
@@ -1302,6 +1305,7 @@ int mthca_register_device(struct mthca_dev *dev)
 
         dev->ib_dev.create_qp  = mthca_create_qp;
         dev->ib_dev.modify_qp  = mthca_modify_qp;
+        dev->ib_dev.query_qp   = mthca_query_qp;
         dev->ib_dev.destroy_qp = mthca_destroy_qp;
         dev->ib_dev.create_cq  = mthca_create_cq;
         dev->ib_dev.resize_cq  = mthca_resize_cq;
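With IB_USER_VERBS_CMD_QUERY_QP and IB_USER_VERBS_CMD_QUERY_SRQ now advertised in uverbs_cmd_mask, userspace can issue the matching uverbs commands. A rough, hypothetical libibverbs sketch (not part of this patch), assuming "qp" and "srq" were created earlier:

    /*
     * Hedged userspace sketch, not part of this patch: querying QP and SRQ
     * attributes through libibverbs once the QUERY_QP/QUERY_SRQ command bits
     * are advertised.  "qp" and "srq" are assumed to exist already.
     */
    #include <stdio.h>
    #include <infiniband/verbs.h>

    static void query_example(struct ibv_qp *qp, struct ibv_srq *srq)
    {
            struct ibv_qp_attr attr;
            struct ibv_qp_init_attr init_attr;
            struct ibv_srq_attr srq_attr;

            if (!ibv_query_qp(qp, &attr, IBV_QP_STATE | IBV_QP_CAP, &init_attr))
                    printf("QP state %d, max_send_wr %u\n",
                           attr.qp_state, attr.cap.max_send_wr);

            if (!ibv_query_srq(srq, &srq_attr))
                    printf("SRQ max_wr %u, srq_limit %u\n",
                           srq_attr.max_wr, srq_attr.srq_limit);
    }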
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c2d3300dace9..e99d735f5f36 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -348,6 +348,141 @@ static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
         return cpu_to_be32(hw_access_flags);
 }
 
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+        switch (mthca_state) {
+        case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
+        case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
+        case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
+        case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
+        case MTHCA_QP_STATE_DRAINING:
+        case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
+        case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
+        case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
+        default:                      return -1;
+        }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+        switch (mthca_mig_state) {
+        case 0:  return IB_MIG_ARMED;
+        case 1:  return IB_MIG_REARM;
+        case 3:  return IB_MIG_MIGRATED;
+        default: return -1;
+        }
+}
+
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+        int ib_flags = 0;
+
+        if (mthca_flags & MTHCA_QP_BIT_RRE)
+                ib_flags |= IB_ACCESS_REMOTE_READ;
+        if (mthca_flags & MTHCA_QP_BIT_RWE)
+                ib_flags |= IB_ACCESS_REMOTE_WRITE;
+        if (mthca_flags & MTHCA_QP_BIT_RAE)
+                ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+        return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+                          struct mthca_qp_path *path)
+{
+        memset(ib_ah_attr, 0, sizeof *path);
+        ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
+        ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
+        ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+        ib_ah_attr->static_rate   = path->static_rate & 0x7;
+        ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+        if (ib_ah_attr->ah_flags) {
+                ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
+                ib_ah_attr->grh.hop_limit  = path->hop_limit;
+                ib_ah_attr->grh.traffic_class =
+                        (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
+                ib_ah_attr->grh.flow_label =
+                        be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
+                memcpy(ib_ah_attr->grh.dgid.raw,
+                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+        }
+}
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                   struct ib_qp_init_attr *qp_init_attr)
+{
+        struct mthca_dev *dev = to_mdev(ibqp->device);
+        struct mthca_qp *qp = to_mqp(ibqp);
+        int err;
+        struct mthca_mailbox *mailbox;
+        struct mthca_qp_param *qp_param;
+        struct mthca_qp_context *context;
+        int mthca_state;
+        u8 status;
+
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+
+        err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+        if (err)
+                goto out;
+        if (status) {
+                mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
+                err = -EINVAL;
+                goto out;
+        }
+
+        qp_param    = mailbox->buf;
+        context     = &qp_param->context;
+        mthca_state = be32_to_cpu(context->flags) >> 28;
+
+        qp_attr->qp_state        = to_ib_qp_state(mthca_state);
+        qp_attr->cur_qp_state    = qp_attr->qp_state;
+        qp_attr->path_mtu        = context->mtu_msgmax >> 5;
+        qp_attr->path_mig_state  =
+                to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
+        qp_attr->qkey            = be32_to_cpu(context->qkey);
+        qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
+        qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
+        qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
+        qp_attr->qp_access_flags =
+                to_ib_qp_access_flags(be32_to_cpu(context->params2));
+        qp_attr->cap.max_send_wr     = qp->sq.max;
+        qp_attr->cap.max_recv_wr     = qp->rq.max;
+        qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+        qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+        to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+        to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+
+        qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+        qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+
+        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+        qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
+
+        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+
+        qp_attr->max_dest_rd_atomic =
+                1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
+        qp_attr->min_rnr_timer      =
+                (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
+        qp_attr->port_num           = qp_attr->ah_attr.port_num;
+        qp_attr->timeout            = context->pri_path.ackto >> 3;
+        qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
+        qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
+        qp_attr->alt_port_num       = qp_attr->alt_ah_attr.port_num;
+        qp_attr->alt_timeout        = context->alt_path.ackto >> 3;
+        qp_init_attr->cap           = qp_attr->cap;
+
+out:
+        mthca_free_mailbox(dev, mailbox);
+        return err;
+}
+
 static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
 {
         path->g_mylmc = ah->src_path_bits & 0x7f;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f1a1da147d0b..deb526ce013d 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -360,6 +360,38 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
         return 0;
 }
 
+int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+        struct mthca_dev *dev = to_mdev(ibsrq->device);
+        struct mthca_srq *srq = to_msrq(ibsrq);
+        struct mthca_mailbox *mailbox;
+        struct mthca_arbel_srq_context *arbel_ctx;
+        u8 status;
+        int err;
+
+        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+        if (IS_ERR(mailbox))
+                return PTR_ERR(mailbox);
+
+        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+        if (err)
+                goto out;
+
+        if (mthca_is_memfree(dev)) {
+                arbel_ctx = mailbox->buf;
+                srq_attr->srq_limit = arbel_ctx->limit_watermark;
+        } else
+                srq_attr->srq_limit = 0;
+
+        srq_attr->max_wr  = srq->max;
+        srq_attr->max_sge = srq->max_gs;
+
+out:
+        mthca_free_mailbox(dev, mailbox);
+
+        return err;
+}
+
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                      enum ib_event_type event_type)
 {
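As a usage note (not part of this patch), the new query pairs naturally with the existing SRQ limit arming: per mthca_query_srq() above, a non-zero srq_limit is only reported on mem-free (Arbel) HCAs, where it comes from the context's limit watermark. A hypothetical consumer sketch, assuming "srq" was created earlier and 16 is below its size:

    /*
     * Hedged sketch, not part of this patch: arm the SRQ limit via the
     * existing modify path, then read the armed watermark back through the
     * new query path.  "srq" is assumed to exist; 16 is an arbitrary limit.
     */
    #include <rdma/ib_verbs.h>

    static int arm_and_check_limit(struct ib_srq *srq)
    {
            struct ib_srq_attr attr = { .srq_limit = 16 };
            int err;

            err = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);  /* arm limit event */
            if (err)
                    return err;

            err = ib_query_srq(srq, &attr);                 /* calls mthca_query_srq() */
            if (!err)
                    printk(KERN_INFO "SRQ limit now %u\n", attr.srq_limit);

            return err;
    }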