author     Roland Dreier <rolandd@cisco.com>  2005-07-07 20:57:20 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-07-07 21:23:50 -0400
commit     80c8ec2c04e539aac4e9810a46bc04c1b424b4dd (patch)
tree       2f6b7a3e455f93435797c85a36ea3d07e74fb0cc /drivers/infiniband/hw
parent     74c2174e7be52f9d2d210511bf3b490f4b41574c (diff)
[PATCH] IB uverbs: add mthca user QP support
Add support for userspace queue pairs (QPs) to mthca.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
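For context: mthca_create_qp() below reads a per-QP command structure from userspace via ib_copy_from_udata(). A sketch of that structure, reconstructed only from the fields this diff actually uses (ucmd.lkey, ucmd.sq_db_index/sq_db_page, ucmd.rq_db_index/rq_db_page) — field order and the reserved member are assumptions; the authoritative layout lives in mthca_user.h:

	#include <linux/types.h>

	/* Sketch of the userspace create-QP command consumed below;
	 * reconstructed from its uses in this diff, not quoted from the tree. */
	struct mthca_create_qp {
		__u32 lkey;		/* lkey covering the user-registered WQE buffer */
		__u32 reserved;		/* assumed padding for 64-bit alignment */
		__u64 sq_db_page;	/* user virtual address of SQ doorbell page */
		__u64 rq_db_page;	/* user virtual address of RQ doorbell page */
		__u32 sq_db_index;	/* SQ doorbell record index within that page */
		__u32 rq_db_index;	/* RQ doorbell record index within that page */
	};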
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h      |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c |  80
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       | 215
3 files changed, 212 insertions, 85 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 751f69479a78..5ecdd2eeeb0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -440,12 +440,14 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 			   struct mthca_cq *recv_cq,
 			   enum ib_qp_type type,
 			   enum ib_sig_type send_policy,
+			   struct ib_qp_cap *cap,
 			   struct mthca_qp *qp);
 int mthca_alloc_sqp(struct mthca_dev *dev,
 		    struct mthca_pd *pd,
 		    struct mthca_cq *send_cq,
 		    struct mthca_cq *recv_cq,
 		    enum ib_sig_type send_policy,
+		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
 		    struct mthca_sqp *sqp);
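Both prototypes gain a struct ib_qp_cap * parameter so callers can pass the requested queue capabilities down into allocation. For reference, the verbs capability struct these functions now consume — fields listed from memory of the ib_verbs.h of this era, so treat this as a sketch and verify against the tree:

	/* Kernel-internal verbs QP capability struct (sketch) */
	struct ib_qp_cap {
		u32 max_send_wr;	/* outstanding send work requests */
		u32 max_recv_wr;	/* outstanding receive work requests */
		u32 max_send_sge;	/* scatter/gather entries per send WR */
		u32 max_recv_sge;	/* scatter/gather entries per receive WR */
		u32 max_inline_data;	/* bytes of inline data per send WR */
	};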
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9feb7618ba41..7a58ce90e179 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -424,6 +424,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 				     struct ib_qp_init_attr *init_attr,
 				     struct ib_udata *udata)
 {
+	struct mthca_create_qp ucmd;
 	struct mthca_qp *qp;
 	int err;
 
@@ -432,41 +433,82 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	case IB_QPT_UC:
 	case IB_QPT_UD:
 	{
+		struct mthca_ucontext *context;
+
 		qp = kmalloc(sizeof *qp, GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
-		qp->sq.max    = init_attr->cap.max_send_wr;
-		qp->rq.max    = init_attr->cap.max_recv_wr;
-		qp->sq.max_gs = init_attr->cap.max_send_sge;
-		qp->rq.max_gs = init_attr->cap.max_recv_sge;
+		if (pd->uobject) {
+			context = to_mucontext(pd->uobject->context);
+
+			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+				return ERR_PTR(-EFAULT);
+
+			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+						context->db_tab,
+						ucmd.sq_db_index, ucmd.sq_db_page);
+			if (err) {
+				kfree(qp);
+				return ERR_PTR(err);
+			}
+
+			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+						context->db_tab,
+						ucmd.rq_db_index, ucmd.rq_db_page);
+			if (err) {
+				mthca_unmap_user_db(to_mdev(pd->device),
+						    &context->uar,
+						    context->db_tab,
+						    ucmd.sq_db_index);
+				kfree(qp);
+				return ERR_PTR(err);
+			}
+
+			qp->mr.ibmr.lkey = ucmd.lkey;
+			qp->sq.db_index  = ucmd.sq_db_index;
+			qp->rq.db_index  = ucmd.rq_db_index;
+		}
 
 		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
 				     to_mcq(init_attr->send_cq),
 				     to_mcq(init_attr->recv_cq),
 				     init_attr->qp_type, init_attr->sq_sig_type,
-				     qp);
+				     &init_attr->cap, qp);
+
+		if (err && pd->uobject) {
+			context = to_mucontext(pd->uobject->context);
+
+			mthca_unmap_user_db(to_mdev(pd->device),
+					    &context->uar,
+					    context->db_tab,
+					    ucmd.sq_db_index);
+			mthca_unmap_user_db(to_mdev(pd->device),
+					    &context->uar,
+					    context->db_tab,
+					    ucmd.rq_db_index);
+		}
+
 		qp->ibqp.qp_num = qp->qpn;
 		break;
 	}
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	{
+		/* Don't allow userspace to create special QPs */
+		if (pd->uobject)
+			return ERR_PTR(-EINVAL);
+
 		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
-		qp->sq.max    = init_attr->cap.max_send_wr;
-		qp->rq.max    = init_attr->cap.max_recv_wr;
-		qp->sq.max_gs = init_attr->cap.max_send_sge;
-		qp->rq.max_gs = init_attr->cap.max_recv_sge;
-
 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 
 		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
 				      to_mcq(init_attr->send_cq),
 				      to_mcq(init_attr->recv_cq),
-				      init_attr->sq_sig_type,
+				      init_attr->sq_sig_type, &init_attr->cap,
 				      qp->ibqp.qp_num, init_attr->port_num,
 				      to_msqp(qp));
 		break;
@@ -481,13 +523,27 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 		return ERR_PTR(err);
 	}
 
 	init_attr->cap.max_inline_data = 0;
+	init_attr->cap.max_send_wr     = qp->sq.max;
+	init_attr->cap.max_recv_wr     = qp->rq.max;
+	init_attr->cap.max_send_sge    = qp->sq.max_gs;
+	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
 	return &qp->ibqp;
 }
 
 static int mthca_destroy_qp(struct ib_qp *qp)
 {
+	if (qp->uobject) {
+		mthca_unmap_user_db(to_mdev(qp->device),
+				    &to_mucontext(qp->uobject->context)->uar,
+				    to_mucontext(qp->uobject->context)->db_tab,
+				    to_mqp(qp)->sq.db_index);
+		mthca_unmap_user_db(to_mdev(qp->device),
+				    &to_mucontext(qp->uobject->context)->uar,
+				    to_mucontext(qp->uobject->context)->db_tab,
+				    to_mqp(qp)->rq.db_index);
+	}
 	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
 	kfree(qp);
 	return 0;
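With the provider changes above, a userspace consumer reaches this path through the standard verbs create-QP call. A hypothetical usage sketch — not part of this patch, and it assumes a matching libibverbs/libmthca userspace that registers the WQE buffer and allocates doorbell records before issuing the command:

	#include <stdio.h>
	#include <infiniband/verbs.h>

	static int create_ud_qp(struct ibv_pd *pd, struct ibv_cq *cq)
	{
		struct ibv_qp_init_attr attr = {
			.send_cq = cq,
			.recv_cq = cq,
			.cap     = {
				.max_send_wr  = 64,	/* kernel rejects > 65536 */
				.max_recv_wr  = 64,
				.max_send_sge = 1,	/* kernel rejects > 64 */
				.max_recv_sge = 1,
			},
			.qp_type = IBV_QPT_UD,
		};
		struct ibv_qp *qp = ibv_create_qp(pd, &attr);

		if (!qp)
			return -1;

		/* attr.cap is written back with the actual (possibly rounded-up)
		 * sizes, mirroring the init_attr->cap updates in the diff above. */
		printf("created QP %u with %u send WRs\n",
		       qp->qp_num, attr.cap.max_send_wr);

		return ibv_destroy_qp(qp);
	}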
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 163a8ef4186f..f7126b14d5ae 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -46,7 +47,9 @@ enum {
 	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
 	MTHCA_ACK_REQ_FREQ       = 10,
 	MTHCA_FLIGHT_LIMIT       = 9,
-	MTHCA_UD_HEADER_SIZE     = 72 /* largest UD header possible */
+	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
+	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
+	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
 };
 
 enum {
@@ -689,7 +692,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
 	/* leave arbel_sched_queue as 0 */
 
-	qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
+	if (qp->ibqp.uobject)
+		qp_context->usr_page =
+			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
+	else
+		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
 	qp_context->local_qpn = cpu_to_be32(qp->qpn);
 	if (attr_mask & IB_QP_DEST_QPN) {
 		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
@@ -954,6 +961,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
 	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
 				    1 << qp->sq.wqe_shift);
+
+	/*
+	 * If this is a userspace QP, we don't actually have to
+	 * allocate anything.  All we need is to calculate the WQE
+	 * sizes and the send_wqe_offset, so we're done now.
+	 */
+	if (pd->ibpd.uobject)
+		return 0;
+
 	size = PAGE_ALIGN(qp->send_wqe_offset +
 			  (qp->sq.max << qp->sq.wqe_shift));
 
@@ -1053,10 +1069,32 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 	return err;
 }
 
-static int mthca_alloc_memfree(struct mthca_dev *dev,
+static void mthca_free_wqe_buf(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
-	int ret = 0;
+	int i;
+	int size = PAGE_ALIGN(qp->send_wqe_offset +
+			      (qp->sq.max << qp->sq.wqe_shift));
+
+	if (qp->is_direct) {
+		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+				  pci_unmap_addr(&qp->queue.direct, mapping));
+	} else {
+		for (i = 0; i < size / PAGE_SIZE; ++i) {
+			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+					  qp->queue.page_list[i].buf,
+					  pci_unmap_addr(&qp->queue.page_list[i],
+							 mapping));
+		}
+	}
+
+	kfree(qp->wrid);
+}
+
+static int mthca_map_memfree(struct mthca_dev *dev,
+			     struct mthca_qp *qp)
+{
+	int ret;
 
 	if (mthca_is_memfree(dev)) {
 		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
@@ -1067,35 +1105,15 @@ static int mthca_alloc_memfree(struct mthca_dev *dev,
 		if (ret)
 			goto err_qpc;
 
 		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
 				      qp->qpn << dev->qp_table.rdb_shift);
 		if (ret)
 			goto err_eqpc;
-
-		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
-						 qp->qpn, &qp->rq.db);
-		if (qp->rq.db_index < 0) {
-			ret = -ENOMEM;
-			goto err_rdb;
-		}
 
-		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
-						 qp->qpn, &qp->sq.db);
-		if (qp->sq.db_index < 0) {
-			ret = -ENOMEM;
-			goto err_rq_db;
-		}
 	}
 
 	return 0;
 
-err_rq_db:
-	mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
-
-err_rdb:
-	mthca_table_put(dev, dev->qp_table.rdb_table,
-			qp->qpn << dev->qp_table.rdb_shift);
-
 err_eqpc:
 	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
 
@@ -1105,6 +1123,35 @@ err_qpc:
 	return ret;
 }
 
+static void mthca_unmap_memfree(struct mthca_dev *dev,
+				struct mthca_qp *qp)
+{
+	mthca_table_put(dev, dev->qp_table.rdb_table,
+			qp->qpn << dev->qp_table.rdb_shift);
+	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
+}
+
+static int mthca_alloc_memfree(struct mthca_dev *dev,
+			       struct mthca_qp *qp)
+{
+	int ret = 0;
+
+	if (mthca_is_memfree(dev)) {
+		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
+						 qp->qpn, &qp->rq.db);
+		if (qp->rq.db_index < 0)
+			return ret;
+
+		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
+						 qp->qpn, &qp->sq.db);
+		if (qp->sq.db_index < 0)
+			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+	}
+
+	return ret;
+}
+
 static void mthca_free_memfree(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
@@ -1112,11 +1159,6 @@ static void mthca_free_memfree(struct mthca_dev *dev,
 		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
 		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
 	}
-
-	mthca_table_put(dev, dev->qp_table.rdb_table,
-			qp->qpn << dev->qp_table.rdb_shift);
-	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
-	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
 }
 
 static void mthca_wq_init(struct mthca_wq* wq)
@@ -1147,13 +1189,28 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	mthca_wq_init(&qp->sq);
 	mthca_wq_init(&qp->rq);
 
-	ret = mthca_alloc_memfree(dev, qp);
+	ret = mthca_map_memfree(dev, qp);
 	if (ret)
 		return ret;
 
 	ret = mthca_alloc_wqe_buf(dev, pd, qp);
 	if (ret) {
-		mthca_free_memfree(dev, qp);
+		mthca_unmap_memfree(dev, qp);
+		return ret;
+	}
+
+	/*
+	 * If this is a userspace QP, we're done now.  The doorbells
+	 * will be allocated and buffers will be initialized in
+	 * userspace.
+	 */
+	if (pd->ibpd.uobject)
+		return 0;
+
+	ret = mthca_alloc_memfree(dev, qp);
+	if (ret) {
+		mthca_free_wqe_buf(dev, qp);
+		mthca_unmap_memfree(dev, qp);
 		return ret;
 	}
 
@@ -1186,22 +1243,39 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	return 0;
 }
 
-static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
+static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
+			     struct mthca_qp *qp)
 {
-	int i;
-
-	if (!mthca_is_memfree(dev))
-		return;
+	/* Sanity check QP size before proceeding */
+	if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 ||
+	    cap->max_send_sge > 64 || cap->max_recv_sge > 64)
+		return -EINVAL;
 
-	for (i = 0; 1 << i < qp->rq.max; ++i)
-		; /* nothing */
+	if (mthca_is_memfree(dev)) {
+		qp->rq.max = cap->max_recv_wr ?
+			roundup_pow_of_two(cap->max_recv_wr) : 0;
+		qp->sq.max = cap->max_send_wr ?
+			roundup_pow_of_two(cap->max_send_wr) : 0;
+	} else {
+		qp->rq.max = cap->max_recv_wr;
+		qp->sq.max = cap->max_send_wr;
+	}
 
-	qp->rq.max = 1 << i;
+	qp->rq.max_gs = cap->max_recv_sge;
+	qp->sq.max_gs = max_t(int, cap->max_send_sge,
+			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
+				    MTHCA_INLINE_CHUNK_SIZE) /
+			      sizeof (struct mthca_data_seg));
 
-	for (i = 0; 1 << i < qp->sq.max; ++i)
-		; /* nothing */
+	/*
+	 * For MLX transport we need 2 extra S/G entries:
+	 * one for the header and one for the checksum at the end
+	 */
+	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
+	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
+		return -EINVAL;
 
-	qp->sq.max = 1 << i;
+	return 0;
 }
 
 int mthca_alloc_qp(struct mthca_dev *dev,
@@ -1210,11 +1284,14 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 		   struct mthca_cq *recv_cq,
 		   enum ib_qp_type type,
 		   enum ib_sig_type send_policy,
+		   struct ib_qp_cap *cap,
 		   struct mthca_qp *qp)
 {
 	int err;
 
-	mthca_align_qp_size(dev, qp);
+	err = mthca_set_qp_size(dev, cap, qp);
+	if (err)
+		return err;
 
 	switch (type) {
 	case IB_QPT_RC: qp->transport = RC; break;
@@ -1247,14 +1324,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 		    struct mthca_cq *send_cq,
 		    struct mthca_cq *recv_cq,
 		    enum ib_sig_type send_policy,
+		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
 		    struct mthca_sqp *sqp)
 {
-	int err = 0;
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
+	int err;
 
-	mthca_align_qp_size(dev, &sqp->qp);
+	err = mthca_set_qp_size(dev, cap, &sqp->qp);
+	if (err)
+		return err;
 
 	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
 	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1313,8 +1393,6 @@ void mthca_free_qp(struct mthca_dev *dev,
 		  struct mthca_qp *qp)
 {
 	u8 status;
-	int size;
-	int i;
 	struct mthca_cq *send_cq;
 	struct mthca_cq *recv_cq;
 
@@ -1344,31 +1422,22 @@ void mthca_free_qp(struct mthca_dev *dev,
 	if (qp->state != IB_QPS_RESET)
 		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
 
-	mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
-	if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
-
-	mthca_free_mr(dev, &qp->mr);
-
-	size = PAGE_ALIGN(qp->send_wqe_offset +
-			  (qp->sq.max << qp->sq.wqe_shift));
-
-	if (qp->is_direct) {
-		pci_free_consistent(dev->pdev, size,
-				    qp->queue.direct.buf,
-				    pci_unmap_addr(&qp->queue.direct, mapping));
-	} else {
-		for (i = 0; i < size / PAGE_SIZE; ++i) {
-			pci_free_consistent(dev->pdev, PAGE_SIZE,
-					    qp->queue.page_list[i].buf,
-					    pci_unmap_addr(&qp->queue.page_list[i],
-							   mapping));
-		}
+	/*
+	 * If this is a userspace QP, the buffers, MR, CQs and so on
+	 * will be cleaned up in userspace, so all we have to do is
+	 * unref the mem-free tables and free the QPN in our table.
+	 */
+	if (!qp->ibqp.uobject) {
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
+		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
+
+		mthca_free_mr(dev, &qp->mr);
+		mthca_free_memfree(dev, qp);
+		mthca_free_wqe_buf(dev, qp);
 	}
 
-	kfree(qp->wrid);
-
-	mthca_free_memfree(dev, qp);
+	mthca_unmap_memfree(dev, qp);
 
 	if (is_sqp(dev, qp)) {
 		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
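Note: mthca_set_qp_size() replaces the old align-to-power-of-two loops and additionally derives sq.max_gs from the requested inline-data size. A standalone sketch of that arithmetic — MTHCA_DATA_SEG_SIZE here is an assumption standing in for sizeof(struct mthca_data_seg) (16 bytes if the segment is two 32-bit words plus a 64-bit address):

	#include <stdint.h>

	#define MTHCA_INLINE_HEADER_SIZE 4   /* from the enum added above */
	#define MTHCA_INLINE_CHUNK_SIZE  16
	#define MTHCA_DATA_SEG_SIZE      16  /* assumed sizeof(struct mthca_data_seg) */

	/* smallest power of two >= v, for v >= 1; mem-free HCAs round WR counts up */
	static uint32_t roundup_pow2(uint32_t v)
	{
		uint32_t r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	/* sq.max_gs: the larger of the requested SGE count and the number of
	 * data segments needed to hold the requested inline data */
	static uint32_t send_max_gs(uint32_t max_send_sge, uint32_t max_inline_data)
	{
		uint32_t bytes = max_inline_data + MTHCA_INLINE_HEADER_SIZE;

		/* ALIGN(bytes, MTHCA_INLINE_CHUNK_SIZE) */
		bytes = (bytes + MTHCA_INLINE_CHUNK_SIZE - 1) &
			~(uint32_t)(MTHCA_INLINE_CHUNK_SIZE - 1);
		return max_send_sge > bytes / MTHCA_DATA_SEG_SIZE ?
			max_send_sge : bytes / MTHCA_DATA_SEG_SIZE;
	}

For example, max_inline_data = 28 gives 28 + 4 = 32 bytes, already 16-aligned, i.e. two data segments, so a request for one send SGE is bumped to max_gs = 2.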