Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mthca/Makefile          |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c       |  24
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h       |   5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        |  32
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h       |  24
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c      |  48
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c   |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.h   |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |  82
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h  |  28
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        |  33
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c       | 591
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h      |  11
13 files changed, 857 insertions, 25 deletions
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index 5dcbd43073e2..1eb87408e069 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -9,4 +9,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
 ib_mthca-y :=	mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
 	mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
 	mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
-	mthca_provider.o mthca_memfree.o mthca_uar.o
+	mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index c258c1b7022e..60e4b213635a 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -109,6 +109,7 @@ enum {
 	CMD_SW2HW_SRQ       = 0x35,
 	CMD_HW2SW_SRQ       = 0x36,
 	CMD_QUERY_SRQ       = 0x37,
+	CMD_ARM_SRQ         = 0x40,
 
 	/* QP/EE commands */
 	CMD_RST2INIT_QPEE   = 0x19,
@@ -1032,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 
 	mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
 		  dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
+	mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+		  dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
 	mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
 		  dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
 	mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
@@ -1500,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 			     CMD_TIME_CLASS_A, status);
 }
 
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int srq_num, u8 *status)
+{
+	return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
+			 CMD_TIME_CLASS_A, status);
+}
+
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int srq_num, u8 *status)
+{
+	return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
+			     CMD_HW2SW_SRQ,
+			     CMD_TIME_CLASS_A, status);
+}
+
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
+{
+	return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
+			 CMD_TIME_CLASS_B, status);
+}
+
 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
 		    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
 		    u8 *status)
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 11f02a61432a..ef2a765d6953 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -298,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		   int cq_num, u8 *status);
 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		   int cq_num, u8 *status);
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int srq_num, u8 *status);
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int srq_num, u8 *status);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
 		    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
 		    u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 5dee908c2f34..5ece609c2ee0 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -224,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+		    struct mthca_srq *srq)
 {
 	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
@@ -265,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
 	 */
 	while (prod_index > cq->cons_index) {
 		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
-		if (cqe->my_qpn == cpu_to_be32(qpn))
+		if (cqe->my_qpn == cpu_to_be32(qpn)) {
+			if (srq)
+				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
 			++nfreed;
+		}
 		else if (nfreed)
 			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
 				       cq->ibcq.cqe),
@@ -455,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 						 >> wq->wqe_shift);
 		entry->wr_id = (*cur_qp)->wrid[wqe_index +
 					       (*cur_qp)->rq.max];
+	} else if ((*cur_qp)->ibqp.srq) {
+		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
+		u32 wqe = be32_to_cpu(cqe->wqe);
+		wq = NULL;
+		wqe_index = wqe >> srq->wqe_shift;
+		entry->wr_id = srq->wrid[wqe_index];
+		mthca_free_srq_wqe(srq, wqe);
 	} else {
 		wq = &(*cur_qp)->rq;
 		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
 		entry->wr_id = (*cur_qp)->wrid[wqe_index];
 	}
 
-	if (wq->last_comp < wqe_index)
-		wq->tail += wqe_index - wq->last_comp;
-	else
-		wq->tail += wqe_index + wq->max - wq->last_comp;
-
-	wq->last_comp = wqe_index;
-
-	if (0)
-		mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
-			  is_send ? "Send" : "Receive",
-			  (*cur_qp)->qpn, wqe_index, wq->max);
+	if (wq) {
+		if (wq->last_comp < wqe_index)
+			wq->tail += wqe_index - wq->last_comp;
+		else
+			wq->tail += wqe_index + wq->max - wq->last_comp;
+
+		wq->last_comp = wqe_index;
+	}
 
 	if (is_error) {
 		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index cb78b5d07201..7bff5a8425f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -218,6 +218,13 @@ struct mthca_cq_table {
 	struct mthca_icm_table *table;
 };
 
+struct mthca_srq_table {
+	struct mthca_alloc      alloc;
+	spinlock_t              lock;
+	struct mthca_array      srq;
+	struct mthca_icm_table *table;
+};
+
 struct mthca_qp_table {
 	struct mthca_alloc alloc;
 	u32 rdb_base;
@@ -299,6 +306,7 @@ struct mthca_dev {
 	struct mthca_mr_table  mr_table;
 	struct mthca_eq_table  eq_table;
 	struct mthca_cq_table  cq_table;
+	struct mthca_srq_table srq_table;
 	struct mthca_qp_table  qp_table;
 	struct mthca_av_table  av_table;
 	struct mthca_mcg_table mcg_table;
@@ -372,6 +380,7 @@ int mthca_init_pd_table(struct mthca_dev *dev);
 int mthca_init_mr_table(struct mthca_dev *dev);
 int mthca_init_eq_table(struct mthca_dev *dev);
 int mthca_init_cq_table(struct mthca_dev *dev);
+int mthca_init_srq_table(struct mthca_dev *dev);
 int mthca_init_qp_table(struct mthca_dev *dev);
 int mthca_init_av_table(struct mthca_dev *dev);
 int mthca_init_mcg_table(struct mthca_dev *dev);
@@ -381,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev);
 void mthca_cleanup_mr_table(struct mthca_dev *dev);
 void mthca_cleanup_eq_table(struct mthca_dev *dev);
 void mthca_cleanup_cq_table(struct mthca_dev *dev);
+void mthca_cleanup_srq_table(struct mthca_dev *dev);
 void mthca_cleanup_qp_table(struct mthca_dev *dev);
 void mthca_cleanup_av_table(struct mthca_dev *dev);
 void mthca_cleanup_mcg_table(struct mthca_dev *dev);
@@ -431,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn);
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+		    struct mthca_srq *srq);
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+		    struct ib_srq_attr *attr, struct mthca_srq *srq);
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+		     enum ib_event_type event_type);
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
+			      struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
+			      struct ib_recv_wr **bad_wr);
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 		    enum ib_event_type event_type);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 16c5d4a805f0..3241d6c9dc11 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
 	profile = default_profile;
 	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
 	profile.uarc_size = 0;
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		profile.num_srq = dev_lim.max_srqs;
 
 	err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
 	if (err < 0)
@@ -424,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
 	}
 
 	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
 						     dev_lim->cqc_entry_sz,
 						     mdev->limits.num_cqs,
 						     mdev->limits.reserved_cqs, 0);
 	if (!mdev->cq_table.table) {
 		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
 		err = -ENOMEM;
 		goto err_unmap_rdb;
 	}
 
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
+		mdev->srq_table.table =
+			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
+					      dev_lim->srq_entry_sz,
+					      mdev->limits.num_srqs,
+					      mdev->limits.reserved_srqs, 0);
+		if (!mdev->srq_table.table) {
+			mthca_err(mdev, "Failed to map SRQ context memory, "
+				  "aborting.\n");
+			err = -ENOMEM;
+			goto err_unmap_cq;
+		}
+	}
+
 	/*
 	 * It's not strictly required, but for simplicity just map the
 	 * whole multicast group table now. The table isn't very big
@@ -448,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
 	if (!mdev->mcg_table.table) {
 		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
 		err = -ENOMEM;
-		goto err_unmap_cq;
+		goto err_unmap_srq;
 	}
 
 	return 0;
 
+err_unmap_srq:
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		mthca_free_icm_table(mdev, mdev->srq_table.table);
+
 err_unmap_cq:
 	mthca_free_icm_table(mdev, mdev->cq_table.table);
 
@@ -532,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 	profile = default_profile;
 	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
 	profile.num_udav = 0;
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		profile.num_srq = dev_lim.max_srqs;
 
 	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
 	if ((int) icm_size < 0) {
@@ -558,6 +580,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 	return 0;
 
 err_free_icm:
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		mthca_free_icm_table(mdev, mdev->srq_table.table);
 	mthca_free_icm_table(mdev, mdev->cq_table.table);
 	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
 	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -587,6 +611,8 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 	mthca_CLOSE_HCA(mdev, 0, &status);
 
 	if (mthca_is_memfree(mdev)) {
+		if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+			mthca_free_icm_table(mdev, mdev->srq_table.table);
 		mthca_free_icm_table(mdev, mdev->cq_table.table);
 		mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
 		mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -731,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
 		goto err_cmd_poll;
 	}
 
+	err = mthca_init_srq_table(dev);
+	if (err) {
+		mthca_err(dev, "Failed to initialize "
+			  "shared receive queue table, aborting.\n");
+		goto err_cq_table_free;
+	}
+
 	err = mthca_init_qp_table(dev);
 	if (err) {
 		mthca_err(dev, "Failed to initialize "
 			  "queue pair table, aborting.\n");
-		goto err_cq_table_free;
+		goto err_srq_table_free;
 	}
 
 	err = mthca_init_av_table(dev);
@@ -760,6 +793,9 @@ err_av_table_free:
 err_qp_table_free:
 	mthca_cleanup_qp_table(dev);
 
+err_srq_table_free:
+	mthca_cleanup_srq_table(dev);
+
 err_cq_table_free:
 	mthca_cleanup_cq_table(dev);
 
@@ -1046,6 +1082,7 @@ err_cleanup:
 	mthca_cleanup_mcg_table(mdev);
 	mthca_cleanup_av_table(mdev);
 	mthca_cleanup_qp_table(mdev);
+	mthca_cleanup_srq_table(mdev);
 	mthca_cleanup_cq_table(mdev);
 	mthca_cmd_use_polling(mdev);
 	mthca_cleanup_eq_table(mdev);
@@ -1095,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
 	mthca_cleanup_mcg_table(mdev);
 	mthca_cleanup_av_table(mdev);
 	mthca_cleanup_qp_table(mdev);
+	mthca_cleanup_srq_table(mdev);
 	mthca_cleanup_cq_table(mdev);
 	mthca_cmd_use_polling(mdev);
 	mthca_cleanup_eq_table(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 9b280661f2a1..0576056b34f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -102,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
 	profile[MTHCA_RES_UARC].size = request->uarc_size;
 
 	profile[MTHCA_RES_QP].num   = request->num_qp;
+	profile[MTHCA_RES_SRQ].num  = request->num_srq;
 	profile[MTHCA_RES_EQP].num  = request->num_qp;
 	profile[MTHCA_RES_RDB].num  = request->num_qp * request->rdb_per_qp;
 	profile[MTHCA_RES_CQ].num   = request->num_cq;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 0d4f070a3fa1..94641808f97f 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -42,6 +42,7 @@
 struct mthca_profile {
 	int num_qp;
 	int rdb_per_qp;
+	int num_srq;
 	int num_cq;
 	int num_mcg;
 	int num_mpt;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 08a7340e19ff..23ceb26af8fe 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -425,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah)
 	return 0;
 }
 
+static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+				       struct ib_srq_init_attr *init_attr,
+				       struct ib_udata *udata)
+{
+	struct mthca_create_srq ucmd;
+	struct mthca_ucontext *context = NULL;
+	struct mthca_srq *srq;
+	int err;
+
+	srq = kmalloc(sizeof *srq, GFP_KERNEL);
+	if (!srq)
+		return ERR_PTR(-ENOMEM);
+
+	if (pd->uobject) {
+		context = to_mucontext(pd->uobject->context);
+
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+			return ERR_PTR(-EFAULT);
+
+		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+					context->db_tab, ucmd.db_index,
+					ucmd.db_page);
+
+		if (err)
+			goto err_free;
+
+		srq->mr.ibmr.lkey = ucmd.lkey;
+		srq->db_index     = ucmd.db_index;
+	}
+
+	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
+			      &init_attr->attr, srq);
+
+	if (err && pd->uobject)
+		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+				    context->db_tab, ucmd.db_index);
+
+	if (err)
+		goto err_free;
+
+	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
+		mthca_free_srq(to_mdev(pd->device), srq);
+		err = -EFAULT;
+		goto err_free;
+	}
+
+	return &srq->ibsrq;
+
+err_free:
+	kfree(srq);
+
+	return ERR_PTR(err);
+}
+
+static int mthca_destroy_srq(struct ib_srq *srq)
+{
+	struct mthca_ucontext *context;
+
+	if (srq->uobject) {
+		context = to_mucontext(srq->uobject->context);
+
+		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
+				    context->db_tab, to_msrq(srq)->db_index);
+	}
+
+	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
+	kfree(srq);
+
+	return 0;
+}
+
 static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 				     struct ib_qp_init_attr *init_attr,
 				     struct ib_udata *udata)
@@ -1003,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev)
 	dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
 	dev->ib_dev.create_ah = mthca_ah_create;
 	dev->ib_dev.destroy_ah = mthca_ah_destroy;
+
+	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
+		dev->ib_dev.create_srq = mthca_create_srq;
+		dev->ib_dev.destroy_srq = mthca_destroy_srq;
+
+		if (mthca_is_memfree(dev))
+			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
+		else
+			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
+	}
+
 	dev->ib_dev.create_qp = mthca_create_qp;
 	dev->ib_dev.modify_qp = mthca_modify_qp;
 	dev->ib_dev.destroy_qp = mthca_destroy_qp;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index b95249ee46cf..024015678c8a 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -197,6 +197,29 @@ struct mthca_cq {
 	wait_queue_head_t wait;
 };
 
+struct mthca_srq {
+	struct ib_srq    ibsrq;
+	spinlock_t       lock;
+	atomic_t         refcount;
+	int              srqn;
+	int              max;
+	int              max_gs;
+	int              wqe_shift;
+	int              first_free;
+	int              last_free;
+	u16              counter;  /* Arbel only */
+	int              db_index; /* Arbel only */
+	__be32          *db;       /* Arbel only */
+	void            *last;
+
+	int              is_direct;
+	u64             *wrid;
+	union mthca_buf  queue;
+	struct mthca_mr  mr;
+
+	wait_queue_head_t wait;
+};
+
 struct mthca_wq {
 	spinlock_t lock;
 	int        max;
@@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
 	return container_of(ibcq, struct mthca_cq, ibcq);
 }
 
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+	return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
 static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
 {
 	return container_of(ibqp, struct mthca_qp, ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ebb8f4a3dd80..7607b9800736 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -612,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
 
 	if (mthca_is_memfree(dev)) {
-		qp_context->rq_size_stride =
-			((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
-		qp_context->sq_size_stride =
-			((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
+		if (qp->rq.max)
+			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
+		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
+
+		if (qp->sq.max)
+			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
+		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
 	}
 
 	/* leave arbel_sched_queue as 0 */
@@ -784,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
 	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
 
+	if (ibqp->srq)
+		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
+
 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
 		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
@@ -806,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
 	}
 
+	if (ibqp->srq)
+		qp_context->srqn = cpu_to_be32(1 << 24 |
+					       to_msrq(ibqp->srq)->srqn);
+
 	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
 			      qp->qpn, 0, mailbox, 0, &status);
 	if (status) {
@@ -1260,9 +1270,11 @@ void mthca_free_qp(struct mthca_dev *dev,
 	 * unref the mem-free tables and free the QPN in our table.
 	 */
 	if (!qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_free_memfree(dev, qp);
 		mthca_free_wqe_buf(dev, qp);
@@ -2008,6 +2020,15 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
 {
 	struct mthca_next_seg *next;
 
+	/*
+	 * For SRQs, all WQEs generate a CQE, so we're always at the
+	 * end of the doorbell chain.
+	 */
+	if (qp->ibqp.srq) {
+		*new_wqe = 0;
+		return 0;
+	}
+
 	if (is_send)
 		next = get_send_wqe(qp, index);
 	else
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
new file mode 100644
index 000000000000..75cd2d84ef12
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include "mthca_dev.h"
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+#include "mthca_wqe.h"
+
+enum {
+	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
+};
+
+struct mthca_tavor_srq_context {
+	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
+	__be32 state_pd;
+	__be32 lkey;
+	__be32 uar;
+	__be32 wqe_cnt;
+	u32    reserved[2];
+};
+
+struct mthca_arbel_srq_context {
+	__be32 state_logsize_srqn;
+	__be32 lkey;
+	__be32 db_index;
+	__be32 logstride_usrpage;
+	__be64 wqe_base;
+	__be32 eq_pd;
+	__be16 limit_watermark;
+	__be16 wqe_cnt;
+	u16    reserved1;
+	__be16 wqe_counter;
+	u32    reserved2[3];
+};
+
+static void *get_wqe(struct mthca_srq *srq, int n)
+{
+	if (srq->is_direct)
+		return srq->queue.direct.buf + (n << srq->wqe_shift);
+	else
+		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
+			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
+}
+
+/*
+ * Return a pointer to the location within a WQE that we're using as a
+ * link when the WQE is in the free list. We use an offset of 4
+ * because in the Tavor case, posting a WQE may overwrite the first
+ * four bytes of the previous WQE. The offset avoids corrupting our
+ * free list if the WQE has already completed and been put on the free
+ * list when we post the next WQE.
+ */
+static inline int *wqe_to_link(void *wqe)
+{
+	return (int *) (wqe + 4);
+}
+
+static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
+					 struct mthca_pd *pd,
+					 struct mthca_srq *srq,
+					 struct mthca_tavor_srq_context *context)
+{
+	memset(context, 0, sizeof *context);
+
+	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
+	context->state_pd    = cpu_to_be32(pd->pd_num);
+	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);
+
+	if (pd->ibpd.uobject)
+		context->uar =
+			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+	else
+		context->uar = cpu_to_be32(dev->driver_uar.index);
+}
+
+static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
+					 struct mthca_pd *pd,
+					 struct mthca_srq *srq,
+					 struct mthca_arbel_srq_context *context)
+{
+	int logsize;
+
+	memset(context, 0, sizeof *context);
+
+	logsize = long_log2(srq->max) + srq->wqe_shift;
+	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
+	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
+	context->db_index = cpu_to_be32(srq->db_index);
+	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
+	if (pd->ibpd.uobject)
+		context->logstride_usrpage |=
+			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+	else
+		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
+	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
+}
+
+static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
+		       srq->is_direct, &srq->mr);
+	kfree(srq->wrid);
+}
+
+static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
+			       struct mthca_srq *srq)
+{
+	struct mthca_data_seg *scatter;
+	void *wqe;
+	int err;
+	int i;
+
+	if (pd->ibpd.uobject)
+		return 0;
+
+	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
+	if (!srq->wrid)
+		return -ENOMEM;
+
+	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
+			      MTHCA_MAX_DIRECT_SRQ_SIZE,
+			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
+	if (err) {
+		kfree(srq->wrid);
+		return err;
+	}
+
+	/*
+	 * Now initialize the SRQ buffer so that all of the WQEs are
+	 * linked into the list of free WQEs. In addition, set the
+	 * scatter list L_Keys to the sentry value of 0x100.
+	 */
+	for (i = 0; i < srq->max; ++i) {
+		wqe = get_wqe(srq, i);
+
+		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+
+		for (scatter = wqe + sizeof (struct mthca_next_seg);
+		     (void *) scatter < wqe + (1 << srq->wqe_shift);
+		     ++scatter)
+			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+	}
+
+	return 0;
+}
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+		    struct ib_srq_attr *attr, struct mthca_srq *srq)
+{
+	struct mthca_mailbox *mailbox;
+	u8 status;
+	int ds;
+	int err;
+
+	/* Sanity check SRQ size before proceeding */
+	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
+		return -EINVAL;
+
+	srq->max     = attr->max_wr;
+	srq->max_gs  = attr->max_sge;
+	srq->last    = NULL;
+	srq->counter = 0;
+
+	if (mthca_is_memfree(dev))
+		srq->max = roundup_pow_of_two(srq->max + 1);
+
+	ds = min(64UL,
+		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
+				    srq->max_gs * sizeof (struct mthca_data_seg)));
+	srq->wqe_shift = long_log2(ds);
+
+	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
+	if (srq->srqn == -1)
+		return -ENOMEM;
+
+	if (mthca_is_memfree(dev)) {
+		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
+		if (err)
+			goto err_out;
+
+		if (!pd->ibpd.uobject) {
+			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
+						       srq->srqn, &srq->db);
+			if (srq->db_index < 0) {
+				err = -ENOMEM;
+				goto err_out_icm;
+			}
+		}
+	}
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto err_out_db;
+	}
+
+	err = mthca_alloc_srq_buf(dev, pd, srq);
+	if (err)
+		goto err_out_mailbox;
+
+	spin_lock_init(&srq->lock);
+	atomic_set(&srq->refcount, 1);
+	init_waitqueue_head(&srq->wait);
+
+	if (mthca_is_memfree(dev))
+		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+	else
+		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+
+	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+
+	if (err) {
+		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
+		goto err_out_free_buf;
+	}
+	if (status) {
+		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
+			   status);
+		err = -EINVAL;
+		goto err_out_free_buf;
+	}
+
+	spin_lock_irq(&dev->srq_table.lock);
+	if (mthca_array_set(&dev->srq_table.srq,
+			    srq->srqn & (dev->limits.num_srqs - 1),
+			    srq)) {
+		spin_unlock_irq(&dev->srq_table.lock);
+		goto err_out_free_srq;
+	}
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	mthca_free_mailbox(dev, mailbox);
+
+	srq->first_free = 0;
+	srq->last_free  = srq->max - 1;
+
+	return 0;
+
+err_out_free_srq:
+	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+	if (err)
+		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+	else if (status)
+		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+err_out_free_buf:
+	if (!pd->ibpd.uobject)
+		mthca_free_srq_buf(dev, srq);
+
+err_out_mailbox:
+	mthca_free_mailbox(dev, mailbox);
+
+err_out_db:
+	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+
+err_out_icm:
+	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+
+err_out:
+	mthca_free(&dev->srq_table.alloc, srq->srqn);
+
+	return err;
+}
+
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	struct mthca_mailbox *mailbox;
+	int err;
+	u8 status;
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox)) {
+		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
+		return;
+	}
+
+	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+	if (err)
+		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+	else if (status)
+		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+	spin_lock_irq(&dev->srq_table.lock);
+	mthca_array_clear(&dev->srq_table.srq,
+			  srq->srqn & (dev->limits.num_srqs - 1));
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	atomic_dec(&srq->refcount);
+	wait_event(srq->wait, !atomic_read(&srq->refcount));
+
+	if (!srq->ibsrq.uobject) {
+		mthca_free_srq_buf(dev, srq);
+		if (mthca_is_memfree(dev))
+			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+	}
+
+	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+	mthca_free(&dev->srq_table.alloc, srq->srqn);
+	mthca_free_mailbox(dev, mailbox);
+}
+
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+		     enum ib_event_type event_type)
+{
+	struct mthca_srq *srq;
+	struct ib_event event;
+
+	spin_lock(&dev->srq_table.lock);
+	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
+	if (srq)
+		atomic_inc(&srq->refcount);
+	spin_unlock(&dev->srq_table.lock);
+
+	if (!srq) {
+		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+		return;
+	}
+
+	if (!srq->ibsrq.event_handler)
+		goto out;
+
+	event.device      = &dev->ib_dev;
+	event.event       = event_type;
+	event.element.srq = &srq->ibsrq;
+	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+
+out:
+	if (atomic_dec_and_test(&srq->refcount))
+		wake_up(&srq->wait);
+}
+
+/*
+ * This function must be called with IRQs disabled.
+ */
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
+{
+	int ind;
+
+	ind = wqe_addr >> srq->wqe_shift;
+
+	spin_lock(&srq->lock);
+
+	if (likely(srq->first_free >= 0))
+		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+	else
+		srq->first_free = ind;
+
+	*wqe_to_link(get_wqe(srq, ind)) = -1;
+	srq->last_free = ind;
+
+	spin_unlock(&srq->lock);
+}
+
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			      struct ib_recv_wr **bad_wr)
+{
+	struct mthca_dev *dev = to_mdev(ibsrq->device);
+	struct mthca_srq *srq = to_msrq(ibsrq);
+	unsigned long flags;
+	int err = 0;
+	int first_ind;
+	int ind;
+	int next_ind;
+	int nreq;
+	int i;
+	void *wqe;
+	void *prev_wqe;
+
+	spin_lock_irqsave(&srq->lock, flags);
+
+	first_ind = srq->first_free;
+
+	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		ind = srq->first_free;
+
+		if (ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			return nreq;
+		}
+
+		wqe       = get_wqe(srq, ind);
+		next_ind  = *wqe_to_link(wqe);
+		prev_wqe  = srq->last;
+		srq->last = wqe;
+
+		((struct mthca_next_seg *) wqe)->nda_op = 0;
+		((struct mthca_next_seg *) wqe)->ee_nds = 0;
+		/* flags field will always remain 0 */
+
+		wqe += sizeof (struct mthca_next_seg);
+
+		if (unlikely(wr->num_sge > srq->max_gs)) {
+			err = -EINVAL;
+			*bad_wr = wr;
+			srq->last = prev_wqe;
+			return nreq;
+		}
+
+		for (i = 0; i < wr->num_sge; ++i) {
+			((struct mthca_data_seg *) wqe)->byte_count =
+				cpu_to_be32(wr->sg_list[i].length);
+			((struct mthca_data_seg *) wqe)->lkey =
+				cpu_to_be32(wr->sg_list[i].lkey);
+			((struct mthca_data_seg *) wqe)->addr =
+				cpu_to_be64(wr->sg_list[i].addr);
+			wqe += sizeof (struct mthca_data_seg);
+		}
+
+		if (i < srq->max_gs) {
+			((struct mthca_data_seg *) wqe)->byte_count = 0;
+			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+			((struct mthca_data_seg *) wqe)->addr = 0;
+		}
+
+		if (likely(prev_wqe)) {
+			((struct mthca_next_seg *) prev_wqe)->nda_op =
+				cpu_to_be32((ind << srq->wqe_shift) | 1);
+			wmb();
+			((struct mthca_next_seg *) prev_wqe)->ee_nds =
+				cpu_to_be32(MTHCA_NEXT_DBD);
+		}
+
+		srq->wrid[ind]  = wr->wr_id;
+		srq->first_free = next_ind;
+	}
+
+	return nreq;
+
+	if (likely(nreq)) {
+		__be32 doorbell[2];
+
+		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
+
+		/*
+		 * Make sure that descriptors are written before
+		 * doorbell is rung.
+		 */
+		wmb();
+
+		mthca_write64(doorbell,
+			      dev->kar + MTHCA_RECEIVE_DOORBELL,
+			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+	}
+
+	spin_unlock_irqrestore(&srq->lock, flags);
+	return err;
+}
+
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			      struct ib_recv_wr **bad_wr)
+{
+	struct mthca_dev *dev = to_mdev(ibsrq->device);
+	struct mthca_srq *srq = to_msrq(ibsrq);
+	unsigned long flags;
+	int err = 0;
+	int ind;
+	int next_ind;
+	int nreq;
+	int i;
+	void *wqe;
+
+	spin_lock_irqsave(&srq->lock, flags);
+
+	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		ind = srq->first_free;
+
+		if (ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			return nreq;
+		}
+
+		wqe       = get_wqe(srq, ind);
+		next_ind  = *wqe_to_link(wqe);
+
+		((struct mthca_next_seg *) wqe)->nda_op =
+			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
+		((struct mthca_next_seg *) wqe)->ee_nds = 0;
+		/* flags field will always remain 0 */
+
+		wqe += sizeof (struct mthca_next_seg);
+
+		if (unlikely(wr->num_sge > srq->max_gs)) {
+			err = -EINVAL;
+			*bad_wr = wr;
+			return nreq;
+		}
+
+		for (i = 0; i < wr->num_sge; ++i) {
+			((struct mthca_data_seg *) wqe)->byte_count =
+				cpu_to_be32(wr->sg_list[i].length);
+			((struct mthca_data_seg *) wqe)->lkey =
+				cpu_to_be32(wr->sg_list[i].lkey);
+			((struct mthca_data_seg *) wqe)->addr =
+				cpu_to_be64(wr->sg_list[i].addr);
+			wqe += sizeof (struct mthca_data_seg);
+		}
+
+		if (i < srq->max_gs) {
+			((struct mthca_data_seg *) wqe)->byte_count = 0;
+			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+			((struct mthca_data_seg *) wqe)->addr = 0;
+		}
+
+		srq->wrid[ind]  = wr->wr_id;
+		srq->first_free = next_ind;
+	}
+
+	if (likely(nreq)) {
+		srq->counter += nreq;
+
+		/*
+		 * Make sure that descriptors are written before
+		 * we write doorbell record.
+		 */
+		wmb();
+		*srq->db = cpu_to_be32(srq->counter);
+	}
+
+	spin_unlock_irqrestore(&srq->lock, flags);
+	return err;
+}
+
+int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+{
+	int err;
+
+	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+		return 0;
+
+	spin_lock_init(&dev->srq_table.lock);
+
+	err = mthca_alloc_init(&dev->srq_table.alloc,
+			       dev->limits.num_srqs,
+			       dev->limits.num_srqs - 1,
+			       dev->limits.reserved_srqs);
+	if (err)
+		return err;
+
+	err = mthca_array_init(&dev->srq_table.srq,
+			       dev->limits.num_srqs);
+	if (err)
+		mthca_alloc_cleanup(&dev->srq_table.alloc);
+
+	return err;
+}
+
+void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+{
+	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+		return;
+
+	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
+	mthca_alloc_cleanup(&dev->srq_table.alloc);
+}
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 3024c1b4547d..41613ec8a04e 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -69,6 +69,17 @@ struct mthca_create_cq_resp {
 	__u32 reserved;
 };
 
+struct mthca_create_srq {
+	__u32 lkey;
+	__u32 db_index;
+	__u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+	__u32 srqn;
+	__u32 reserved;
+};
+
 struct mthca_create_qp {
 	__u32 lkey;
 	__u32 reserved;