author    Roland Dreier <roland@eddore.topspincom.com>  2005-08-19 13:59:31 -0400
committer Roland Dreier <rolandd@cisco.com>             2005-08-26 23:37:37 -0400
commit    ec34a922d243c3401a694450734e9effb2bafbfe (patch)
tree      7d79ed1848d1b63665d7565274c1d2b56d09df9d /drivers/infiniband/hw/mthca/mthca_main.c
parent    d20a40192868082eff6fec729b311cb8463b4a21 (diff)
[PATCH] IB/mthca: Add SRQ implementation
Add mthca support for shared receive queues (SRQs), including userspace SRQs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_main.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c  48
1 file changed, 43 insertions(+), 5 deletions(-)
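For context, the SRQ support added here is reached through the IB midlayer verbs rather than called directly. The fragment below is only an illustrative sketch of how a kernel consumer might create an SRQ; the setup_srq() helper name, the header path, and the queue sizes are assumptions for illustration and are not part of this patch.

    #include <rdma/ib_verbs.h>

    /* Illustrative sketch only: create an SRQ through the IB midlayer.
     * The helper name and the sizes below are assumed for illustration;
     * they are not part of this patch. */
    static struct ib_srq *setup_srq(struct ib_pd *pd)
    {
            struct ib_srq_init_attr srq_init_attr = {
                    .attr = {
                            .max_wr  = 128, /* assumed shared receive depth */
                            .max_sge = 1,   /* assumed scatter/gather entries */
                    },
            };

            /* The verbs call dispatches to the provider's create_srq method
             * (mthca's, on this hardware) and returns an ERR_PTR on failure. */
            return ib_create_srq(pd, &srq_init_attr);
    }

A QP can then share the queue by pointing the srq field of its struct ib_qp_init_attr at the returned SRQ before ib_create_qp(), and receive buffers are posted with ib_post_srq_recv() instead of ib_post_recv().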
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 16c5d4a805f0..3241d6c9dc11 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
        profile = default_profile;
        profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
        profile.uarc_size = 0;
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               profile.num_srq = dev_lim.max_srqs;
 
        err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
        if (err < 0)
@@ -424,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
        }
 
        mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
                                                     dev_lim->cqc_entry_sz,
                                                     mdev->limits.num_cqs,
                                                     mdev->limits.reserved_cqs, 0);
        if (!mdev->cq_table.table) {
                mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_rdb;
        }
 
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
+               mdev->srq_table.table =
+                       mthca_alloc_icm_table(mdev, init_hca->srqc_base,
+                                             dev_lim->srq_entry_sz,
+                                             mdev->limits.num_srqs,
+                                             mdev->limits.reserved_srqs, 0);
+               if (!mdev->srq_table.table) {
+                       mthca_err(mdev, "Failed to map SRQ context memory, "
+                                 "aborting.\n");
+                       err = -ENOMEM;
+                       goto err_unmap_cq;
+               }
+       }
+
        /*
         * It's not strictly required, but for simplicity just map the
         * whole multicast group table now.  The table isn't very big
@@ -448,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
        if (!mdev->mcg_table.table) {
                mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
                err = -ENOMEM;
-               goto err_unmap_cq;
+               goto err_unmap_srq;
        }
 
        return 0;
 
+err_unmap_srq:
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               mthca_free_icm_table(mdev, mdev->srq_table.table);
+
 err_unmap_cq:
        mthca_free_icm_table(mdev, mdev->cq_table.table);
 
@@ -532,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
        profile = default_profile;
        profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
        profile.num_udav = 0;
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               profile.num_srq = dev_lim.max_srqs;
 
        icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
        if ((int) icm_size < 0) {
@@ -558,6 +580,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
        return 0;
 
 err_free_icm:
+       if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+               mthca_free_icm_table(mdev, mdev->srq_table.table);
        mthca_free_icm_table(mdev, mdev->cq_table.table);
        mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
        mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -587,6 +611,8 @@ static void mthca_close_hca(struct mthca_dev *mdev)
        mthca_CLOSE_HCA(mdev, 0, &status);
 
        if (mthca_is_memfree(mdev)) {
+               if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+                       mthca_free_icm_table(mdev, mdev->srq_table.table);
                mthca_free_icm_table(mdev, mdev->cq_table.table);
                mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
                mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -731,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
                goto err_cmd_poll;
        }
 
+       err = mthca_init_srq_table(dev);
+       if (err) {
+               mthca_err(dev, "Failed to initialize "
+                         "shared receive queue table, aborting.\n");
+               goto err_cq_table_free;
+       }
+
        err = mthca_init_qp_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "queue pair table, aborting.\n");
-               goto err_cq_table_free;
+               goto err_srq_table_free;
        }
 
        err = mthca_init_av_table(dev);
@@ -760,6 +793,9 @@ err_av_table_free:
 err_qp_table_free:
        mthca_cleanup_qp_table(dev);
 
+err_srq_table_free:
+       mthca_cleanup_srq_table(dev);
+
 err_cq_table_free:
        mthca_cleanup_cq_table(dev);
 
@@ -1046,6 +1082,7 @@ err_cleanup:
        mthca_cleanup_mcg_table(mdev);
        mthca_cleanup_av_table(mdev);
        mthca_cleanup_qp_table(mdev);
+       mthca_cleanup_srq_table(mdev);
        mthca_cleanup_cq_table(mdev);
        mthca_cmd_use_polling(mdev);
        mthca_cleanup_eq_table(mdev);
@@ -1095,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
        mthca_cleanup_mcg_table(mdev);
        mthca_cleanup_av_table(mdev);
        mthca_cleanup_qp_table(mdev);
+       mthca_cleanup_srq_table(mdev);
        mthca_cleanup_cq_table(mdev);
        mthca_cmd_use_polling(mdev);
        mthca_cleanup_eq_table(mdev);