path: root/drivers/infiniband/hw/mthca/mthca_cq.c
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_cq.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 118
1 file changed, 6 insertions(+), 112 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 907867d1f2e0..8afb9ee2fbc6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -639,113 +639,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
 {
-        int i;
-        int size;
-
-        if (cq->is_direct)
-                dma_free_coherent(&dev->pdev->dev,
-                                  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-                                  cq->queue.direct.buf,
-                                  pci_unmap_addr(&cq->queue.direct,
-                                                 mapping));
-        else {
-                size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
-                for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
-                        if (cq->queue.page_list[i].buf)
-                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                  cq->queue.page_list[i].buf,
-                                                  pci_unmap_addr(&cq->queue.page_list[i],
-                                                                 mapping));
-
-                kfree(cq->queue.page_list);
-        }
-}
-
-static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
-                              struct mthca_cq *cq)
-{
-        int err = -ENOMEM;
-        int npages, shift;
-        u64 *dma_list = NULL;
-        dma_addr_t t;
-        int i;
-
-        if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
-                cq->is_direct = 1;
-                npages        = 1;
-                shift         = get_order(size) + PAGE_SHIFT;
-
-                cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
-                                                          size, &t, GFP_KERNEL);
-                if (!cq->queue.direct.buf)
-                        return -ENOMEM;
-
-                pci_unmap_addr_set(&cq->queue.direct, mapping, t);
-
-                memset(cq->queue.direct.buf, 0, size);
-
-                while (t & ((1 << shift) - 1)) {
-                        --shift;
-                        npages *= 2;
-                }
-
-                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-                if (!dma_list)
-                        goto err_free;
-
-                for (i = 0; i < npages; ++i)
-                        dma_list[i] = t + i * (1 << shift);
-        } else {
-                cq->is_direct = 0;
-                npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-                shift         = PAGE_SHIFT;
-
-                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-                if (!dma_list)
-                        return -ENOMEM;
-
-                cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
-                                              GFP_KERNEL);
-                if (!cq->queue.page_list)
-                        goto err_out;
-
-                for (i = 0; i < npages; ++i)
-                        cq->queue.page_list[i].buf = NULL;
-
-                for (i = 0; i < npages; ++i) {
-                        cq->queue.page_list[i].buf =
-                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                   &t, GFP_KERNEL);
-                        if (!cq->queue.page_list[i].buf)
-                                goto err_free;
-
-                        dma_list[i] = t;
-                        pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
-
-                        memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
-                }
-        }
-
-        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
-                                  dma_list, shift, npages,
-                                  0, size,
-                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
-                                  MTHCA_MPT_FLAG_LOCAL_READ,
-                                  &cq->mr);
-        if (err)
-                goto err_free;
-
-        kfree(dma_list);
-
-        return 0;
-
-err_free:
-        mthca_free_cq_buf(dev, cq);
-
-err_out:
-        kfree(dma_list);
-
-        return err;
+        mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+                       &cq->queue, cq->is_direct, &cq->mr);
 }
 
 int mthca_init_cq(struct mthca_dev *dev, int nent,
@@ -797,7 +692,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
         cq_context = mailbox->buf;
 
         if (cq->is_kernel) {
-                err = mthca_alloc_cq_buf(dev, size, cq);
+                err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
+                                      &cq->queue, &cq->is_direct,
+                                      &dev->driver_pd, 1, &cq->mr);
                 if (err)
                         goto err_out_mailbox;
 
@@ -858,10 +755,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
         return 0;
 
 err_out_free_mr:
-        if (cq->is_kernel) {
-                mthca_free_mr(dev, &cq->mr);
+        if (cq->is_kernel)
                 mthca_free_cq_buf(dev, cq);
-        }
 
 err_out_mailbox:
         mthca_free_mailbox(dev, mailbox);
@@ -929,7 +824,6 @@ void mthca_free_cq(struct mthca_dev *dev,
         wait_event(cq->wait, !atomic_read(&cq->refcount));
 
         if (cq->is_kernel) {
-                mthca_free_mr(dev, &cq->mr);
                 mthca_free_cq_buf(dev, cq);
                 if (mthca_is_memfree(dev)) {
                         mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
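
The two helpers this patch switches to are not defined in mthca_cq.c itself; the prototypes below are a sketch inferred only from the call sites in the hunks above. The parameter names and the exact types (union mthca_buf, struct mthca_pd, the hca_write flag) are assumptions, not copied from the tree:

/*
 * Sketch of the generic buffer helpers adopted by this patch, reconstructed
 * from the arguments passed above.
 *
 * mthca_buf_alloc() is assumed to pick a direct allocation when
 * size <= max_direct and a page list otherwise, register the memory as an
 * MR against the given PD, and report the chosen layout through *is_direct.
 * mthca_buf_free() releases both the MR and the buffer, which is why the
 * separate mthca_free_mr() calls disappear from the error and cleanup paths.
 */
int  mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
                     union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
                     int hca_write, struct mthca_mr *mr);
void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
                    int is_direct, struct mthca_mr *mr);

The "1" passed for hca_write at the mthca_init_cq() call site appears to request an HCA-writable mapping, matching the MTHCA_MPT_FLAG_LOCAL_WRITE flag the removed open-coded allocator set by hand.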