author     Roland Dreier <roland@topspin.com>          2005-06-27 17:36:40 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-27 18:11:44 -0400
commit     64dc81fca7f6d5c51e50ffa850640ad8358acd1f (patch)
tree       919ca8d2ea8567a1de4bba989a123b77340b989a /drivers/infiniband/hw
parent     bb2af78bcdbb8801791de33f1775c98b9178daab (diff)
[PATCH] IB/mthca: Use dma_alloc_coherent instead of pci_alloc_consistent
Switch all allocations of coherent memory from pci_alloc_consistent() to
dma_alloc_coherent(), so that we can pass GFP_KERNEL. This should help when
the system is low on memory.
Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 25
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 19
3 files changed, 29 insertions, 27 deletions
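The motivation is easiest to see side by side: pci_alloc_consistent() takes no gfp argument (historically it implied GFP_ATOMIC), while dma_alloc_coherent() takes one, so the driver can request GFP_KERNEL and let the allocation sleep and reclaim memory. A minimal sketch of the conversion pattern follows; the helper names are hypothetical and not code from this patch.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/*
 * Old style: pci_alloc_consistent() has no gfp flags, so the allocation
 * cannot block or trigger reclaim when memory is tight.
 */
static void *alloc_coherent_page_old(struct pci_dev *pdev, dma_addr_t *dma)
{
        return pci_alloc_consistent(pdev, PAGE_SIZE, dma);
}

/*
 * New style used throughout this patch: call dma_alloc_coherent() on the
 * PCI device's struct device and pass GFP_KERNEL explicitly, so the
 * allocation may sleep and is more likely to succeed when memory is low.
 */
static void *alloc_coherent_page_new(struct pci_dev *pdev, dma_addr_t *dma)
{
        return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, dma, GFP_KERNEL);
}

/* The free side changes the same way: pci_free_consistent() -> dma_free_coherent(). */
static void free_coherent_page(struct pci_dev *pdev, void *buf, dma_addr_t dma)
{
        dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma);
}

The diffs below apply exactly this substitution to the mthca CQ, EQ, and QP buffer allocation and teardown paths.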
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 3724d9db50a3..5dae5b5dc8e7 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -636,19 +636,19 @@ static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
         int size;
 
         if (cq->is_direct)
-                pci_free_consistent(dev->pdev,
-                                    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-                                    cq->queue.direct.buf,
-                                    pci_unmap_addr(&cq->queue.direct,
-                                                   mapping));
+                dma_free_coherent(&dev->pdev->dev,
+                                  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+                                  cq->queue.direct.buf,
+                                  pci_unmap_addr(&cq->queue.direct,
+                                                 mapping));
         else {
                 size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
                 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
                         if (cq->queue.page_list[i].buf)
-                                pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                                    cq->queue.page_list[i].buf,
-                                                    pci_unmap_addr(&cq->queue.page_list[i],
-                                                                   mapping));
+                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                                  cq->queue.page_list[i].buf,
+                                                  pci_unmap_addr(&cq->queue.page_list[i],
+                                                                 mapping));
 
                 kfree(cq->queue.page_list);
         }
@@ -668,8 +668,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
                 npages = 1;
                 shift = get_order(size) + PAGE_SHIFT;
 
-                cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
-                                                            size, &t);
+                cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+                                                          size, &t, GFP_KERNEL);
                 if (!cq->queue.direct.buf)
                         return -ENOMEM;
 
@@ -707,7 +707,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
 
         for (i = 0; i < npages; ++i) {
                 cq->queue.page_list[i].buf =
-                        pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+                        dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                           &t, GFP_KERNEL);
                 if (!cq->queue.page_list[i].buf)
                         goto err_free;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7500ebc23f36..970cba24e79f 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -501,8 +501,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
         eq_context = MAILBOX_ALIGN(mailbox);
 
         for (i = 0; i < npages; ++i) {
-                eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
-                                                            PAGE_SIZE, &t);
+                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+                                                          PAGE_SIZE, &t, GFP_KERNEL);
                 if (!eq->page_list[i].buf)
                         goto err_out_free;
 
@@ -582,10 +582,10 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
  err_out_free:
         for (i = 0; i < npages; ++i)
                 if (eq->page_list[i].buf)
-                        pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                            eq->page_list[i].buf,
-                                            pci_unmap_addr(&eq->page_list[i],
-                                                           mapping));
+                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                          eq->page_list[i].buf,
+                                          pci_unmap_addr(&eq->page_list[i],
+                                                         mapping));
 
         kfree(eq->page_list);
         kfree(dma_list);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ca73bab11a02..031f690f5455 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -934,7 +934,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                 mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
                           size, shift);
 
-                qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
+                qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
+                                                          &t, GFP_KERNEL);
                 if (!qp->queue.direct.buf)
                         goto err_out;
 
@@ -973,7 +974,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
         for (i = 0; i < npages; ++i) {
                 qp->queue.page_list[i].buf =
-                        pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+                        dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                           &t, GFP_KERNEL);
                 if (!qp->queue.page_list[i].buf)
                         goto err_out_free;
 
@@ -996,16 +998,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
  err_out_free:
         if (qp->is_direct) {
-                pci_free_consistent(dev->pdev, size,
-                                    qp->queue.direct.buf,
-                                    pci_unmap_addr(&qp->queue.direct, mapping));
+                dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+                                  pci_unmap_addr(&qp->queue.direct, mapping));
         } else
                 for (i = 0; i < npages; ++i) {
                         if (qp->queue.page_list[i].buf)
-                                pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                                    qp->queue.page_list[i].buf,
-                                                    pci_unmap_addr(&qp->queue.page_list[i],
-                                                                   mapping));
+                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                                  qp->queue.page_list[i].buf,
+                                                  pci_unmap_addr(&qp->queue.page_list[i],
+                                                                 mapping));
 
                 }
 