author		Steve Wise <swise@opengridcomputing.com>	2006-10-27 18:28:35 -0400
committer	Roland Dreier <rolandd@cisco.com>	2006-10-30 23:52:52 -0500
commit		8de94ce19dd3c6fc6e9d9658da11cf3d76841ee5
tree		a9dfc1ed6ebee7112a593cc6505563295185ace7 /drivers/infiniband/hw
parent		04d03bc576f244bfa9692452aab83fa357ac0d57
IB/amso1100: Use dma_alloc_coherent() instead of kmalloc/dma_map_single
The Ammasso driver needs to use dma_alloc_coherent() for allocating memory that will be used by the HW for DMA.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
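For readers less familiar with the two DMA APIs, the difference is roughly the sketch below. This is a minimal illustration, not the driver's actual code; the dev/size parameters and the old_pattern/new_pattern helpers are invented for the example. kmalloc() plus dma_map_single() creates a streaming mapping that assumes the CPU is finished with the buffer around each transfer, while dma_alloc_coherent() hands back memory that the CPU and the device can both access at any time.

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/* Old pattern: allocate normal kernel memory, then create a
	 * streaming DMA mapping for it.  Correctness depends on sync and
	 * unmap calls bracketing every device access. */
	static void *old_pattern(struct device *dev, size_t size,
				 dma_addr_t *dma_handle)
	{
		void *buf = kmalloc(size, GFP_KERNEL);

		if (!buf)
			return NULL;
		*dma_handle = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
		return buf;
	}

	/* New pattern: one call returns a buffer that stays coherent
	 * between CPU and device for its whole lifetime, which is the
	 * right model for queues the adapter polls and updates
	 * continuously. */
	static void *new_pattern(struct device *dev, size_t size,
				 dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	}

	/* Matching teardown for the new pattern. */
	static void new_pattern_free(struct device *dev, size_t size,
				     void *buf, dma_addr_t dma_handle)
	{
		dma_free_coherent(dev, size, buf, dma_handle);
	}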
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_alloc.c	13
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_cq.c	18
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_rnic.c	52
3 files changed, 33 insertions, 50 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 028a60bbfca9..0315f99e4191 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 {
 	int i;
 	struct sp_chunk *new_head;
+	dma_addr_t dma_addr;
 
-	new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+				      &dma_addr, gfp_mask);
 	if (new_head == NULL)
 		return -ENOMEM;
 
-	new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
-					    PAGE_SIZE, DMA_FROM_DEVICE);
+	new_head->dma_addr = dma_addr;
 	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 
 	while (root) {
 		next = root->next;
-		dma_unmap_single(c2dev->ibdev.dma_device,
-				 pci_unmap_addr(root, mapping), PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-		__free_page((struct page *) root);
+		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+				  pci_unmap_addr(root, mapping));
 		root = next;
 	}
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 9d7bcc5ade93..05c9154d46f4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
-
-	dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
-			 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
-	free_pages((unsigned long) mq->msg_pool.host,
-		   get_order(mq->q_size * mq->msg_size));
+	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
+			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 			   int msg_size)
 {
-	unsigned long pool_start;
+	u8 *pool_start;
 
-	pool_start = __get_free_pages(GFP_KERNEL,
-				      get_order(q_size * msg_size));
+	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
+					&mq->host_dma, GFP_KERNEL);
 	if (!pool_start)
 		return -ENOMEM;
 
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		       0,		/* index (currently unknown) */
 		       q_size,
 		       msg_size,
-		       (u8 *) pool_start,
+		       pool_start,
 		       NULL,	/* peer (currently unknown) */
 		       C2_MQ_HOST_TARGET);
 
-	mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
-				      (void *)pool_start,
-				      q_size * msg_size, DMA_FROM_DEVICE);
 	pci_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 30409e179606..030238d335ed 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Verbs Reply Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
 		err = -ENOMEM;
 		goto bail1;
 	}
-	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-						(void *)q1_pages, qsize * msgsize,
-						DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,14 +538,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Asynchronus Event Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
 		err = -ENOMEM;
 		goto bail2;
 	}
-	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					     (void *)q2_pages, qsize * msgsize,
-					     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
  bail4:
 	vq_term(c2dev);
  bail3:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q2_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
  bail2:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q1_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
  bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
  bail0:
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
 	/* Free the verbs request allocator */
 	vq_term(c2dev);
 
-	/* Unmap and free the asynchronus event queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->aeq.msg_pool.host);
+	/* Free the asynchronus event queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  c2dev->aeq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->aeq, mapping));
 
-	/* Unmap and free the verbs reply queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->rep_vq.msg_pool.host);
+	/* Free the verbs reply queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  c2dev->rep_vq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);