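In brief: c2_rnic_init() now allocates the verbs reply queue and asynchronous event queue buffers with dma_alloc_coherent() instead of kmalloc() followed by dma_map_single(), and the error paths together with c2_rnic_term() release them with dma_free_coherent() instead of dma_unmap_single() plus kfree().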
Diffstat (limited to 'drivers/infiniband/hw/amso1100/c2_rnic.c')
-rw-r--r--    drivers/infiniband/hw/amso1100/c2_rnic.c    52
1 file changed, 21 insertions, 31 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 30409e179606..030238d335ed 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Verbs Reply Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
 		err = -ENOMEM;
 		goto bail1;
 	}
-	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-						(void *)q1_pages, qsize * msgsize,
-						DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,14 +538,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Asynchronus Event Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
 		err = -ENOMEM;
 		goto bail2;
 	}
-	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					     (void *)q2_pages, qsize * msgsize,
-					     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
  bail4:
 	vq_term(c2dev);
  bail3:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q2_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
  bail2:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q1_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
  bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
  bail0:
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
 	/* Free the verbs request allocator */
 	vq_term(c2dev);
 
-	/* Unmap and free the asynchronus event queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->aeq.msg_pool.host);
+	/* Free the asynchronus event queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  c2dev->aeq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->aeq, mapping));
 
-	/* Unmap and free the verbs reply queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->rep_vq.msg_pool.host);
+	/* Free the verbs reply queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  c2dev->rep_vq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
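For readers less familiar with the kernel DMA API, the following standalone sketch (not part of the patch; the queue_alloc_*/queue_free_* helper names are invented for illustration) contrasts the streaming-DMA pattern removed above with the coherent allocation the driver now uses.

/*
 * Illustrative sketch only -- hypothetical helpers, not amso1100 code.
 * Old pattern: kmalloc() a buffer, then stream-map it for device writes.
 * (The original driver did not check dma_mapping_error(); a real driver
 * should before handing the address to hardware.)
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *queue_alloc_streaming(struct device *dev, size_t size,
                                   dma_addr_t *dma)
{
        void *buf = kmalloc(size, GFP_KERNEL);

        if (!buf)
                return NULL;
        *dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
        return buf;
}

static void queue_free_streaming(struct device *dev, void *buf,
                                 dma_addr_t dma, size_t size)
{
        dma_unmap_single(dev, dma, size, DMA_FROM_DEVICE);
        kfree(buf);
}

/*
 * New pattern: dma_alloc_coherent() returns the kernel virtual address and
 * fills in the bus address in one step, so there is no separate mapping to
 * undo; teardown is a single dma_free_coherent() call.
 */
static void *queue_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma)
{
        return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void queue_free_coherent(struct device *dev, void *buf,
                                dma_addr_t dma, size_t size)
{
        dma_free_coherent(dev, size, buf, dma);
}

Because the coherent allocation pairs the buffer with its DMA address from the start, the failure paths (bail2/bail3) and c2_rnic_term() collapse into single dma_free_coherent() calls, which accounts for the net 10-line reduction shown in the diffstat.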