diff options
author | Christoph Hellwig <hch@lst.de> | 2018-06-15 07:08:50 -0400 |
---|---|---|
committer | Paul Burton <paul.burton@mips.com> | 2018-06-24 12:27:27 -0400 |
commit | c5e2bbb45d28d53d278f25068142a283a0a74f7a (patch) | |
tree | 9527bd52fc31b960b1a969e9f4c0a535d17ee8f7 /arch/mips/jazz/jazzdma.c | |
parent | d1f2564a5639bb54493eaa313aef612aee47ba7c (diff) |
MIPS: jazz: split dma mapping operations from dma-default
Jazz actually has a very basic IOMMU, so split the ops into a separate
implementation from the generic default support (which is about to go
away anyway).
Signed-off-by: Christoph Hellwig <hch@lst.de>
Patchwork: https://patchwork.linux-mips.org/patch/19548/
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Tom Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-mips@linux-mips.org
Diffstat (limited to 'arch/mips/jazz/jazzdma.c')
-rw-r--r-- | arch/mips/jazz/jazzdma.c | 141 |
1 file changed, 140 insertions(+), 1 deletion(-)
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d626a9a391cc..446fc8c92e1e 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/gfp.h> | 18 | #include <linux/gfp.h> |
19 | #include <linux/dma-direct.h> | ||
20 | #include <linux/dma-noncoherent.h> | ||
19 | #include <asm/mipsregs.h> | 21 | #include <asm/mipsregs.h> |
20 | #include <asm/jazz.h> | 22 | #include <asm/jazz.h> |
21 | #include <asm/io.h> | 23 | #include <asm/io.h> |
@@ -86,6 +88,7 @@ static int __init vdma_init(void) | |||
86 | printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n"); | 88 | printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n"); |
87 | return 0; | 89 | return 0; |
88 | } | 90 | } |
91 | arch_initcall(vdma_init); | ||
89 | 92 | ||
90 | /* | 93 | /* |
91 | * Allocate DMA pagetables using a simple first-fit algorithm | 94 | * Allocate DMA pagetables using a simple first-fit algorithm |
@@ -556,4 +559,140 @@ int vdma_get_enable(int channel) | |||
556 | return enable; | 559 | return enable; |
557 | } | 560 | } |
558 | 561 | ||
559 | arch_initcall(vdma_init); | 562 | static void *jazz_dma_alloc(struct device *dev, size_t size, |
563 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) | ||
564 | { | ||
565 | void *ret; | ||
566 | |||
567 | ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); | ||
568 | if (!ret) | ||
569 | return NULL; | ||
570 | |||
571 | *dma_handle = vdma_alloc(virt_to_phys(ret), size); | ||
572 | if (*dma_handle == VDMA_ERROR) { | ||
573 | dma_direct_free(dev, size, ret, *dma_handle, attrs); | ||
574 | return NULL; | ||
575 | } | ||
576 | |||
577 | if (!(attrs & DMA_ATTR_NON_CONSISTENT)) { | ||
578 | dma_cache_wback_inv((unsigned long)ret, size); | ||
579 | ret = UNCAC_ADDR(ret); | ||
580 | } | ||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | static void jazz_dma_free(struct device *dev, size_t size, void *vaddr, | ||
585 | dma_addr_t dma_handle, unsigned long attrs) | ||
586 | { | ||
587 | vdma_free(dma_handle); | ||
588 | if (!(attrs & DMA_ATTR_NON_CONSISTENT)) | ||
589 | vaddr = (void *)CAC_ADDR((unsigned long)vaddr); | ||
590 | return dma_direct_free(dev, size, vaddr, dma_handle, attrs); | ||
591 | } | ||
592 | |||
593 | static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page, | ||
594 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
595 | unsigned long attrs) | ||
596 | { | ||
597 | phys_addr_t phys = page_to_phys(page) + offset; | ||
598 | |||
599 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
600 | arch_sync_dma_for_device(dev, phys, size, dir); | ||
601 | return vdma_alloc(phys, size); | ||
602 | } | ||
603 | |||
604 | static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | ||
605 | size_t size, enum dma_data_direction dir, unsigned long attrs) | ||
606 | { | ||
607 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
608 | arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir); | ||
609 | vdma_free(dma_addr); | ||
610 | } | ||
611 | |||
612 | static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||
613 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
614 | { | ||
615 | int i; | ||
616 | struct scatterlist *sg; | ||
617 | |||
618 | for_each_sg(sglist, sg, nents, i) { | ||
619 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
620 | arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, | ||
621 | dir); | ||
622 | sg->dma_address = vdma_alloc(sg_phys(sg), sg->length); | ||
623 | if (sg->dma_address == VDMA_ERROR) | ||
624 | return 0; | ||
625 | sg_dma_len(sg) = sg->length; | ||
626 | } | ||
627 | |||
628 | return nents; | ||
629 | } | ||
630 | |||
631 | static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
632 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
633 | { | ||
634 | int i; | ||
635 | struct scatterlist *sg; | ||
636 | |||
637 | for_each_sg(sglist, sg, nents, i) { | ||
638 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
639 | arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, | ||
640 | dir); | ||
641 | vdma_free(sg->dma_address); | ||
642 | } | ||
643 | } | ||
644 | |||
645 | static void jazz_dma_sync_single_for_device(struct device *dev, | ||
646 | dma_addr_t addr, size_t size, enum dma_data_direction dir) | ||
647 | { | ||
648 | arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir); | ||
649 | } | ||
650 | |||
651 | static void jazz_dma_sync_single_for_cpu(struct device *dev, | ||
652 | dma_addr_t addr, size_t size, enum dma_data_direction dir) | ||
653 | { | ||
654 | arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir); | ||
655 | } | ||
656 | |||
657 | static void jazz_dma_sync_sg_for_device(struct device *dev, | ||
658 | struct scatterlist *sgl, int nents, enum dma_data_direction dir) | ||
659 | { | ||
660 | struct scatterlist *sg; | ||
661 | int i; | ||
662 | |||
663 | for_each_sg(sgl, sg, nents, i) | ||
664 | arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); | ||
665 | } | ||
666 | |||
667 | static void jazz_dma_sync_sg_for_cpu(struct device *dev, | ||
668 | struct scatterlist *sgl, int nents, enum dma_data_direction dir) | ||
669 | { | ||
670 | struct scatterlist *sg; | ||
671 | int i; | ||
672 | |||
673 | for_each_sg(sgl, sg, nents, i) | ||
674 | arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); | ||
675 | } | ||
676 | |||
677 | static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
678 | { | ||
679 | return dma_addr == VDMA_ERROR; | ||
680 | } | ||
681 | |||
682 | const struct dma_map_ops jazz_dma_ops = { | ||
683 | .alloc = jazz_dma_alloc, | ||
684 | .free = jazz_dma_free, | ||
685 | .mmap = arch_dma_mmap, | ||
686 | .map_page = jazz_dma_map_page, | ||
687 | .unmap_page = jazz_dma_unmap_page, | ||
688 | .map_sg = jazz_dma_map_sg, | ||
689 | .unmap_sg = jazz_dma_unmap_sg, | ||
690 | .sync_single_for_cpu = jazz_dma_sync_single_for_cpu, | ||
691 | .sync_single_for_device = jazz_dma_sync_single_for_device, | ||
692 | .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu, | ||
693 | .sync_sg_for_device = jazz_dma_sync_sg_for_device, | ||
694 | .dma_supported = dma_direct_supported, | ||
695 | .cache_sync = arch_dma_cache_sync, | ||
696 | .mapping_error = jazz_dma_mapping_error, | ||
697 | }; | ||
698 | EXPORT_SYMBOL(jazz_dma_ops); | ||