| field | value | date |
|---|---|---|
| author | Christoph Hellwig <hch@lst.de> | 2016-01-20 18:01:35 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-20 20:09:18 -0500 |
| commit | 4605f04b2893fb5498b31c54e8f21da2fc4cc736 (patch) | |
| tree | dc9ae4048408b1a6eb0a3ecbd7e99feb9a9172ab /arch/c6x | |
| parent | 6f62097583e799040d6d18909b670b1e4dbb614d (diff) | |
c6x: convert to dma_map_ops
[dan.carpenter@oracle.com: C6X: fix build breakage]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Mark Salter <msalter@redhat.com>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
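For context on the dma_map_ops model this commit adopts: instead of the architecture exporting dma_map_single(), dma_map_sg() and friends directly, it fills in a struct dma_map_ops and exposes it through get_dma_ops(); the generic inline wrappers (pulled in via <asm-generic/dma-mapping-common.h>) then dispatch into that table. The sketch below is illustrative only, not code from this patch — it mirrors the generic wrapper of this kernel era in simplified form and omits the dma-debug bookkeeping the real helper also performs; the `sketch_` name is made up for illustration.

```c
#include <linux/dma-mapping.h>	/* struct dma_map_ops, dma_addr_t, dma_data_direction */
#include <linux/mm.h>		/* virt_to_page(), offset_in_page() */

/*
 * Illustrative sketch (not part of this patch): roughly what the generic
 * dma_map_single() wrapper does once an arch provides get_dma_ops().
 * On c6x, get_dma_ops() now returns &c6x_dma_ops, so this call lands in
 * c6x_dma_map_page() from arch/c6x/kernel/dma.c.
 */
static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* map_page is the primitive; "map single" is built on top of it */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL /* no dma_attrs */);
}
```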
Diffstat (limited to 'arch/c6x')

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/c6x/Kconfig | 2 |
| -rw-r--r-- | arch/c6x/include/asm/dma-mapping.h | 98 |
| -rw-r--r-- | arch/c6x/kernel/dma.c | 95 |
| -rw-r--r-- | arch/c6x/mm/dma-coherent.c | 10 |

4 files changed, 58 insertions(+), 147 deletions(-)
```diff
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 77ea09b8bce1..8602f725e270 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -17,6 +17,8 @@ config C6X
 	select OF_EARLY_FLATTREE
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
+	select ARCH_NO_COHERENT_DMA_MMAP
+	select HAVE_DMA_ATTRS
 
 config MMU
 	def_bool n
```
```diff
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index bbd7774e4d4e..f881e425d442 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -12,104 +12,24 @@
 #ifndef _ASM_C6X_DMA_MAPPING_H
 #define _ASM_C6X_DMA_MAPPING_H
 
-#include <linux/dma-debug.h>
-#include <asm-generic/dma-coherent.h>
-
-#define dma_supported(d, m)	1
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						     dma_addr_t addr,
-						     unsigned long offset,
-						     size_t size,
-						     enum dma_data_direction dir)
-{
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-	return dma_addr == ~0;
-}
-
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-				 size_t size, enum dma_data_direction dir);
-
-extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
-			     size_t size, enum dma_data_direction dir);
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
-		      int nents, enum dma_data_direction direction);
-
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			 int nents, enum dma_data_direction direction);
+#define DMA_ERROR_CODE ~0
 
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	dma_addr_t handle;
-
-	handle = dma_map_single(dev, page_address(page) + offset, size, dir);
-
-	debug_dma_map_page(dev, page, offset, size, dir, handle, false);
-
-	return handle;
-}
+extern struct dma_map_ops c6x_dma_ops;
 
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-				  size_t size, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	dma_unmap_single(dev, handle, size, dir);
-
-	debug_dma_unmap_page(dev, handle, size, dir, false);
+	return &c6x_dma_ops;
 }
 
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-				    size_t size, enum dma_data_direction dir);
-
-extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				       size_t size,
-				       enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir);
+#include <asm-generic/dma-mapping-common.h>
 
 extern void coherent_mem_init(u32 start, u32 size);
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, struct dma_attrs *attrs);
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs);
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
-				    struct vm_area_struct *vma, void *cpu_addr,
-				    dma_addr_t dma_addr, size_t size)
-{
-	return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size)
-{
-	return -EINVAL;
-}
-
 #endif	/* _ASM_C6X_DMA_MAPPING_H */
```
```diff
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index ab7b12de144d..8a80f3a250c0 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -36,110 +36,101 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size,
 	}
 }
 
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-			  enum dma_data_direction dir)
+static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
-	dma_addr_t addr = virt_to_phys(ptr);
+	dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-	c6x_dma_sync(addr, size, dir);
-
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
+	c6x_dma_sync(handle, size, dir);
+	return handle;
 }
-EXPORT_SYMBOL(dma_map_single);
-
 
-void dma_unmap_single(struct device *dev, dma_addr_t handle,
-		      size_t size, enum dma_data_direction dir)
+static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	c6x_dma_sync(handle, size, dir);
-
-	debug_dma_unmap_page(dev, handle, size, dir, true);
 }
-EXPORT_SYMBOL(dma_unmap_single);
-
 
-int dma_map_sg(struct device *dev, struct scatterlist *sglist,
-	       int nents, enum dma_data_direction dir)
+static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sglist, sg, nents, i)
-		sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
-						 dir);
-
-	debug_dma_map_sg(dev, sglist, nents, nents, dir);
+	for_each_sg(sglist, sg, nents, i) {
+		sg->dma_address = sg_phys(sg);
+		c6x_dma_sync(sg->dma_address, sg->length, dir);
+	}
 
 	return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
-
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-		  int nents, enum dma_data_direction dir)
+static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		  int nents, enum dma_data_direction dir,
+		  struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(sglist, sg, nents, i)
-		dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
+		c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
 
-	debug_dma_unmap_sg(dev, sglist, nents, dir);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-			     size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	c6x_dma_sync(handle, size, dir);
 
-	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
 }
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
 
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	c6x_dma_sync(handle, size, dir);
 
-	debug_dma_sync_single_for_device(dev, handle, size, dir);
 }
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
 
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
-			 int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sglist, int nents,
+		enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(sglist, sg, nents, i)
-		dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+		c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
 					sg->length, dir);
 
-	debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-			    int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sglist, int nents,
+		enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(sglist, sg, nents, i)
-		dma_sync_single_for_device(dev, sg_dma_address(sg),
+		c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
 					sg->length, dir);
 
-	debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+struct dma_map_ops c6x_dma_ops = {
+	.alloc			= c6x_dma_alloc,
+	.free			= c6x_dma_free,
+	.map_page		= c6x_dma_map_page,
+	.unmap_page		= c6x_dma_unmap_page,
+	.map_sg			= c6x_dma_map_sg,
+	.unmap_sg		= c6x_dma_unmap_sg,
+	.sync_single_for_device	= c6x_dma_sync_single_for_device,
+	.sync_single_for_cpu	= c6x_dma_sync_single_for_cpu,
+	.sync_sg_for_device	= c6x_dma_sync_sg_for_device,
+	.sync_sg_for_cpu	= c6x_dma_sync_sg_for_cpu,
+};
+EXPORT_SYMBOL(c6x_dma_ops);
 
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
```
```diff
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 4187e5180373..f7ee63af2541 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -73,8 +73,8 @@ static void __free_dma_pages(u32 addr, int order)
  * Allocate DMA coherent memory space and return both the kernel
  * virtual and DMA address for that space.
  */
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *handle, gfp_t gfp)
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, struct dma_attrs *attrs)
 {
 	u32 paddr;
 	int order;
@@ -94,13 +94,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
 	return phys_to_virt(paddr);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
  * Free DMA coherent memory as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-		       dma_addr_t dma_handle)
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
 
@@ -111,7 +110,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
 	__free_dma_pages(virt_to_phys(vaddr), order);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 /*
  * Initialise the coherent DMA memory allocator using the given uncached region.
```
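With this last hunk, c6x no longer exports dma_alloc_coherent()/dma_free_coherent() itself; drivers keep calling the same API, and the generic layer routes the call through the ops table into c6x_dma_alloc()/c6x_dma_free(). Below is a minimal sketch of that allocation path, assuming the asm-generic dma_alloc_attrs() wrapper of this era; it is illustrative only (the `sketch_` name is invented), and the real wrapper also consults the per-device coherent pool and does dma-debug accounting.

```c
#include <linux/dma-mapping.h>	/* struct dma_map_ops, dma_addr_t, gfp_t */

/*
 * Illustrative sketch (not part of this patch): roughly how a driver's
 * dma_alloc_coherent() call reaches c6x_dma_alloc() once c6x_dma_ops is
 * in place.
 */
static void *sketch_dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* &c6x_dma_ops on c6x */

	/* .alloc == c6x_dma_alloc, defined in arch/c6x/mm/dma-coherent.c */
	return ops->alloc(dev, size, dma_handle, gfp, NULL /* no dma_attrs */);
}
```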
