diff options
| author | Christoph Hellwig <hch@lst.de> | 2017-08-27 04:35:40 -0400 |
|---|---|---|
| committer | Christoph Hellwig <hch@lst.de> | 2017-10-19 10:37:44 -0400 |
| commit | e0c6584df9c414b50de17e1abc1099f7501bbb60 (patch) | |
| tree | a683b944a85576069343932bd4fe2fa78284074a | |
| parent | d708e71ed78f56330325e8de304e2fb1e38ccf1c (diff) | |
sh: make dma_cache_sync a no-op
sh does not implement DMA_ATTR_NON_CONSISTENT allocations, so it doesn't
make any sense to do any work in dma_cache_sync given that it
must be a no-op when dma_alloc_attrs returns coherent memory.
On the other hand sh uses dma_cache_sync internally in the dma_ops
implementation and for the maple bus that does not use the DMA API,
so the old functionality for dma_cache_sync is still provided under
the name sh_sync_dma_for_device, and without the redundant dev
argument. While at it, two of the syncing dma_ops also got the proper
_for_device postfix.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
| -rw-r--r-- | arch/sh/include/asm/dma-mapping.h | 9 | ||||
| -rw-r--r-- | arch/sh/kernel/dma-nommu.c | 17 | ||||
| -rw-r--r-- | arch/sh/mm/consistent.c | 6 | ||||
| -rw-r--r-- | drivers/sh/maple/maple.c | 5 |
4 files changed, 21 insertions, 16 deletions
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 9b06be07db4d..b46194ecef17 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h | |||
| @@ -9,8 +9,10 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | |||
| 9 | return dma_ops; | 9 | return dma_ops; |
| 10 | } | 10 | } |
| 11 | 11 | ||
| 12 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 12 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
| 13 | enum dma_data_direction dir); | 13 | enum dma_data_direction dir) |
| 14 | { | ||
| 15 | } | ||
| 14 | 16 | ||
| 15 | /* arch/sh/mm/consistent.c */ | 17 | /* arch/sh/mm/consistent.c */ |
| 16 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 18 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
| @@ -20,4 +22,7 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size, | |||
| 20 | void *vaddr, dma_addr_t dma_handle, | 22 | void *vaddr, dma_addr_t dma_handle, |
| 21 | unsigned long attrs); | 23 | unsigned long attrs); |
| 22 | 24 | ||
| 25 | void sh_sync_dma_for_device(void *vaddr, size_t size, | ||
| 26 | enum dma_data_direction dir); | ||
| 27 | |||
| 23 | #endif /* __ASM_SH_DMA_MAPPING_H */ | 28 | #endif /* __ASM_SH_DMA_MAPPING_H */ |
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index d24c707b2181..62b485107eae 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | #include <linux/dma-mapping.h> | 10 | #include <linux/dma-mapping.h> |
| 11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
| 12 | #include <asm/cacheflush.h> | ||
| 12 | 13 | ||
| 13 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | 14 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, |
| 14 | unsigned long offset, size_t size, | 15 | unsigned long offset, size_t size, |
| @@ -20,7 +21,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | |||
| 20 | WARN_ON(size == 0); | 21 | WARN_ON(size == 0); |
| 21 | 22 | ||
| 22 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | 23 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 23 | dma_cache_sync(dev, page_address(page) + offset, size, dir); | 24 | sh_sync_dma_for_device(page_address(page) + offset, size, dir); |
| 24 | 25 | ||
| 25 | return addr; | 26 | return addr; |
| 26 | } | 27 | } |
| @@ -38,7 +39,7 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 38 | BUG_ON(!sg_page(s)); | 39 | BUG_ON(!sg_page(s)); |
| 39 | 40 | ||
| 40 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | 41 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 41 | dma_cache_sync(dev, sg_virt(s), s->length, dir); | 42 | sh_sync_dma_for_device(sg_virt(s), s->length, dir); |
| 42 | 43 | ||
| 43 | s->dma_address = sg_phys(s); | 44 | s->dma_address = sg_phys(s); |
| 44 | s->dma_length = s->length; | 45 | s->dma_length = s->length; |
| @@ -48,20 +49,20 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 48 | } | 49 | } |
| 49 | 50 | ||
| 50 | #ifdef CONFIG_DMA_NONCOHERENT | 51 | #ifdef CONFIG_DMA_NONCOHERENT |
| 51 | static void nommu_sync_single(struct device *dev, dma_addr_t addr, | 52 | static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr, |
| 52 | size_t size, enum dma_data_direction dir) | 53 | size_t size, enum dma_data_direction dir) |
| 53 | { | 54 | { |
| 54 | dma_cache_sync(dev, phys_to_virt(addr), size, dir); | 55 | sh_sync_dma_for_device(phys_to_virt(addr), size, dir); |
| 55 | } | 56 | } |
| 56 | 57 | ||
| 57 | static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, | 58 | static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
| 58 | int nelems, enum dma_data_direction dir) | 59 | int nelems, enum dma_data_direction dir) |
| 59 | { | 60 | { |
| 60 | struct scatterlist *s; | 61 | struct scatterlist *s; |
| 61 | int i; | 62 | int i; |
| 62 | 63 | ||
| 63 | for_each_sg(sg, s, nelems, i) | 64 | for_each_sg(sg, s, nelems, i) |
| 64 | dma_cache_sync(dev, sg_virt(s), s->length, dir); | 65 | sh_sync_dma_for_device(sg_virt(s), s->length, dir); |
| 65 | } | 66 | } |
| 66 | #endif | 67 | #endif |
| 67 | 68 | ||
| @@ -71,8 +72,8 @@ const struct dma_map_ops nommu_dma_ops = { | |||
| 71 | .map_page = nommu_map_page, | 72 | .map_page = nommu_map_page, |
| 72 | .map_sg = nommu_map_sg, | 73 | .map_sg = nommu_map_sg, |
| 73 | #ifdef CONFIG_DMA_NONCOHERENT | 74 | #ifdef CONFIG_DMA_NONCOHERENT |
| 74 | .sync_single_for_device = nommu_sync_single, | 75 | .sync_single_for_device = nommu_sync_single_for_device, |
| 75 | .sync_sg_for_device = nommu_sync_sg, | 76 | .sync_sg_for_device = nommu_sync_sg_for_device, |
| 76 | #endif | 77 | #endif |
| 77 | .is_phys = 1, | 78 | .is_phys = 1, |
| 78 | }; | 79 | }; |
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index d1275adfa0ef..6ea3aab508f2 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
| @@ -49,7 +49,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
| 49 | * Pages from the page allocator may have data present in | 49 | * Pages from the page allocator may have data present in |
| 50 | * cache. So flush the cache before using uncached memory. | 50 | * cache. So flush the cache before using uncached memory. |
| 51 | */ | 51 | */ |
| 52 | dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL); | 52 | sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL); |
| 53 | 53 | ||
| 54 | ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); | 54 | ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); |
| 55 | if (!ret_nocache) { | 55 | if (!ret_nocache) { |
| @@ -78,7 +78,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size, | |||
| 78 | iounmap(vaddr); | 78 | iounmap(vaddr); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 81 | void sh_sync_dma_for_device(void *vaddr, size_t size, |
| 82 | enum dma_data_direction direction) | 82 | enum dma_data_direction direction) |
| 83 | { | 83 | { |
| 84 | void *addr; | 84 | void *addr; |
| @@ -100,7 +100,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 100 | BUG(); | 100 | BUG(); |
| 101 | } | 101 | } |
| 102 | } | 102 | } |
| 103 | EXPORT_SYMBOL(dma_cache_sync); | 103 | EXPORT_SYMBOL(sh_sync_dma_for_device); |
| 104 | 104 | ||
| 105 | static int __init memchunk_setup(char *str) | 105 | static int __init memchunk_setup(char *str) |
| 106 | { | 106 | { |
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index bec81c2404f7..7525039d812c 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
| @@ -300,7 +300,7 @@ static void maple_send(void) | |||
| 300 | mutex_unlock(&maple_wlist_lock); | 300 | mutex_unlock(&maple_wlist_lock); |
| 301 | if (maple_packets > 0) { | 301 | if (maple_packets > 0) { |
| 302 | for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) | 302 | for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) |
| 303 | dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, | 303 | sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE, |
| 304 | PAGE_SIZE, DMA_BIDIRECTIONAL); | 304 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
| 305 | } | 305 | } |
| 306 | 306 | ||
| @@ -642,8 +642,7 @@ static void maple_dma_handler(struct work_struct *work) | |||
| 642 | list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { | 642 | list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { |
| 643 | mdev = mq->dev; | 643 | mdev = mq->dev; |
| 644 | recvbuf = mq->recvbuf->buf; | 644 | recvbuf = mq->recvbuf->buf; |
| 645 | dma_cache_sync(&mdev->dev, recvbuf, 0x400, | 645 | sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE); |
| 646 | DMA_FROM_DEVICE); | ||
| 647 | code = recvbuf[0]; | 646 | code = recvbuf[0]; |
| 648 | kfree(mq->sendbuf); | 647 | kfree(mq->sendbuf); |
| 649 | list_del_init(&mq->list); | 648 | list_del_init(&mq->list); |
