diff options
| author | Eli Billauer <eli.billauer@gmail.com> | 2011-09-11 15:43:06 -0400 |
|---|---|---|
| committer | Michal Simek <monstr@monstr.eu> | 2011-10-14 06:24:29 -0400 |
| commit | cf560c1801e518abfe0951008c4f2df4bbb3f5e8 (patch) | |
| tree | b7701d77a994b26a71bd5cfb96f3ae1ca99b4e88 | |
| parent | 2309f7cfca745ec282c125e79ac80dca2ea8390e (diff) | |
microblaze: Moved __dma_sync() to dma-mapping.h
__dma_sync_page() was replaced by __dma_sync(), and the arguments of calls to
the new function were adjusted to match __dma_sync()'s signature.
Signed-off-by: Eli Billauer <eli.billauer@gmail.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>
| -rw-r--r-- | arch/microblaze/include/asm/dma-mapping.h | 20 | ||||
| -rw-r--r-- | arch/microblaze/kernel/dma.c | 22 |
2 files changed, 21 insertions, 21 deletions
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 8fbb0ec10233..cddeca59a63a 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h | |||
| @@ -28,12 +28,12 @@ | |||
| 28 | #include <linux/dma-attrs.h> | 28 | #include <linux/dma-attrs.h> |
| 29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
| 30 | #include <asm-generic/dma-coherent.h> | 30 | #include <asm-generic/dma-coherent.h> |
| 31 | #include <asm/cacheflush.h> | ||
| 31 | 32 | ||
| 32 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | 33 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
| 33 | 34 | ||
| 34 | #define __dma_alloc_coherent(dev, gfp, size, handle) NULL | 35 | #define __dma_alloc_coherent(dev, gfp, size, handle) NULL |
| 35 | #define __dma_free_coherent(size, addr) ((void)0) | 36 | #define __dma_free_coherent(size, addr) ((void)0) |
| 36 | #define __dma_sync(addr, size, rw) ((void)0) | ||
| 37 | 37 | ||
| 38 | static inline unsigned long device_to_mask(struct device *dev) | 38 | static inline unsigned long device_to_mask(struct device *dev) |
| 39 | { | 39 | { |
| @@ -95,6 +95,22 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) | |||
| 95 | 95 | ||
| 96 | #include <asm-generic/dma-mapping-common.h> | 96 | #include <asm-generic/dma-mapping-common.h> |
| 97 | 97 | ||
| 98 | static inline void __dma_sync(unsigned long paddr, | ||
| 99 | size_t size, enum dma_data_direction direction) | ||
| 100 | { | ||
| 101 | switch (direction) { | ||
| 102 | case DMA_TO_DEVICE: | ||
| 103 | case DMA_BIDIRECTIONAL: | ||
| 104 | flush_dcache_range(paddr, paddr + size); | ||
| 105 | break; | ||
| 106 | case DMA_FROM_DEVICE: | ||
| 107 | invalidate_dcache_range(paddr, paddr + size); | ||
| 108 | break; | ||
| 109 | default: | ||
| 110 | BUG(); | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 98 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 114 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
| 99 | { | 115 | { |
| 100 | struct dma_map_ops *ops = get_dma_ops(dev); | 116 | struct dma_map_ops *ops = get_dma_ops(dev); |
| @@ -135,7 +151,7 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 135 | enum dma_data_direction direction) | 151 | enum dma_data_direction direction) |
| 136 | { | 152 | { |
| 137 | BUG_ON(direction == DMA_NONE); | 153 | BUG_ON(direction == DMA_NONE); |
| 138 | __dma_sync(vaddr, size, (int)direction); | 154 | __dma_sync(virt_to_phys(vaddr), size, (int)direction); |
| 139 | } | 155 | } |
| 140 | 156 | ||
| 141 | #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */ | 157 | #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */ |
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 393e6b2db688..595130bceadd 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
| 12 | #include <linux/dma-debug.h> | 12 | #include <linux/dma-debug.h> |
| 13 | #include <asm/bug.h> | 13 | #include <asm/bug.h> |
| 14 | #include <asm/cacheflush.h> | ||
| 15 | 14 | ||
| 16 | /* | 15 | /* |
| 17 | * Generic direct DMA implementation | 16 | * Generic direct DMA implementation |
| @@ -21,21 +20,6 @@ | |||
| 21 | * can set archdata.dma_data to an unsigned long holding the offset. By | 20 | * can set archdata.dma_data to an unsigned long holding the offset. By |
| 22 | * default the offset is PCI_DRAM_OFFSET. | 21 | * default the offset is PCI_DRAM_OFFSET. |
| 23 | */ | 22 | */ |
| 24 | static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, | ||
| 25 | size_t size, enum dma_data_direction direction) | ||
| 26 | { | ||
| 27 | switch (direction) { | ||
| 28 | case DMA_TO_DEVICE: | ||
| 29 | case DMA_BIDIRECTIONAL: | ||
| 30 | flush_dcache_range(paddr + offset, paddr + offset + size); | ||
| 31 | break; | ||
| 32 | case DMA_FROM_DEVICE: | ||
| 33 | invalidate_dcache_range(paddr + offset, paddr + offset + size); | ||
| 34 | break; | ||
| 35 | default: | ||
| 36 | BUG(); | ||
| 37 | } | ||
| 38 | } | ||
| 39 | 23 | ||
| 40 | static unsigned long get_dma_direct_offset(struct device *dev) | 24 | static unsigned long get_dma_direct_offset(struct device *dev) |
| 41 | { | 25 | { |
| @@ -91,7 +75,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | |||
| 91 | /* FIXME this part of code is untested */ | 75 | /* FIXME this part of code is untested */ |
| 92 | for_each_sg(sgl, sg, nents, i) { | 76 | for_each_sg(sgl, sg, nents, i) { |
| 93 | sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); | 77 | sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); |
| 94 | __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset, | 78 | __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, |
| 95 | sg->length, direction); | 79 | sg->length, direction); |
| 96 | } | 80 | } |
| 97 | 81 | ||
| @@ -116,7 +100,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, | |||
| 116 | enum dma_data_direction direction, | 100 | enum dma_data_direction direction, |
| 117 | struct dma_attrs *attrs) | 101 | struct dma_attrs *attrs) |
| 118 | { | 102 | { |
| 119 | __dma_sync_page(page_to_phys(page), offset, size, direction); | 103 | __dma_sync(page_to_phys(page) + offset, size, direction); |
| 120 | return page_to_phys(page) + offset + get_dma_direct_offset(dev); | 104 | return page_to_phys(page) + offset + get_dma_direct_offset(dev); |
| 121 | } | 105 | } |
| 122 | 106 | ||
| @@ -131,7 +115,7 @@ static inline void dma_direct_unmap_page(struct device *dev, | |||
| 131 | * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and | 115 | * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and |
| 132 | * dma_address is physical address | 116 | * dma_address is physical address |
| 133 | */ | 117 | */ |
| 134 | __dma_sync_page(dma_address, 0 , size, direction); | 118 | __dma_sync(dma_address, size, direction); |
| 135 | } | 119 | } |
| 136 | 120 | ||
| 137 | struct dma_map_ops dma_direct_ops = { | 121 | struct dma_map_ops dma_direct_ops = { |
