Diffstat (limited to 'include/asm-sh/dma-mapping.h')
 include/asm-sh/dma-mapping.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 56cd4b977232..8d0867b98e05 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	consistent_sync(vaddr, size, (int)dir);
@@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return virt_to_bus(ptr);
 #endif
-	dma_cache_sync(ptr, size, dir);
+	dma_cache_sync(dev, ptr, size, dir);
 
 	return virt_to_bus(ptr);
 }
@@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nents; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
 }
 
 static inline void dma_sync_single_range(struct device *dev,
@@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
 }
 
 static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nelems; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
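The hunks above only thread the new struct device * first argument through each dma_cache_sync() call site in this header; the underlying cache maintenance (consistent_sync()) is unchanged. As a minimal sketch of how a caller outside this header would follow the same pattern (sh_example_sync(), buf and len are hypothetical names used purely for illustration):

#include <linux/dma-mapping.h>

/* Flush a kernel buffer to memory before the device reads it.
 * Assumes 'dev' is the DMA-capable device performing the transfer. */
static void sh_example_sync(struct device *dev, void *buf, size_t len)
{
	/* old form: dma_cache_sync(buf, len, DMA_TO_DEVICE); */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}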