path: root/include/asm-sh/dma-mapping.h
author    Paul Mundt <lethal@linux-sh.org>    2006-12-08 19:17:01 -0500
committer Paul Mundt <lethal@linux-sh.org>    2006-12-11 18:42:09 -0500
commit    5432143464ee7f5cb8b0b015a0fd1c3279af10ae (patch)
tree      000f509ed9495c6a36d8f4083e6cac69d87e6f58 /include/asm-sh/dma-mapping.h
parent    37bda1da4570c2e9c6dd34e77d2120218e384950 (diff)
sh: Fixup dma_cache_sync() callers.
dma_cache_sync() now takes a struct device; update all of the callers.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
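For context, the prototype change behind this fixup looks roughly as follows. This is an illustrative sketch inferred from the call sites changed below, not the exact declaration from the tree:

    /* Old form: no device argument. */
    void dma_cache_sync(void *vaddr, size_t size,
                        enum dma_data_direction dir);

    /* New form: the device is passed through, so the architecture code
     * has the struct device available when writing back or invalidating
     * the cache lines backing the buffer. */
    void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                        enum dma_data_direction dir);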
Diffstat (limited to 'include/asm-sh/dma-mapping.h')
-rw-r--r--  include/asm-sh/dma-mapping.h  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 37ab0c131a4d..8d0867b98e05 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return virt_to_bus(ptr);
 #endif
-	dma_cache_sync(ptr, size, dir);
+	dma_cache_sync(dev, ptr, size, dir);
 
 	return virt_to_bus(ptr);
 }
@@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nents; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
 }
 
 static inline void dma_sync_single_range(struct device *dev,
@@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
 }
 
 static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nelems; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
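
After this change, a caller simply passes its device down through the mapping helpers shown above; a minimal hypothetical usage sketch (my_dev, buf and len are placeholder names, not from this tree):

    dma_addr_t handle;

    /* Map for DMA: for a non-coherent device this ends up calling
     * dma_cache_sync(dev, ptr, size, dir) as patched above. */
    handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);

    /* Re-sync before the CPU reads data written back by the device. */
    dma_sync_single(my_dev, handle, len, DMA_FROM_DEVICE);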