| author | Paul Mundt <lethal@linux-sh.org> | 2007-05-13 20:10:46 -0400 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2007-05-13 20:10:46 -0400 |
| commit | 599c26d32950c33bdd2a5ac6939bfe15ecf057e0 (patch) | |
| tree | a9e062ed5c4e2fdbe0ea48fae2fbb2f7c10af96c /include/asm-sh64/dma-mapping.h | |
| parent | a226d33abccff1959cec911da4143ea06ab22052 (diff) | |
sh64: dma-mapping updates.
Follow the noncoherent changes from sh.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh64/dma-mapping.h')
-rw-r--r-- | include/asm-sh64/dma-mapping.h | 14 |
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index 5efe906c59f7..c7c0f059cdc4 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -35,6 +35,10 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(NULL, size, vaddr, dma_handle);
 }
 
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h) (1)
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
@@ -49,7 +53,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return virt_to_bus(ptr);
 #endif
-	dma_cache_sync(ptr, size, dir);
+	dma_cache_sync(dev, ptr, size, dir);
 
 	return virt_to_bus(ptr);
 }
@@ -63,7 +67,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nents; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -94,7 +98,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
 }
 
 static inline void dma_sync_single_range(struct device *dev,
@@ -106,7 +110,7 @@ static inline void dma_sync_single_range(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
 }
 
 static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -116,7 +120,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nelems; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
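For context, the sh64 interface after this patch matches the generic kernel prototype in which dma_cache_sync() takes the struct device as its first argument, and the noncoherent allocation helpers simply alias the coherent ones (with dma_is_consistent() reporting 1). The sketch below shows how a hypothetical caller might exercise these routines after this commit; the device pointer, buffer length, and transfer direction are illustrative assumptions, not part of the commit itself.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only: a hypothetical caller of the sh64
 * dma-mapping API as it looks after this commit.
 */
static int example_dma_buffer(struct device *dev, size_t len)
{
	dma_addr_t handle;
	void *buf;

	/* With this patch these macros expand to the coherent allocators. */
	buf = dma_alloc_noncoherent(dev, len, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... CPU writes into buf ... */

	/*
	 * dma_cache_sync() now takes the struct device as its first
	 * argument, matching the callers updated in this diff.
	 */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand 'handle' to the hardware and wait for completion ... */

	dma_free_noncoherent(dev, len, buf, handle);
	return 0;
}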