Diffstat (limited to 'include/asm-sh64/dma-mapping.h')
-rw-r--r--   include/asm-sh64/dma-mapping.h   193
1 file changed, 0 insertions(+), 193 deletions(-)
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
deleted file mode 100644
index 1438b763a5ea..000000000000
--- a/include/asm-sh64/dma-mapping.h
+++ /dev/null
@@ -1,193 +0,0 @@
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
                              dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
                            void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask)        (1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
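/*
 * Editor's sketch (not part of the original header): typical use of
 * dma_set_mask() from a driver's probe path. The function example_probe
 * and the 32-bit mask are hypothetical; DMA_BIT_MASK() comes from
 * <linux/dma-mapping.h>.
 */
#if 0
static int example_probe(struct device *dev)
{
        /* Tell the DMA layer this device can address 32 bits. */
        if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;    /* no dma_mask installed, or mask refused */

        return 0;
}
#endif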

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        consistent_free(NULL, size, vaddr, dma_handle);
}
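/*
 * Editor's sketch (not part of the original header): allocating and
 * freeing a coherent buffer. On this port dma_alloc_coherent() simply
 * forwards to consistent_alloc() with a NULL pci_dev. The example
 * function and the PAGE_SIZE buffer are hypothetical.
 */
#if 0
static void example_coherent(struct device *dev)
{
        dma_addr_t handle;
        void *cpu_addr;

        /* The buffer is coherent: no dma_sync_*() calls are needed. */
        cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
        if (!cpu_addr)
                return;

        /* ... program the device with `handle`, use `cpu_addr` ... */

        dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
}
#endif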

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
{
        unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
        unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;

        for (; s <= e; s += L1_CACHE_BYTES)
                asm volatile ("ocbp %0, 0" : : "r" (s));
}
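/*
 * Editor's note (not part of the original header): a worked example of
 * the alignment arithmetic above, assuming L1_CACHE_BYTES == 32 so that
 * L1_CACHE_ALIGN_MASK == ~31UL. For vaddr == 0x80000014 and size == 0x30,
 * s == 0x80000000 and e == 0x80000040, so the loop issues ocbp (purge:
 * write back and invalidate) on the three cache lines at 0x80000000,
 * 0x80000020 and 0x80000040, covering every line the buffer touches.
 */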

static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return virt_to_phys(ptr);
#endif
        dma_cache_sync(dev, ptr, size, dir);

        return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)  do { } while (0)
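/*
 * Editor's sketch (not part of the original header): streaming DMA with
 * dma_map_single(). On this port dma_unmap_single() is a no-op, and
 * dma_mapping_error() (defined at the end of this file) treats a zero
 * handle as failure. The example function, buffer and length are
 * hypothetical.
 */
#if 0
static void example_streaming(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Purges the buffer's cache lines, then returns its physical
         * address for the device to use. */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(handle))
                return;

        /* ... hand `handle` to the device and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
#endif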

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
        }

        return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)       do { } while (0)
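/*
 * Editor's sketch (not part of the original header): mapping a small
 * scatterlist. sg_init_table() and sg_set_buf() come from
 * <linux/scatterlist.h>, which this header already includes; the
 * two-entry list and the example function are hypothetical.
 */
#if 0
static void example_sg(struct device *dev, void *a, void *b, size_t len)
{
        struct scatterlist sg[2];
        int i, nents;

        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], a, len);
        sg_set_buf(&sg[1], b, len);

        /* Fills in sg[i].dma_address; never fails on this port. */
        nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);

        for (i = 0; i < nents; i++) {
                /* ... program segment sg[i].dma_address / sg[i].length ... */
        }

        dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
}
#endif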

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
        }
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                                           dma_addr_t dma_handle, size_t size,
                                           enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t dma_handle, size_t size,
                                              enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t dma_handle,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t dma_handle,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}
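/*
 * Editor's sketch (not part of the original header): the ownership
 * hand-off pattern the *_for_cpu/*_for_device helpers support. For a
 * DMA_FROM_DEVICE buffer, sync "for cpu" before the CPU reads data the
 * device wrote, and "for device" before handing the buffer back. The
 * example function is hypothetical.
 */
#if 0
static void example_sync(struct device *dev, dma_addr_t handle, size_t len)
{
        /* Device finished writing: make its data visible to the CPU. */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

        /* ... CPU reads the buffer ... */

        /* Give the buffer back to the device for the next transfer. */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif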

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family will define its own L1_CACHE_SHIFT;
         * L1_CACHE_BYTES is derived from it, so this is always safe.
         */
        return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */