Diffstat (limited to 'arch/sh/include/asm/dma-mapping.h')
-rw-r--r--  arch/sh/include/asm/dma-mapping.h  233
1 file changed, 61 insertions(+), 172 deletions(-)
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 69d56dd4c968..87ced133a363 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -1,219 +1,108 @@
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
+extern struct dma_map_ops *dma_ops;
+extern void no_iommu_init(void);
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+        return dma_ops;
+}
+
 #include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+        struct dma_map_ops *ops = get_dma_ops(dev);
 
-extern struct bus_type pci_bus_type;
+        if (ops->dma_supported)
+                return ops->dma_supported(dev, mask);
 
-#define dma_supported(dev, mask)        (1)
+        return 1;
+}
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
+        struct dma_map_ops *ops = get_dma_ops(dev);
+
         if (!dev->dma_mask || !dma_supported(dev, mask))
                 return -EIO;
+        if (ops->set_dma_mask)
+                return ops->set_dma_mask(dev, mask);
 
         *dev->dma_mask = mask;
 
         return 0;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                         dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-                       void *vaddr, dma_addr_t dma_handle);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                     enum dma_data_direction dir);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-static inline dma_addr_t dma_map_single(struct device *dev,
-                                        void *ptr, size_t size,
-                                        enum dma_data_direction dir)
-{
-        dma_addr_t addr = virt_to_phys(ptr);
-
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-        if (dev->bus == &pci_bus_type)
-                return addr;
-#endif
-        dma_cache_sync(dev, ptr, size, dir);
-
-        debug_dma_map_page(dev, virt_to_page(ptr),
-                           (unsigned long)ptr & ~PAGE_MASK, size,
-                           dir, addr, true);
-
-        return addr;
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-                                    size_t size, enum dma_data_direction dir)
-{
-        debug_dma_unmap_page(dev, addr, size, dir, true);
-}
 
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-                             int nents, enum dma_data_direction dir)
-{
-        int i;
-
-        for (i = 0; i < nents; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
+#ifdef CONFIG_DMA_COHERENT
+#define dma_is_consistent(d, h) (1)
+#else
+#define dma_is_consistent(d, h) (0)
 #endif
-                sg[i].dma_address = sg_phys(&sg[i]);
-                sg[i].dma_length = sg[i].length;
-        }
 
-        debug_dma_map_sg(dev, sg, nents, i, dir);
-
-        return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                                int nents, enum dma_data_direction dir)
-{
-        debug_dma_unmap_sg(dev, sg, nents, dir);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                                      unsigned long offset, size_t size,
-                                      enum dma_data_direction dir)
-{
-        return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-                                  size_t size, enum dma_data_direction dir)
-{
-        dma_unmap_single(dev, dma_address, size, dir);
-}
-
-static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
-                                     size_t size, enum dma_data_direction dir)
+static inline int dma_get_cache_alignment(void)
 {
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-        if (dev->bus == &pci_bus_type)
-                return;
-#endif
-        dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
+        /*
+         * Each processor family will define its own L1_CACHE_SHIFT,
+         * L1_CACHE_BYTES wraps to this, so this is always safe.
+         */
+        return L1_CACHE_BYTES;
 }
 
-static inline void dma_sync_single_range(struct device *dev,
-                                         dma_addr_t dma_handle,
-                                         unsigned long offset, size_t size,
-                                         enum dma_data_direction dir)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-        if (dev->bus == &pci_bus_type)
-                return;
-#endif
-        dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
-}
+        struct dma_map_ops *ops = get_dma_ops(dev);
 
-static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
-                                 int nelems, enum dma_data_direction dir)
-{
-        int i;
+        if (ops->mapping_error)
+                return ops->mapping_error(dev, dma_addr);
 
-        for (i = 0; i < nelems; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
-#endif
-                sg[i].dma_address = sg_phys(&sg[i]);
-                sg[i].dma_length = sg[i].length;
-        }
+        return dma_addr == 0;
 }
 
-static inline void dma_sync_single_for_cpu(struct device *dev,
-                                           dma_addr_t dma_handle, size_t size,
-                                           enum dma_data_direction dir)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *dma_handle, gfp_t gfp)
 {
-        __dma_sync_single(dev, dma_handle, size, dir);
-        debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-}
+        struct dma_map_ops *ops = get_dma_ops(dev);
+        void *memory;
 
-static inline void dma_sync_single_for_device(struct device *dev,
-                                              dma_addr_t dma_handle,
-                                              size_t size,
-                                              enum dma_data_direction dir)
-{
-        __dma_sync_single(dev, dma_handle, size, dir);
-        debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
-}
+        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+                return memory;
+        if (!ops->alloc_coherent)
+                return NULL;
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                                                 dma_addr_t dma_handle,
-                                                 unsigned long offset,
-                                                 size_t size,
-                                                 enum dma_data_direction direction)
-{
-        dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-        debug_dma_sync_single_range_for_cpu(dev, dma_handle,
-                                            offset, size, direction);
-}
+        memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                                                    dma_addr_t dma_handle,
-                                                    unsigned long offset,
-                                                    size_t size,
-                                                    enum dma_data_direction direction)
-{
-        dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-        debug_dma_sync_single_range_for_device(dev, dma_handle,
-                                               offset, size, direction);
+        return memory;
 }
 
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-                                       struct scatterlist *sg, int nelems,
-                                       enum dma_data_direction dir)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                     void *vaddr, dma_addr_t dma_handle)
 {
-        __dma_sync_sg(dev, sg, nelems, dir);
-        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
+        struct dma_map_ops *ops = get_dma_ops(dev);
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-                                          struct scatterlist *sg, int nelems,
-                                          enum dma_data_direction dir)
-{
-        __dma_sync_sg(dev, sg, nelems, dir);
-        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-}
+        WARN_ON(irqs_disabled());       /* for portability */
 
-static inline int dma_get_cache_alignment(void)
-{
-        /*
-         * Each processor family will define its own L1_CACHE_SHIFT,
-         * L1_CACHE_BYTES wraps to this, so this is always safe.
-         */
-        return L1_CACHE_BYTES;
-}
+        if (dma_release_from_coherent(dev, get_order(size), vaddr))
+                return;
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return dma_addr == 0;
+        debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+        if (ops->free_coherent)
+                ops->free_coherent(dev, size, vaddr, dma_handle);
 }
 
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                            dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-                                  dma_addr_t device_addr, size_t size);
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+                                        dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+                                      void *vaddr, dma_addr_t dma_handle);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
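
Note (not part of this commit): the converted header only dispatches through get_dma_ops(); the dma_map_ops instance itself must be supplied by a platform back-end and published through the dma_ops pointer. Below is a minimal sketch of such a back-end. The name example_dma_ops is hypothetical; dma_ops, no_iommu_init(), dma_generic_alloc_coherent() and dma_generic_free_coherent() are the symbols declared in the new header.

/*
 * Minimal back-end sketch, assuming a no-IOMMU platform.
 * "example_dma_ops" is an illustrative name, not taken from this commit.
 */
#include <linux/dma-mapping.h>

static struct dma_map_ops example_dma_ops = {
        .alloc_coherent = dma_generic_alloc_coherent,
        .free_coherent  = dma_generic_free_coherent,
        /*
         * A real back-end would also fill in .map_page, .map_sg and the
         * sync hooks that asm-generic/dma-mapping-common.h dispatches to.
         */
};

void no_iommu_init(void)
{
        /*
         * Publish the ops: from here on, dma_alloc_coherent() and
         * friends in the header dispatch through get_dma_ops(dev).
         */
        dma_ops = &example_dma_ops;
}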