author	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit	ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree	644b88f8a71896307d71438e9b3af49126ffb22b /arch/sh/include/asm/dma-mapping.h
parent	43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent	3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/sh/include/asm/dma-mapping.h')
-rw-r--r--	arch/sh/include/asm/dma-mapping.h	233
1 file changed, 60 insertions(+), 173 deletions(-)
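
Note: the diff below drops the open-coded SH DMA-mapping inlines and routes everything through the generic struct dma_map_ops dispatch pulled in from <asm-generic/dma-mapping-common.h>; the header now only exports a global dma_ops pointer plus a get_dma_ops() accessor. As a rough sketch of how such an ops table could be provided on the platform side (the names example_dma_ops and example_map_page are hypothetical, for illustration only, and are not part of this commit):

	/*
	 * Hypothetical sketch, not from this tree: a non-IOMMU platform
	 * publishes an ops table and points the global dma_ops at it, so
	 * that get_dma_ops() in the header below can dispatch through it.
	 */
	#include <linux/dma-mapping.h>

	static dma_addr_t example_map_page(struct device *dev, struct page *page,
					   unsigned long offset, size_t size,
					   enum dma_data_direction dir,
					   struct dma_attrs *attrs)
	{
		/* Without an IOMMU the DMA address is just the physical address. */
		return page_to_phys(page) + offset;
	}

	static struct dma_map_ops example_dma_ops = {
		.map_page	= example_map_page,
		/* .map_sg, .alloc_coherent, .free_coherent, ... filled in as needed */
	};

	struct dma_map_ops *dma_ops = &example_dma_ops;
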
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 69d56dd4c968..bea3337a426a 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -1,219 +1,106 @@
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
+extern struct dma_map_ops *dma_ops;
+extern void no_iommu_init(void);
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	return dma_ops;
+}
+
 #include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-extern struct bus_type pci_bus_type;
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
-#define dma_supported(dev, mask)	(1)
+	return 1;
+}
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
 	if (!dev->dma_mask || !dma_supported(dev, mask))
 		return -EIO;
+	if (ops->set_dma_mask)
+		return ops->set_dma_mask(dev, mask);
 
 	*dev->dma_mask = mask;
 
 	return 0;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-static inline dma_addr_t dma_map_single(struct device *dev,
-					 void *ptr, size_t size,
-					 enum dma_data_direction dir)
-{
-	dma_addr_t addr = virt_to_phys(ptr);
-
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-	if (dev->bus == &pci_bus_type)
-		return addr;
-#endif
-	dma_cache_sync(dev, ptr, size, dir);
 
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-
-	return addr;
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
-#endif
-		sg[i].dma_address = sg_phys(&sg[i]);
-		sg[i].dma_length = sg[i].length;
-	}
-
-	debug_dma_map_sg(dev, sg, nents, i, dir);
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				       unsigned long offset, size_t size,
-				       enum dma_data_direction dir)
-{
-	return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size, enum dma_data_direction dir)
-{
-	dma_unmap_single(dev, dma_address, size, dir);
-}
-
-static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
-				     size_t size, enum dma_data_direction dir)
-{
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-	if (dev->bus == &pci_bus_type)
-		return;
+#ifdef CONFIG_DMA_COHERENT
+#define dma_is_consistent(d, h) (1)
+#else
+#define dma_is_consistent(d, h) (0)
 #endif
-	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
-}
 
-static inline void dma_sync_single_range(struct device *dev,
-					 dma_addr_t dma_handle,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir)
+static inline int dma_get_cache_alignment(void)
 {
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-	if (dev->bus == &pci_bus_type)
-		return;
-#endif
-	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
+	/*
+	 * Each processor family will define its own L1_CACHE_SHIFT,
+	 * L1_CACHE_BYTES wraps to this, so this is always safe.
+	 */
+	return L1_CACHE_BYTES;
 }
 
-static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
-				 int nelems, enum dma_data_direction dir)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	int i;
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	for (i = 0; i < nelems; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
-#endif
-		sg[i].dma_address = sg_phys(&sg[i]);
-		sg[i].dma_length = sg[i].length;
-	}
-}
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction dir)
-{
-	__dma_sync_single(dev, dma_handle, size, dir);
-	debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction dir)
-{
-	__dma_sync_single(dev, dma_handle, size, dir);
-	debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
+	return dma_addr == 0;
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						  dma_addr_t dma_handle,
-						  unsigned long offset,
-						  size_t size,
-						  enum dma_data_direction direction)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t gfp)
 {
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-	debug_dma_sync_single_range_for_cpu(dev, dma_handle,
-					    offset, size, direction);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						     dma_addr_t dma_handle,
-						     unsigned long offset,
-						     size_t size,
-						     enum dma_data_direction direction)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-	debug_dma_sync_single_range_for_device(dev, dma_handle,
-					       offset, size, direction);
-}
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+	if (!ops->alloc_coherent)
+		return NULL;
 
+	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction dir)
-{
-	__dma_sync_sg(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+	return memory;
 }
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction dir)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
 {
-	__dma_sync_sg(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-static inline int dma_get_cache_alignment(void)
-{
-	/*
-	 * Each processor family will define its own L1_CACHE_SHIFT,
-	 * L1_CACHE_BYTES wraps to this, so this is always safe.
-	 */
-	return L1_CACHE_BYTES;
-}
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == 0;
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, dma_handle);
 }
 
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-		dma_addr_t device_addr, size_t size);
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_handle);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
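
For completeness, driver call sites are unchanged by this conversion: the inline wrappers supplied by <asm-generic/dma-mapping-common.h> (dma_map_single(), dma_map_page(), the sync helpers, and so on) all dispatch through get_dma_ops(). A minimal, purely illustrative usage sketch follows; example_start_tx, buf and len are made-up names, not from this tree:

	#include <linux/dma-mapping.h>

	static int example_start_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* Dispatches through get_dma_ops(dev) via dma-mapping-common.h. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -EIO;

		/* ... hand "handle" to the device and wait for the transfer ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}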