author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2009-06-17 19:28:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-18 16:03:58 -0400
commit     7c095e4603dd6ce78ff5b9b70896fe3e05c13f5c (patch)
tree       4de9ce5ba2c5e19e85c8c54e4884845a929504bb /arch/x86/include
parent     c147d8ea3e2f6f953647f2347ae732fd99b32e73 (diff)
dma-mapping: x86: use asm-generic/dma-mapping-common.h
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
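
Context for the diff below: the open-coded x86 wrappers (dma_map_single(), dma_unmap_sg(), the dma_sync_* helpers, and so on) are deleted because the same logic now lives in the shared asm-generic/dma-mapping-common.h, which any architecture built on struct dma_map_ops can include. As a rough illustration, a minimal sketch of the kind of wrapper that header provides is shown below; it mirrors the removed x86 dma_map_single(), but the generic header's exact form (for example, the dma_attrs plumbing in its *_attrs variants) may differ from this sketch.

/*
 * Sketch only: approximate shape of the shared wrapper provided by
 * asm-generic/dma-mapping-common.h after this series. The real header
 * threads a "struct dma_attrs *attrs" argument through *_attrs variants;
 * details may differ from what is shown here.
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	/* Same checks and debug hooks the per-arch copies used to do. */
	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}
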
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/dma-mapping.h  173
1 file changed, 2 insertions(+), 171 deletions(-)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index b93405b228b4..1c3f9435f1c9 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -33,6 +33,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 #endif
 }
 
+#include <asm-generic/dma-mapping-common.h>
+
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -53,177 +55,6 @@ extern int dma_set_mask(struct device *dev, u64 mask);
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag);
 
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	kmemcheck_mark_initialized(ptr, size);
-	addr = ops->map_page(hwdev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, NULL);
-	debug_dma_map_page(hwdev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-	int ents;
-	struct scatterlist *s;
-	int i;
-
-	BUG_ON(!valid_dma_direction(dir));
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
-	debug_dma_map_sg(hwdev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(hwdev, sg, nents, dir);
-	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
-	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
-	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_cpu)
-		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, dir);
-	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
-					    offset, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_device)
-		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, dir);
-	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
-					       offset, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
-
-	flush_write_buffers();
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	addr = ops->map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction dir)