path: root/arch/x86/include/asm/dma-mapping.h
author    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2009-04-07 16:34:16 -0400
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2009-04-07 16:34:16 -0400
commit    38f4b8c0da01ae7cd9b93386842ce272d6fde9ab (patch)
tree      3c8c52201aac038094bfea7efdd0984a8f62045e /arch/x86/include/asm/dma-mapping.h
parent    a811454027352c762e0d5bba1b1d8f7d26bf96ae (diff)
parent    8e2c4f2844c0e8dcdfe312e5f2204854ca8532c6 (diff)
Merge commit 'origin/master' into for-linus/xen/master
* commit 'origin/master': (4825 commits)
  Fix build errors due to CONFIG_BRANCH_TRACER=y
  parport: Use the PCI IRQ if offered
  tty: jsm cleanups
  Adjust path to gpio headers
  KGDB_SERIAL_CONSOLE check for module
  Change KCONFIG name
  tty: Blackin CTS/RTS
  Change hardware flow control from poll to interrupt driven
  Add support for the MAX3100 SPI UART.
  lanana: assign a device name and numbering for MAX3100
  serqt: initial clean up pass for tty side
  tty: Use the generic RS485 ioctl on CRIS
  tty: Correct inline types for tty_driver_kref_get()
  splice: fix deadlock in splicing to file
  nilfs2: support nanosecond timestamp
  nilfs2: introduce secondary super block
  nilfs2: simplify handling of active state of segments
  nilfs2: mark minor flag for checkpoint created by internal operation
  nilfs2: clean up sketch file
  nilfs2: super block operations fix endian bug
  ...

Conflicts:
	arch/x86/include/asm/thread_info.h
	arch/x86/lguest/boot.c
	drivers/xen/manage.c
Diffstat (limited to 'arch/x86/include/asm/dma-mapping.h')
-rw-r--r--   arch/x86/include/asm/dma-mapping.h   194
1 file changed, 96 insertions(+), 98 deletions(-)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 132a134d12f2..f82fdc412c64 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,8 @@
  */
 
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
@@ -16,47 +18,9 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 
-struct dma_mapping_ops {
-	int (*mapping_error)(struct device *dev,
-			     dma_addr_t dma_addr);
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
-				 size_t size, int direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t addr,
-			     size_t size, int direction);
-	void (*sync_single_for_cpu)(struct device *hwdev,
-				    dma_addr_t dma_handle, size_t size,
-				    int direction);
-	void (*sync_single_for_device)(struct device *hwdev,
-				       dma_addr_t dma_handle, size_t size,
-				       int direction);
-	void (*sync_single_range_for_cpu)(struct device *hwdev,
-					  dma_addr_t dma_handle, unsigned long offset,
-					  size_t size, int direction);
-	void (*sync_single_range_for_device)(struct device *hwdev,
-					     dma_addr_t dma_handle, unsigned long offset,
-					     size_t size, int direction);
-	void (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void (*sync_sg_for_device)(struct device *hwdev,
-				   struct scatterlist *sg, int nelems,
-				   int direction);
-	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
-	void (*unmap_sg)(struct device *hwdev,
-			 struct scatterlist *sg, int nents,
-			 int direction);
-	int (*dma_supported)(struct device *hwdev, u64 mask);
-	int is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_X86_32
 	return dma_ops;
@@ -71,7 +35,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
@@ -90,137 +54,167 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
+	       enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
+	dma_addr_t addr;
+
+	BUG_ON(!valid_dma_direction(dir));
+	addr = ops->map_page(hwdev, virt_to_page(ptr),
+			     (unsigned long)ptr & ~PAGE_MASK, size,
+			     dir, NULL);
+	debug_dma_map_page(hwdev, virt_to_page(ptr),
+			   (unsigned long)ptr & ~PAGE_MASK, size,
+			   dir, addr, true);
+	return addr;
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
+		 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	if (ops->unmap_single)
-		ops->unmap_single(dev, addr, size, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
+	   int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
+	int ents;
+
+	BUG_ON(!valid_dma_direction(dir));
+	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
+	debug_dma_map_sg(hwdev, sg, nents, ents, dir);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
+	return ents;
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
+	     enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(hwdev, sg, nents, dir);
 	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
+		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
+			size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
+	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
+			   size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
+	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_cpu)
 		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, direction);
+					       size, dir);
+	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
+					    offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
-				 int direction)
+				 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_device)
 		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, direction);
+						  offset, size, dir);
+	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
+					       offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
+		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
+	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
+		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
+	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
 
 	flush_write_buffers();
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction)
+				      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(dev, page_to_phys(page) + offset,
-			       size, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, int direction)
+				  size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	debug_dma_unmap_page(dev, addr, size, dir, false);
 }
 
 static inline void
@@ -244,7 +238,7 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 	dma_mask = dev->coherent_dma_mask;
 	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
 
 	return dma_mask;
 }
@@ -253,10 +247,10 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
-	if (dma_mask <= DMA_24BIT_MASK)
+	if (dma_mask <= DMA_BIT_MASK(24))
 		gfp |= GFP_DMA;
 #ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
 	return gfp;
@@ -266,7 +260,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -285,20 +279,24 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (!ops->alloc_coherent)
 		return NULL;
 
-	return ops->alloc_coherent(dev, size, dma_handle,
-				   dma_alloc_coherent_gfp_flags(dev, gfp));
+	memory = ops->alloc_coherent(dev, size, dma_handle,
+				     dma_alloc_coherent_gfp_flags(dev, gfp));
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+	return memory;
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t bus)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	WARN_ON(irqs_disabled());	/* for portability */
 
 	if (dma_release_from_coherent(dev, get_order(size), vaddr))
 		return;
 
+	debug_dma_free_coherent(dev, size, vaddr, bus);
 	if (ops->free_coherent)
 		ops->free_coherent(dev, size, vaddr, bus);
 }
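
Note: the change this merge brings into dma-mapping.h routes the x86 wrappers through the generic struct dma_map_ops (map_page/unmap_page with an enum dma_data_direction and attrs argument) and adds dma-debug hooks, while the driver-facing calls keep their shape. Below is a minimal driver-side sketch of that unchanged calling convention; my_send_buffer() and its surrounding buffer handling are hypothetical, only the dma_* calls and DMA_TO_DEVICE are the real interfaces implemented by this header.

/*
 * Illustrative sketch only: streaming DMA from a driver's point of view.
 * The function name and error value are assumptions for the example.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>

static int my_send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map the kernel buffer for device reads (CPU -> device). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* Release the mapping; size and direction must match the map call. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}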