author     Sascha Hauer <s.hauer@pengutronix.de>   2011-08-08 02:22:41 -0400
committer  Sascha Hauer <s.hauer@pengutronix.de>   2011-08-08 02:22:41 -0400
commit     1a43f2012455a977397deffe35912fd3f3ce17b9 (patch)
tree       5189f337df44e7a495fbd097cd476b0380babd8c /arch/arm/include/asm/dma-mapping.h
parent     e1b96ada659431669efaf3defa997abf5db68130 (diff)
parent     322a8b034003c0d46d39af85bf24fee27b902f48 (diff)
Merge commit 'v3.1-rc1' into imx-fixes
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
 -rw-r--r--  arch/arm/include/asm/dma-mapping.h  88
 1 file changed, 13 insertions(+), 75 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837363ed..7a21d0bf7134 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
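This hunk moves dma_supported()/dma_set_mask() out of line; from a driver's point of view the calling convention is unchanged. A minimal sketch of the caller side, where foo_probe() and the 24-bit mask are illustrative assumptions, not part of the patch:

	#include <linux/dma-mapping.h>

	/* hypothetical probe: a device that can only drive the low 24
	 * address bits asks for a matching mask, as the removed comment
	 * block describes with the 0x00ffffff example */
	static int foo_probe(struct device *dev)
	{
		/* returns 0 on success, -EIO if the mask cannot be met */
		return dma_set_mask(dev, DMA_BIT_MASK(24));
	}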
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
  * a device as requireing DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
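dmabounce_register_dev() grows a fourth argument: a per-device needs_bounce callback matching the global dma_needs_bounce() hook removed in the next hunk. A sketch of how platform code might use it; foo_needs_bounce(), foo_init() and the 64MB window are assumptions for illustration:

	#include <linux/dma-mapping.h>

	/* hypothetical 64MB DMA window for this platform */
	#define FOO_DMA_WINDOW	(64UL << 20)

	/* bounce decision: return 1 to bounce, 0 if the buffer is
	 * usable as-is, per the dma_needs_bounce() contract below */
	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
			size_t size)
	{
		return (addr + size) > FOO_DMA_WINDOW;
	}

	static int foo_init(struct device *dev)
	{
		/* 512-byte small-buffer pool, no large pool, and the new
		 * per-device callback in place of the old global hook */
		return dmabounce_register_dev(dev, 512, 0, foo_needs_bounce);
	}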
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long offset;
+	struct page *page;
 	dma_addr_t addr;
 
+	BUG_ON(!virt_addr_valid(cpu_addr));
+	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
 	BUG_ON(!valid_dma_direction(dir));
 
-	addr = __dma_map_single(dev, cpu_addr, size, dir);
-	debug_dma_map_page(dev, virt_to_page(cpu_addr),
-			(unsigned long)cpu_addr & ~PAGE_MASK, size,
-			dir, addr, true);
+	page = virt_to_page(cpu_addr);
+	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
 
 	return addr;
 }
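With __dma_map_single() gone, dma_map_single() decomposes the virtual address into page + offset and defers to __dma_map_page(); the added virt_addr_valid() BUG_ON checks make explicit that only lowmem with a valid struct page (kmalloc'd memory, not vmalloc or highmem addresses) may be passed. A sketch under those constraints; foo_map_tx() is a hypothetical helper:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/* hypothetical helper: map a kmalloc'd buffer for device reads */
	static dma_addr_t foo_map_tx(struct device *dev, void *buf, size_t len)
	{
		/* buf must satisfy virt_addr_valid() end to end: kmalloc()
		 * memory does; a vmalloc() address would hit the BUG_ON() */
		dma_addr_t handle = dma_map_single(dev, buf, len,
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))
			return 0;	/* caller treats 0 as failure here */
		return handle;
	}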
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 	size_t size, enum dma_data_direction dir)
 {
 	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_single(dev, handle, size, dir);
+	__dma_unmap_page(dev, handle, size, dir);
 }
 
 /**
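dma_unmap_single() is likewise redirected to __dma_unmap_page(), keeping map and unmap symmetric. A minimal end-to-end sketch of the streaming round trip under these changes; foo_hw_xmit() stands in for a real device kick and is an assumption:

	#include <linux/dma-mapping.h>

	/* hypothetical hardware hook: starts the DMA and waits for it */
	extern void foo_hw_xmit(dma_addr_t addr, size_t len);

	/* hypothetical one-shot transmit: map, let the device DMA, unmap */
	static int foo_xmit(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle = dma_map_single(dev, buf, len,
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		foo_hw_xmit(handle, len);

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}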