Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
 arch/arm/include/asm/dma-mapping.h | 86 ++++++++++++++++++++++++++++--------
 1 file changed, 70 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 45329fca1b64..7b95d2058395 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -3,11 +3,48 @@
 
 #ifdef __KERNEL__
 
-#include <linux/mm.h>		/* need struct page */
-
+#include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 
 #include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return __arch_virt_to_dma(dev, addr);
+}
+#endif
 
 /*
  * DMA-consistent mapping functions.  These allocate/free a region of
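
For context, the #ifndef __arch_page_to_dma guard added above lets a platform supply its own virtual/bus translation by defining __arch_page_to_dma, __arch_dma_to_virt and __arch_virt_to_dma before this header is included. A minimal sketch of what such an override might look like, assuming a bus that sees system RAM at a fixed offset; BUS_OFFSET, the file placement and the arithmetic are illustrative only, not part of this patch:

	/*
	 * Hypothetical mach/memory.h override: the device's bus sees
	 * system RAM offset by BUS_OFFSET from its physical address.
	 * All names and values here are illustrative.
	 */
	#define BUS_OFFSET	0x80000000

	#define __arch_page_to_dma(dev, page)	\
		((dma_addr_t)(page_to_phys(page) - PHYS_OFFSET + BUS_OFFSET))
	#define __arch_dma_to_virt(dev, addr)	\
		phys_to_virt((addr) - BUS_OFFSET + PHYS_OFFSET)
	#define __arch_virt_to_dma(dev, addr)	\
		((dma_addr_t)(virt_to_phys(addr) - PHYS_OFFSET + BUS_OFFSET))
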
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
-	return virt_to_dma(dev, (unsigned long)cpu_addr);
+	return virt_to_dma(dev, cpu_addr);
 }
 #else
 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
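
The cast dropped here was redundant: virt_to_dma() as defined above takes a void * directly. For context, a hedged sketch of typical streaming use of the dma_map_single() path this hunk touches; dev, buf and len are illustrative driver locals, and error handling is elided:

	/* Map a kmalloc'd buffer so the device can read it. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the hardware with 'handle', wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
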
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
 	     enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+	return dma_map_single(dev, page_address(page) + offset, size, dir);
 }
 
 /**
@@ -241,7 +278,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 	       enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, (int)dir);
+	dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
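
dma_map_page()/dma_unmap_page() are thin wrappers over the single-buffer calls, and the (int)dir casts removed in these two hunks let the enum flow through unconverted. A sketch of the page-based variant under the same illustrative assumptions as above:

	/* Map one page of a receive buffer for device writes. */
	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
					 DMA_FROM_DEVICE);

	/* ... device DMAs into the page ... */

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
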
@@ -314,11 +351,12 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
 
 
 /**
- * dma_sync_single_for_cpu
+ * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @offset: offset of region to start sync
+ * @size: size of region to sync
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
  *
  * Make physical memory consistent for a single streaming mode DMA
  * translation after a transfer.
@@ -332,25 +370,41 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
  */
 #ifndef CONFIG_DMABOUNCE
 static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
-			enum dma_data_direction dir)
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
-			   enum dma_data_direction dir)
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
 #else
-extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
-extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
+extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
 #endif
 
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+			enum dma_data_direction dir)
+{
+	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+			   enum dma_data_direction dir)
+{
+	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
 
 /**
  * dma_sync_sg_for_cpu
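
The net effect of this last hunk: the new range variants perform the cache maintenance on just [offset, offset + size) of the mapping, and the old whole-buffer entry points become wrappers passing offset 0. A hedged sketch of the partial-sync pattern this enables, assuming a long-lived DMA_FROM_DEVICE mapping of a receive ring; handle, ring_cpu_addr, rx_off, rx_len and process_packet() are illustrative, not from this patch:

	/* Device wrote rx_len bytes at rx_off inside the mapped ring. */
	dma_sync_single_range_for_cpu(dev, handle, rx_off, rx_len,
				      DMA_FROM_DEVICE);

	process_packet(ring_cpu_addr + rx_off, rx_len);	/* CPU may read now */

	/* Hand the region back to the device before it writes again. */
	dma_sync_single_range_for_device(dev, handle, rx_off, rx_len,
					 DMA_FROM_DEVICE);

Note that ring_cpu_addr here is the kernel virtual address the driver originally mapped; drivers must not use dma_to_virt(), which the comment at the top of this patch reserves for the DMA-mapping internals.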