 arch/arm/common/dmabounce.c              | 18
 arch/arm/include/asm/dma-mapping.h       | 51
 arch/arm/include/asm/memory.h            | 14
 arch/arm/plat-omap/include/mach/memory.h | 15
 4 files changed, 61 insertions(+), 37 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 69130f365904..1ea6482cdf6e 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -246,9 +246,9 @@ map_single(struct device *dev, void *ptr, size_t size,
 	}
 
 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
 	if ((dir == DMA_TO_DEVICE) ||
 	    (dir == DMA_BIDIRECTIONAL)) {
@@ -292,9 +292,9 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	BUG_ON(buf->size != size);
 
 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS ( device_info->bounce_count++ );
 
@@ -355,9 +355,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	 */
 
 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS ( device_info->bounce_count++ );
 
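The dmabounce hunks above only change the debug output: since the DMA handle is now carried as a properly typed dma_addr_t, casting it to void * just to feed printk's %p no longer makes sense, so the messages print it with %#x instead. A minimal illustration of the two styles (plain userspace C with a stand-in dma_addr_t, not the kernel code):

```c
#include <stdio.h>

typedef unsigned int dma_addr_t;	/* stand-in for the kernel type */

int main(void)
{
	char buf[32];
	dma_addr_t handle = 0xc0ffee00u;

	/* Old style: shoehorn the DMA handle into %p via a cast. */
	printf("unsafe buffer %p (phy=%p)\n",
	       (void *)buf, (void *)(unsigned long)handle);

	/* New style: print the pointer with %p and the handle with %#x. */
	printf("unsafe buffer %p (dma=%#x)\n", (void *)buf, handle);
	return 0;
}
```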
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 45329fca1b64..6f45959fe2cc 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -3,11 +3,48 @@
 
 #ifdef __KERNEL__
 
-#include <linux/mm.h>		/* need struct page */
-
+#include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 
 #include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return __arch_virt_to_dma(dev, addr);
+}
+#endif
 
 /*
  * DMA-consistent mapping functions. These allocate/free a region of
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
-	return virt_to_dma(dev, (unsigned long)cpu_addr);
+	return virt_to_dma(dev, cpu_addr);
 }
 #else
 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
 	     enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+	return dma_map_single(dev, page_address(page) + offset, size, dir);
 }
 
 /**
@@ -241,7 +278,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 	       enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, (int)dir);
+	dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -336,7 +373,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
@@ -344,7 +381,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
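With page_to_dma(), dma_to_virt() and virt_to_dma() now declared as static inline functions, their parameter and return types are checked by the compiler, so the (unsigned long) and (void *) casts at the call sites above become unnecessary; the (int)dir casts were simply gratuitous and are dropped at the same time. A minimal sketch of what the macro-to-inline conversion buys (plain C with stand-in types, not the kernel headers):

```c
#include <stdio.h>

/* Stand-in types so the sketch builds outside the kernel. */
typedef unsigned int dma_addr_t;
struct device { int id; };
#define __virt_to_bus(x)	(x)	/* identity translation, for illustration only */

/* Old style: a macro does no argument or result type checking, so call
 * sites tended to carry casts such as virt_to_dma(dev, (unsigned long)p). */
#define virt_to_dma_old(dev, addr) \
	((dma_addr_t)__virt_to_bus((unsigned long)(addr)))

/* New style: the same translation as a typed static inline function.
 * The compiler converts the argument itself and warns if a caller
 * passes something that is not convertible to void *. */
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

int main(void)
{
	struct device dev = { 0 };
	char buf[64];

	printf("old: %#x\n", virt_to_dma_old(&dev, buf));
	printf("new: %#x\n", virt_to_dma(&dev, buf));

	/* virt_to_dma(&dev, 42) would now draw a compiler diagnostic;
	 * the macro version accepts it silently. */
	return 0;
}
```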
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 7bcd69a9a88c..bf7c737c9226 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -314,20 +314,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 
 /*
- * Optional device DMA address remapping. Do _not_ use directly!
- * We should really eliminate virt_to_bus() here - it's deprecated.
- */
-#ifndef __arch_page_to_dma
-#define page_to_dma(dev, page)		((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
-#define dma_to_virt(dev, addr)		((void *)__bus_to_virt(addr))
-#define virt_to_dma(dev, addr)		((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
-#else
-#define page_to_dma(dev, page)		(__arch_page_to_dma(dev, page))
-#define dma_to_virt(dev, addr)		(__arch_dma_to_virt(dev, addr))
-#define virt_to_dma(dev, addr)		(__arch_virt_to_dma(dev, addr))
-#endif
-
-/*
  * Optional coherency support.  Currently used only by selected
  * Intel XSC3-based systems.
  */
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index 037486c5f4a4..a325caf80d04 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -76,13 +76,14 @@
 		(dma_addr_t)virt_to_lbus(page_address(page)) : \
 		(dma_addr_t)__virt_to_bus(page_address(page));})
 
-#define __arch_dma_to_virt(dev, addr)	({is_lbus_device(dev) ? \
-					lbus_to_virt(addr) : \
-					__bus_to_virt(addr);})
+#define __arch_dma_to_virt(dev, addr)	({ (void *) (is_lbus_device(dev) ? \
+					lbus_to_virt(addr) : \
+					__bus_to_virt(addr)); })
 
-#define __arch_virt_to_dma(dev, addr)	({is_lbus_device(dev) ? \
-					virt_to_lbus(addr) : \
-					__virt_to_bus(addr);})
+#define __arch_virt_to_dma(dev, addr)	({ unsigned long __addr = (unsigned long)(addr); \
+					(dma_addr_t) (is_lbus_device(dev) ? \
+					virt_to_lbus(__addr) : \
+					__virt_to_bus(__addr)); })
 
 #endif	/* CONFIG_ARCH_OMAP15XX */
 
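The OMAP1510 local-bus hunk is the flip side of the new inline wrappers: when a platform defines __arch_virt_to_dma / __arch_dma_to_virt, the generic functions in asm/dma-mapping.h simply forward to them, so the statement-expression macros must now yield exactly the dma_addr_t and void * types the wrappers declare; hence the added casts and the __addr temporary. A self-contained sketch of that override pattern (plain C with made-up address translations, not the real OMAP code; relies on GCC statement expressions):

```c
#include <stdio.h>

typedef unsigned int dma_addr_t;
struct device { int is_lbus; };

/* Made-up translation helpers standing in for the OMAP ones. */
#define is_lbus_device(dev)	((dev)->is_lbus)
#define virt_to_lbus(x)		((x) - 0x10000000u)
#define __virt_to_bus(x)	(x)

/* Platform override, shaped like the fixed OMAP macro: the __addr
 * temporary evaluates the argument once, and the (dma_addr_t) cast
 * gives the statement expression a single well-defined result type. */
#define __arch_virt_to_dma(dev, addr)				\
	({ unsigned long __addr = (unsigned long)(addr);	\
	   (dma_addr_t)(is_lbus_device(dev) ?			\
			virt_to_lbus(__addr) :			\
			__virt_to_bus(__addr)); })

/* Generic wrapper, shaped like the new asm/dma-mapping.h: forward to
 * the platform hook when it is defined, otherwise use the default. */
#ifdef __arch_virt_to_dma
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#else
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)addr);
}
#endif

int main(void)
{
	struct device lbus = { 1 }, sdram = { 0 };
	char buf[16];

	printf("lbus=%#x sdram=%#x\n",
	       virt_to_dma(&lbus, buf), virt_to_dma(&sdram, buf));
	return 0;
}
```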