diff options
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 44 |
1 file changed, 41 insertions, 3 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093cee09..9cd5334019e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -25,9 +25,11 @@ | |||
25 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/sizes.h> | 26 | #include <asm/sizes.h> |
27 | 27 | ||
28 | #include "mm.h" | ||
29 | |||
28 | static u64 get_coherent_dma_mask(struct device *dev) | 30 | static u64 get_coherent_dma_mask(struct device *dev) |
29 | { | 31 | { |
30 | u64 mask = ISA_DMA_THRESHOLD; | 32 | u64 mask = (u64)arm_dma_limit; |
31 | 33 | ||
32 | if (dev) { | 34 | if (dev) { |
33 | mask = dev->coherent_dma_mask; | 35 | mask = dev->coherent_dma_mask; |
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
41 | return 0; | 43 | return 0; |
42 | } | 44 | } |
43 | 45 | ||
44 | if ((~mask) & ISA_DMA_THRESHOLD) { | 46 | if ((~mask) & (u64)arm_dma_limit) { |
45 | dev_warn(dev, "coherent DMA mask %#llx is smaller " | 47 | dev_warn(dev, "coherent DMA mask %#llx is smaller " |
46 | "than system GFP_DMA mask %#llx\n", | 48 | "than system GFP_DMA mask %#llx\n", |
47 | mask, (unsigned long long)ISA_DMA_THRESHOLD); | 49 | mask, (u64)arm_dma_limit); |
48 | return 0; | 50 | return 0; |
49 | } | 51 | } |
50 | } | 52 | } |
@@ -308,6 +310,13 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
308 | struct page *page; | 310 | struct page *page; |
309 | void *addr; | 311 | void *addr; |
310 | 312 | ||
313 | /* Following is a work-around (a.k.a. hack) to prevent pages | ||
314 | * with __GFP_COMP being passed to split_page() which cannot | ||
315 | * handle them. The real problem is that this flag probably | ||
316 | * should be 0 on ARM as it is not supported on this | ||
317 | * platform--see CONFIG_HUGETLB_PAGE. */ | ||
318 | gfp &= ~(__GFP_COMP); | ||
319 | |||
311 | *handle = ~0; | 320 | *handle = ~0; |
312 | size = PAGE_ALIGN(size); | 321 | size = PAGE_ALIGN(size); |
313 | 322 | ||
@@ -322,6 +331,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
322 | 331 | ||
323 | if (addr) | 332 | if (addr) |
324 | *handle = pfn_to_dma(dev, page_to_pfn(page)); | 333 | *handle = pfn_to_dma(dev, page_to_pfn(page)); |
334 | else | ||
335 | __dma_free_buffer(page, size); | ||
325 | 336 | ||
326 | return addr; | 337 | return addr; |
327 | } | 338 | } |
@@ -657,6 +668,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
657 | } | 668 | } |
658 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 669 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
659 | 670 | ||
671 | /* | ||
672 | * Return whether the given device DMA address mask can be supported | ||
673 | * properly. For example, if your device can only drive the low 24-bits | ||
674 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
675 | * to this function. | ||
676 | */ | ||
677 | int dma_supported(struct device *dev, u64 mask) | ||
678 | { | ||
679 | if (mask < (u64)arm_dma_limit) | ||
680 | return 0; | ||
681 | return 1; | ||
682 | } | ||
683 | EXPORT_SYMBOL(dma_supported); | ||
684 | |||
685 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
686 | { | ||
687 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
688 | return -EIO; | ||
689 | |||
690 | #ifndef CONFIG_DMABOUNCE | ||
691 | *dev->dma_mask = dma_mask; | ||
692 | #endif | ||
693 | |||
694 | return 0; | ||
695 | } | ||
696 | EXPORT_SYMBOL(dma_set_mask); | ||
697 | |||
660 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 698 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
661 | 699 | ||
662 | static int __init dma_debug_do_init(void) | 700 | static int __init dma_debug_do_init(void) |