author		Ralf Baechle <ralf@linux-mips.org>	2007-11-02 22:05:43 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-11-26 12:26:14 -0500
commit		cce335ae47e231398269fb05fa48e0e9cbf289e0 (patch)
tree		a01ea9ad318d459393a905df5c53b68b754300da /arch/mips/mm/dma-default.c
parent		940f6b48a130e0a33cb8bd397dd0e277166470ad (diff)
[MIPS] 64-bit Sibyte kernels need DMA32.
Sibyte SOCs only have 32-bit PCI.  Due to the sparse use of the address space, only the first 1GB of memory is mapped at physical addresses below 1GB.  If a system has more than 1GB of memory, 32-bit DMA will not be able to reach all of it.  For now this patch is good enough to keep Sibyte users happy, but it seems eventually something like swiotlb will be needed for Sibyte.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
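For orientation, below is a minimal user-space sketch of the zone-selection decision that the new massage_gfp_flags() helper encodes. DMA_BIT_MASK() is re-defined locally as a stand-in for the kernel macro, and pick_dma_zone() is a hypothetical name used only for this illustration; the real helper also forces __GFP_DMA when dev is NULL and sets __GFP_NORETRY, both omitted here.

#include <stdio.h>

/* Local stand-in for the kernel's DMA_BIT_MASK(); illustrative only. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * pick_dma_zone() is a hypothetical helper mirroring the decision in
 * massage_gfp_flags(): a device whose coherent_dma_mask cannot reach all
 * of memory is steered toward ZONE_DMA (24-bit, ISA-style) or ZONE_DMA32
 * (32-bit), so allocations stay within what the device can address.
 */
static const char *pick_dma_zone(unsigned long long coherent_dma_mask)
{
	if (coherent_dma_mask < DMA_BIT_MASK(24))
		return "ZONE_DMA (__GFP_DMA)";
	if (coherent_dma_mask < DMA_BIT_MASK(32))
		return "ZONE_DMA32 (__GFP_DMA32)";
	return "no zone restriction";
}

int main(void)
{
	/* Narrow masks land in the narrow zones; a full 64-bit mask does not. */
	printf("20-bit mask -> %s\n", pick_dma_zone(DMA_BIT_MASK(20)));
	printf("31-bit mask -> %s\n", pick_dma_zone(DMA_BIT_MASK(31)));
	printf("64-bit mask -> %s\n", pick_dma_zone(DMA_BIT_MASK(64)));
	return 0;
}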
Diffstat (limited to 'arch/mips/mm/dma-default.c')
-rw-r--r--	arch/mips/mm/dma-default.c	37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 33519ce49540..ae76795685cc 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -40,16 +40,38 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 		current_cpu_type() == CPU_R12000);
 }
 
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+	if (dev == NULL)
+		gfp |= __GFP_DMA;
+	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+		gfp |= __GFP_DMA;
+	else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+		gfp |= __GFP_DMA32;
+	else
+#endif
+		;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+	return gfp;
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+	gfp = massage_gfp_flags(dev, gfp);
 
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
 	ret = (void *) __get_free_pages(gfp, get_order(size));
 
 	if (ret != NULL) {
@@ -67,11 +89,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+	gfp = massage_gfp_flags(dev, gfp);
 
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
 	ret = (void *) __get_free_pages(gfp, get_order(size));
 
 	if (ret) {
@@ -343,7 +362,7 @@ int dma_supported(struct device *dev, u64 mask)
 	 * so we can't guarantee allocations that must be
 	 * within a tighter range than GFP_DMA..
 	 */
-	if (mask < 0x00ffffff)
+	if (mask < DMA_BIT_MASK(24))
 		return 0;
 
 	return 1;
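The dma_supported() hunk above is purely a spelling change: DMA_BIT_MASK(24) evaluates to the same 24-bit limit as the old 0x00ffffff literal. A small stand-alone check (again with DMA_BIT_MASK() re-defined locally, since the kernel header is not available outside the tree) illustrates the equivalence:

#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's DMA_BIT_MASK(); illustrative only. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* The old literal and the new macro spell the same value. */
	assert(DMA_BIT_MASK(24) == 0x00ffffffULL);
	printf("DMA_BIT_MASK(24) = %#llx\n", DMA_BIT_MASK(24));
	printf("DMA_BIT_MASK(32) = %#llx\n", DMA_BIT_MASK(32));
	return 0;
}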