author		Ralf Baechle <ralf@linux-mips.org>	2007-11-02 22:05:43 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-11-26 12:26:14 -0500
commit		cce335ae47e231398269fb05fa48e0e9cbf289e0
tree		a01ea9ad318d459393a905df5c53b68b754300da /arch/mips/mm
parent		940f6b48a130e0a33cb8bd397dd0e277166470ad
[MIPS] 64-bit Sibyte kernels need DMA32.
Sibyte SOCs only have 32-bit PCI. Due to the sparse use of the address space, only the first 1GB of memory is mapped at physical addresses below 1GB. If a system has more than 1GB of memory, 32-bit DMA will not be able to reach all of it.

For now this patch is good enough to keep Sibyte users happy, but it seems eventually something like swiotlb will be needed for Sibyte.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
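For context, a sketch of the driver side this enables (hypothetical device and function names, not part of this patch): a PCI device behind the Sibyte's 32-bit bus advertises 32-bit DMA masks, and with the allocator change below its coherent buffers are steered into memory below 4GB.

/*
 * Illustration only: example_setup_dma() and the device are made up;
 * the DMA API calls are the standard ones of this kernel era.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_setup_dma(struct pci_dev *pdev)
{
	void *cpu_addr;
	dma_addr_t dma_handle;

	/* The device can only generate 32-bit bus addresses. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * coherent_dma_mask is now DMA_BIT_MASK(32), so the MIPS
	 * allocator in this patch picks __GFP_DMA32 and the buffer
	 * stays reachable by the 32-bit PCI bus.
	 */
	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
				      &dma_handle, GFP_KERNEL);
	if (cpu_addr == NULL)
		return -ENOMEM;

	/* Hand dma_handle to the device, cpu_addr to the driver. */
	return 0;
}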
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/dma-default.c	37
-rw-r--r--	arch/mips/mm/init.c	43
2 files changed, 44 insertions(+), 36 deletions(-)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 33519ce49540..ae76795685cc 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -40,16 +40,38 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 		current_cpu_type() == CPU_R12000);
 }
 
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+	if (dev == NULL)
+		gfp |= __GFP_DMA;
+	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+		gfp |= __GFP_DMA;
+	else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+		gfp |= __GFP_DMA32;
+	else
+#endif
+		;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+	return gfp;
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+	gfp = massage_gfp_flags(dev, gfp);
 
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
 	ret = (void *) __get_free_pages(gfp, get_order(size));
 
 	if (ret != NULL) {
@@ -67,11 +89,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+	gfp = massage_gfp_flags(dev, gfp);
 
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
 	ret = (void *) __get_free_pages(gfp, get_order(size));
 
 	if (ret) {
@@ -343,7 +362,7 @@ int dma_supported(struct device *dev, u64 mask)
 	 * so we can't guarantee allocations that must be
 	 * within a tighter range than GFP_DMA..
 	 */
-	if (mask < 0x00ffffff)
+	if (mask < DMA_BIT_MASK(24))
 		return 0;
 
 	return 1;
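For reference, DMA_BIT_MASK() (from linux/dma-mapping.h) is defined along these lines:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

so DMA_BIT_MASK(24) equals the old 0x00ffffff and DMA_BIT_MASK(32) equals 0xffffffff; the conversions in this hunk are purely cosmetic.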
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index ec3b9e9f30f4..480dec04f552 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -347,11 +347,8 @@ static int __init page_is_ram(unsigned long pagenr)
 
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-#ifndef CONFIG_FLATMEM
-	unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
-	unsigned long i, j, pfn;
-#endif
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long lastpfn;
 
 	pagetable_init();
 
@@ -361,35 +358,27 @@ void __init paging_init(void)
 	kmap_coherent_init();
 
 #ifdef CONFIG_ZONE_DMA
-	if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
-		zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
-		zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
-	} else if (max_low_pfn < MAX_DMA_PFN)
-		zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
-	else
+	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 #endif
-	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
-
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	lastpfn = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+	lastpfn = highend_pfn;
 
-	if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
 		printk(KERN_WARNING "This processor doesn't support highmem."
-		       " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
-		zones_size[ZONE_HIGHMEM] = 0;
+		       " %ldk highmem ignored\n",
+		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+		lastpfn = max_low_pfn;
 	}
 #endif
 
-#ifdef CONFIG_FLATMEM
-	free_area_init(zones_size);
-#else
-	pfn = min_low_pfn;
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		for (j = 0; j < zones_size[i]; j++, pfn++)
-			if (!page_is_ram(pfn))
-				zholes_size[i]++;
-	free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+	free_area_init_nodes(max_zone_pfns);
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
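The paging_init() rewrite switches from per-zone sizes to cumulative upper bounds: each max_zone_pfns[] entry is the highest PFN its zone may reach, and free_area_init_nodes() derives the actual zone spans and holes from the memory ranges registered earlier at boot, which is what lets the hand-rolled zholes_size[] bookkeeping above go away. A rough worked example (illustrative numbers only, assuming 4KB pages):

/*
 * Hypothetical Sibyte-style layout: some RAM below the 32-bit PCI
 * window, the rest remapped above 4GB.  Not taken from real hardware.
 */
max_zone_pfns[ZONE_DMA32]  = MAX_DMA32_PFN;	/* 4GB boundary = PFN 0x100000 */
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;	/* end of RAM, e.g. PFN 0x140000 */

/* ZONE_DMA32 covers PFNs [0, 0x100000), ZONE_NORMAL the remainder. */
free_area_init_nodes(max_zone_pfns);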