aboutsummaryrefslogtreecommitdiffstats
path: root/arch/tile
diff options
context:
space:
mode:
authorChris Metcalf <cmetcalf@tilera.com>2012-05-09 12:26:30 -0400
committerChris Metcalf <cmetcalf@tilera.com>2012-07-18 16:40:11 -0400
commiteef015c8aa74451f848307fe5f65485070533bbb (patch)
tree7fca680be2246812c14920c0fb44fbe717786dc6 /arch/tile
parentbbaa22c3a0d0be4406d26e5a73d1e8e504787986 (diff)
arch/tile: enable ZONE_DMA for tilegx
This is required for PCI root complex legacy support and USB OHCI root complex support. With this change, tilegx now supports allocating memory whose PA fits in 32 bits. Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile')
-rw-r--r--arch/tile/Kconfig3
-rw-r--r--arch/tile/kernel/pci-dma.c15
-rw-r--r--arch/tile/kernel/setup.c12
-rw-r--r--arch/tile/mm/init.c11
4 files changed, 28 insertions, 13 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index a5302d319229..0ad771f7a7e1 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -212,6 +212,9 @@ config HIGHMEM
212 212
213 If unsure, say "true". 213 If unsure, say "true".
214 214
215config ZONE_DMA
216 def_bool y
217
215# We do not currently support disabling NUMA. 218# We do not currently support disabling NUMA.
216config NUMA 219config NUMA
217 bool # "NUMA Memory Allocation and Scheduler Support" 220 bool # "NUMA Memory Allocation and Scheduler Support"
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 9814d7082f24..edd856a000c5 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -45,14 +45,17 @@ void *dma_alloc_coherent(struct device *dev,
45 gfp |= __GFP_ZERO; 45 gfp |= __GFP_ZERO;
46 46
47 /* 47 /*
48 * By forcing NUMA node 0 for 32-bit masks we ensure that the 48 * If the mask specifies that the memory be in the first 4 GB, then
49 * high 32 bits of the resulting PA will be zero. If the mask 49 * we force the allocation to come from the DMA zone. We also
50 * size is, e.g., 24, we may still not be able to guarantee a 50 * force the node to 0 since that's the only node where the DMA
51 * suitable memory address, in which case we will return NULL. 51 * zone isn't empty. If the mask size is smaller than 32 bits, we
52 * But such devices are uncommon. 52 * may still not be able to guarantee a suitable memory address, in
53 * which case we will return NULL. But such devices are uncommon.
53 */ 54 */
54 if (dma_mask <= DMA_BIT_MASK(32)) 55 if (dma_mask <= DMA_BIT_MASK(32)) {
56 gfp |= GFP_DMA;
55 node = 0; 57 node = 0;
58 }
56 59
57 pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); 60 pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
58 if (pg == NULL) 61 if (pg == NULL)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6d179dfcc15e..fdde3b6986e5 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -658,6 +658,8 @@ static void __init zone_sizes_init(void)
658 unsigned long zones_size[MAX_NR_ZONES] = { 0 }; 658 unsigned long zones_size[MAX_NR_ZONES] = { 0 };
659 int size = percpu_size(); 659 int size = percpu_size();
660 int num_cpus = smp_height * smp_width; 660 int num_cpus = smp_height * smp_width;
661 const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
662
661 int i; 663 int i;
662 664
663 for (i = 0; i < num_cpus; ++i) 665 for (i = 0; i < num_cpus; ++i)
@@ -729,6 +731,14 @@ static void __init zone_sizes_init(void)
729 zones_size[ZONE_NORMAL] = end - start; 731 zones_size[ZONE_NORMAL] = end - start;
730#endif 732#endif
731 733
734 if (start < dma_end) {
735 zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
736 dma_end - start);
737 zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
738 } else {
739 zones_size[ZONE_DMA] = 0;
740 }
741
732 /* Take zone metadata from controller 0 if we're isolnode. */ 742 /* Take zone metadata from controller 0 if we're isolnode. */
733 if (node_isset(i, isolnodes)) 743 if (node_isset(i, isolnodes))
734 NODE_DATA(i)->bdata = &bootmem_node_data[0]; 744 NODE_DATA(i)->bdata = &bootmem_node_data[0];
@@ -738,7 +748,7 @@ static void __init zone_sizes_init(void)
738 PFN_UP(node_percpu[i])); 748 PFN_UP(node_percpu[i]));
739 749
740 /* Track the type of memory on each node */ 750 /* Track the type of memory on each node */
741 if (zones_size[ZONE_NORMAL]) 751 if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
742 node_set_state(i, N_NORMAL_MEMORY); 752 node_set_state(i, N_NORMAL_MEMORY);
743#ifdef CONFIG_HIGHMEM 753#ifdef CONFIG_HIGHMEM
744 if (end != start) 754 if (end != start)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index a2417a0a8222..ef29d6c5e10e 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -733,16 +733,15 @@ static void __init set_non_bootmem_pages_init(void)
733 for_each_zone(z) { 733 for_each_zone(z) {
734 unsigned long start, end; 734 unsigned long start, end;
735 int nid = z->zone_pgdat->node_id; 735 int nid = z->zone_pgdat->node_id;
736#ifdef CONFIG_HIGHMEM
736 int idx = zone_idx(z); 737 int idx = zone_idx(z);
738#endif
737 739
738 start = z->zone_start_pfn; 740 start = z->zone_start_pfn;
739 if (start == 0)
740 continue; /* bootmem */
741 end = start + z->spanned_pages; 741 end = start + z->spanned_pages;
742 if (idx == ZONE_NORMAL) { 742 start = max(start, node_free_pfn[nid]);
743 BUG_ON(start != node_start_pfn[nid]); 743 start = max(start, max_low_pfn);
744 start = node_free_pfn[nid]; 744
745 }
746#ifdef CONFIG_HIGHMEM 745#ifdef CONFIG_HIGHMEM
747 if (idx == ZONE_HIGHMEM) 746 if (idx == ZONE_HIGHMEM)
748 totalhigh_pages += z->spanned_pages; 747 totalhigh_pages += z->spanned_pages;