about summary refs log tree commit diff stats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-07-14 05:44:23 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2011-07-14 14:47:43 -0400
commit0608f70c78a384c2f225f2de226ca057a196f108 (patch)
treea5ffb473cc55ca8bb538b9869cb61d2982d99f5e /arch/x86/mm
parent7c0caeb866b0f648d91bb75b8bc6f86af95bb033 (diff)
x86: Use HAVE_MEMBLOCK_NODE_MAP
From 5732e1247898d67cbf837585150fe9f68974671d Mon Sep 17 00:00:00 2001 From: Tejun Heo <tj@kernel.org> Date: Thu, 14 Jul 2011 11:22:16 +0200 Convert x86 to HAVE_MEMBLOCK_NODE_MAP. The only difference in memory handling is that allocations can no longer cross node boundaries whether they're node affine or not, which shouldn't matter at all. This conversion will enable further simplification of boot memory handling. -v2: Fix build failure on !NUMA configurations discovered by hpa. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/20110714094423.GG3455@htj.dyndns.org Cc: Yinghai Lu <yinghai@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/init_32.c8
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/memblock.c14
-rw-r--r--arch/x86/mm/numa.c15
4 files changed, 12 insertions, 27 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 29f7c6d98179..5d173db93c4e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -650,18 +650,18 @@ void __init initmem_init(void)
650 highstart_pfn = highend_pfn = max_pfn; 650 highstart_pfn = highend_pfn = max_pfn;
651 if (max_pfn > max_low_pfn) 651 if (max_pfn > max_low_pfn)
652 highstart_pfn = max_low_pfn; 652 highstart_pfn = max_low_pfn;
653 memblock_x86_register_active_regions(0, 0, highend_pfn);
654 sparse_memory_present_with_active_regions(0);
655 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", 653 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
656 pages_to_mb(highend_pfn - highstart_pfn)); 654 pages_to_mb(highend_pfn - highstart_pfn));
657 num_physpages = highend_pfn; 655 num_physpages = highend_pfn;
658 high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; 656 high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
659#else 657#else
660 memblock_x86_register_active_regions(0, 0, max_low_pfn);
661 sparse_memory_present_with_active_regions(0);
662 num_physpages = max_low_pfn; 658 num_physpages = max_low_pfn;
663 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; 659 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
664#endif 660#endif
661
662 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
663 sparse_memory_present_with_active_regions(0);
664
665#ifdef CONFIG_FLATMEM 665#ifdef CONFIG_FLATMEM
666 max_mapnr = num_physpages; 666 max_mapnr = num_physpages;
667#endif 667#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..7fb064cbdcec 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -607,7 +607,7 @@ kernel_physical_mapping_init(unsigned long start,
607#ifndef CONFIG_NUMA 607#ifndef CONFIG_NUMA
608void __init initmem_init(void) 608void __init initmem_init(void)
609{ 609{
610 memblock_x86_register_active_regions(0, 0, max_pfn); 610 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
611} 611}
612#endif 612#endif
613 613
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index e4569f85b390..97fbc3973934 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -283,20 +283,6 @@ static int __init memblock_x86_find_active_region(const struct memblock_region *
283 return 1; 283 return 1;
284} 284}
285 285
286/* Walk the memblock.memory map and register active regions within a node */
287void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
288 unsigned long last_pfn)
289{
290 unsigned long ei_startpfn;
291 unsigned long ei_endpfn;
292 struct memblock_region *r;
293
294 for_each_memblock(memory, r)
295 if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
296 &ei_startpfn, &ei_endpfn))
297 add_active_range(nid, ei_startpfn, ei_endpfn);
298}
299
300/* 286/*
301 * Find the hole size (in bytes) in the memory range. 287 * Find the hole size (in bytes) in the memory range.
302 * @start: starting address of the memory range to scan 288 * @start: starting address of the memory range to scan
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 824efadc5741..f4a40bdb2e4e 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -498,13 +498,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
498 if (WARN_ON(nodes_empty(node_possible_map))) 498 if (WARN_ON(nodes_empty(node_possible_map)))
499 return -EINVAL; 499 return -EINVAL;
500 500
501 for (i = 0; i < mi->nr_blks; i++) 501 for (i = 0; i < mi->nr_blks; i++) {
502 memblock_x86_register_active_regions(mi->blk[i].nid, 502 struct numa_memblk *mb = &mi->blk[i];
503 mi->blk[i].start >> PAGE_SHIFT, 503 memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
504 mi->blk[i].end >> PAGE_SHIFT); 504 }
505
506 /* for out of order entries */
507 sort_node_map();
508 505
509 /* 506 /*
510 * If sections array is gonna be used for pfn -> nid mapping, check 507 * If sections array is gonna be used for pfn -> nid mapping, check
@@ -538,6 +535,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
538 setup_node_data(nid, start, end); 535 setup_node_data(nid, start, end);
539 } 536 }
540 537
538 /* Dump memblock with node info and return. */
539 memblock_dump_all();
541 return 0; 540 return 0;
542} 541}
543 542
@@ -575,7 +574,7 @@ static int __init numa_init(int (*init_func)(void))
575 nodes_clear(node_possible_map); 574 nodes_clear(node_possible_map);
576 nodes_clear(node_online_map); 575 nodes_clear(node_online_map);
577 memset(&numa_meminfo, 0, sizeof(numa_meminfo)); 576 memset(&numa_meminfo, 0, sizeof(numa_meminfo));
578 remove_all_active_ranges(); 577 WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
579 numa_reset_distance(); 578 numa_reset_distance();
580 579
581 ret = init_func(); 580 ret = init_func();