author    Tejun Heo <tj@kernel.org>        2011-04-04 18:23:53 -0400
committer H. Peter Anvin <hpa@zytor.com>   2011-04-06 20:57:16 -0400
commit    7210cf9217937e470a9acbc113a590f476b9c047
tree      7d253d9d701e54710b0d26c67a38b67e860e6a2a /arch/x86/mm
parent    af7c1a6e8374e05aab4a98ce4d2fb07b66506a02
x86-32, numa: Calculate remap size in common code
Only pgdat and memmap use the remap area, and there isn't much benefit in allowing per-node override. In addition, the use of node_remap_size[] is confusing in that it contains the number of bytes before remap initialization and the number of pages afterwards.

Move the remap size calculation for memmap from the specific NUMA config implementations to init_alloc_remap() and make node_remap_size[] static.

The only behavior difference is that, before this patch, numaq_32 didn't consider max_pfn when calculating the memmap size, but it's enforced after this patch, which is the right thing to do.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-8-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
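[Editor's note: a condensed sketch of the common calculation this patch installs in init_alloc_remap(), pieced together from the diff below rather than quoted as a standalone excerpt. The remap area is sized for the node's memmap plus its pg_data_t, rounded up to a large page:

	/* remap area must hold the node's memmap plus its pg_data_t */
	size  = node_memmap_size_bytes(nid, node_start_pfn[nid],
				       min(node_end_pfn[nid], max_pfn));
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
	size  = ALIGN(size, LARGE_PAGE_BYTES);	/* mapped via large pages */

Clamping the end pfn to max_pfn here is what enforces the max_pfn consideration for numaq_32 mentioned above.]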
Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/numa_32.c | 10 ++++------
 arch/x86/mm/srat_32.c |  1 -
 2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 99310d26fe34..9a7336550f0d 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -104,7 +104,7 @@ extern unsigned long highend_pfn, highstart_pfn;
 
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
 
-unsigned long node_remap_size[MAX_NUMNODES];
+static unsigned long node_remap_size[MAX_NUMNODES];
 static void *node_remap_start_vaddr[MAX_NUMNODES];
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
@@ -129,7 +129,6 @@ int __init get_memcfg_numa_flat(void)
 	node_end_pfn[0] = max_pfn;
 	memblock_x86_register_active_regions(0, 0, max_pfn);
 	memory_present(0, 0, max_pfn);
-	node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
 
 	/* Indicate there is one node available. */
 	nodes_clear(node_online_map);
@@ -282,11 +281,10 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	if (node_end_pfn[nid] > max_pfn)
 		node_end_pfn[nid] = max_pfn;
 
-	/* ensure the remap includes space for the pgdat. */
-	size = node_remap_size[nid];
+	/* calculate the necessary space aligned to large page size */
+	size = node_memmap_size_bytes(nid, node_start_pfn[nid],
+				      min(node_end_pfn[nid], max_pfn));
 	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
-
-	/* align to large page */
 	size = ALIGN(size, LARGE_PAGE_BYTES);
 
 	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 48651c6f657d..1b9e82c96dc5 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -276,7 +276,6 @@ int __init get_memcfg_from_srat(void)
 		unsigned long end = min(node_end_pfn[nid], max_pfn);
 
 		memory_present(nid, start, end);
-		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
 	}
 	return 1;
 out_fail: