-rw-r--r--  arch/x86/mm/numa_32.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 975a76f622ba..900863204be2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -265,8 +265,10 @@ void resume_map_numa_kva(pgd_t *pgd_base)
  * opportunistically and the callers will fall back to other memory
  * allocation mechanisms on failure.
  */
-static __init void init_alloc_remap(int nid)
+static __init void init_alloc_remap(int nid, u64 start, u64 end)
 {
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = end >> PAGE_SHIFT;
 	unsigned long size, pfn;
 	u64 node_pa, remap_pa;
 	void *remap_va;
@@ -276,24 +278,15 @@ static __init void init_alloc_remap(int nid)
 	 * memory could be added but not currently present.
 	 */
 	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-	       nid, node_start_pfn[nid], node_end_pfn[nid]);
-	if (node_start_pfn[nid] > max_pfn)
-		return;
-	if (!node_end_pfn[nid])
-		return;
-	if (node_end_pfn[nid] > max_pfn)
-		node_end_pfn[nid] = max_pfn;
+	       nid, start_pfn, end_pfn);
 
 	/* calculate the necessary space aligned to large page size */
-	size = node_memmap_size_bytes(nid, node_start_pfn[nid],
-				      min(node_end_pfn[nid], max_pfn));
+	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
 	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 	size = ALIGN(size, LARGE_PAGE_BYTES);
 
 	/* allocate node memory and the lowmem remap area */
-	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
-					 (u64)node_end_pfn[nid] << PAGE_SHIFT,
-					 size, LARGE_PAGE_BYTES);
+	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
 	if (node_pa == MEMBLOCK_ERROR) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
 			   size, nid);
@@ -391,8 +384,14 @@ void __init initmem_init(void)
 	get_memcfg_numa();
 	numa_init_array();
 
-	for_each_online_node(nid)
-		init_alloc_remap(nid);
+	for_each_online_node(nid) {
+		u64 start = (u64)node_start_pfn[nid] << PAGE_SHIFT;
+		u64 end = min((u64)node_end_pfn[nid] << PAGE_SHIFT,
+			      (u64)max_pfn << PAGE_SHIFT);
+
+		if (start < end)
+			init_alloc_remap(nid, start, end);
+	}
 
 #ifdef CONFIG_HIGHMEM
 	highstart_pfn = highend_pfn = max_pfn;
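
The patch gives init_alloc_remap() an explicit [start, end) physical address range and moves the max_pfn clamping and empty-range handling into the caller, initmem_init(). Below is a minimal userspace sketch of that caller-side logic, not kernel code: the three-node PFN layout, the max_pfn value, and the stubbed init_alloc_remap() are all hypothetical, chosen only to show that the single start < end guard covers the cases the removed checks used to handle.

/*
 * Minimal userspace sketch, not kernel code: it mimics the new caller-side
 * clamp-and-guard logic in initmem_init().  The PFN values and the stub
 * below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static void init_alloc_remap(int nid, uint64_t start, uint64_t end)
{
	/* stand-in for the real allocator; just report the range it got */
	printf("node %d: remap setup for [%#llx - %#llx)\n",
	       nid, (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	/* hypothetical per-node PFN ranges and lowmem limit */
	unsigned long node_start_pfn[] = { 0x00000, 0x40000, 0x90000 };
	unsigned long node_end_pfn[]   = { 0x40000, 0x90000, 0xa0000 };
	unsigned long max_pfn = 0x80000;

	for (int nid = 0; nid < 3; nid++) {
		uint64_t start = (uint64_t)node_start_pfn[nid] << PAGE_SHIFT;
		uint64_t end = (uint64_t)node_end_pfn[nid] << PAGE_SHIFT;
		uint64_t lim = (uint64_t)max_pfn << PAGE_SHIFT;

		/* clamp to lowmem, as the min() in the patched caller does */
		if (end > lim)
			end = lim;

		/*
		 * One start < end test replaces the checks removed from
		 * init_alloc_remap(): a node starting above max_pfn or an
		 * unset (zero) end pfn yields an empty range and is skipped,
		 * while a node that merely extends past max_pfn is clamped
		 * and still processed.
		 */
		if (start < end)
			init_alloc_remap(nid, start, end);
		else
			printf("node %d: skipped (empty range)\n", nid);
	}
	return 0;
}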