author     Tejun Heo <tj@kernel.org>        2011-04-04 18:23:49 -0400
committer  H. Peter Anvin <hpa@zytor.com>   2011-04-06 20:56:57 -0400
commit     5b8443b25c0f323ec190d094e4b441957b02664e
tree       ab64e1087999f668729ce45c2e9543ca63f298c9 /arch/x86/mm
parent     a6c24f7a705d939ddd2fcaa443fa3d8e852b933d
x86-32, numa: Remove redundant top-down alloc code from remap initialization
memblock_find_in_range() now does top-down allocation by default, so
there's no reason for its callers to explicitly implement it by
gradually lowering the start address.
Remove redundant top-down allocation logic from init_meminit() and
calculate_numa_remap_pages().
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-4-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
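For illustration only (not part of the commit): the userspace sketch below models why the caller-side loop becomes redundant once the allocator itself searches top-down. find_in_range() is a toy stand-in for memblock_find_in_range(), and the free-region bounds, size, and alignment constants are made up for the example.

/* Toy model of removing an open-coded top-down search (illustrative only). */
#include <stdio.h>
#include <stdint.h>

#define TOY_ERROR 0ULL                              /* toy stand-in for MEMBLOCK_ERROR */
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

/* One free region models available memory: [free_start, free_end). */
static const uint64_t free_start = 0x1000000, free_end = 0x8000000;

/*
 * Toy allocator: like memblock_find_in_range() after the top-down change,
 * it returns the highest suitably aligned block inside [start, end).
 */
static uint64_t find_in_range(uint64_t start, uint64_t end,
                              uint64_t size, uint64_t align)
{
        uint64_t lo = start > free_start ? start : free_start;
        uint64_t hi = end < free_end ? end : free_end;

        if (hi < lo + size)
                return TOY_ERROR;
        return ALIGN_DOWN(hi - size, align);        /* top-down: highest fit */
}

int main(void)
{
        const uint64_t size = 0x200000, align = 0x200000;

        /* Old pattern: the caller lowers the start address step by step. */
        uint64_t target = ALIGN_DOWN(free_end - size, align);
        uint64_t got_old;
        do {
                got_old = find_in_range(target, free_end, size, align);
                target -= align;
        } while (got_old == TOY_ERROR && target > free_start);

        /* New pattern: one call; the allocator already prefers high addresses. */
        uint64_t got_new = find_in_range(free_start, free_end, size, align);

        printf("caller-side loop: %#llx\n", (unsigned long long)got_old);
        printf("single call:      %#llx\n", (unsigned long long)got_new);
        return 0;
}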
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/numa_32.c | 43
1 file changed, 14 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 50e82507eab4..60701a5e0de0 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -270,8 +270,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
         unsigned long size, reserve_pages = 0;
 
         for_each_online_node(nid) {
-                u64 node_kva_target;
-                u64 node_kva_final;
+                u64 node_kva;
 
                 /*
                  * The acpi/srat node info can show hot-add memroy zones
@@ -295,19 +294,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
                 /* now the roundup is correct, convert to PAGE_SIZE pages */
                 size = size * PTRS_PER_PTE;
 
-                node_kva_target = round_down(node_end_pfn[nid] - size,
-                                             PTRS_PER_PTE);
-                node_kva_target <<= PAGE_SHIFT;
-                do {
-                        node_kva_final = memblock_find_in_range(node_kva_target,
-                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
-                                        ((u64)size)<<PAGE_SHIFT,
-                                        LARGE_PAGE_BYTES);
-                        node_kva_target -= LARGE_PAGE_BYTES;
-                } while (node_kva_final == MEMBLOCK_ERROR &&
-                         (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-                if (node_kva_final == MEMBLOCK_ERROR)
+                node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
+                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
+                                        ((u64)size)<<PAGE_SHIFT,
+                                        LARGE_PAGE_BYTES);
+                if (node_kva == MEMBLOCK_ERROR)
                         panic("Can not get kva ram\n");
 
                 node_remap_size[nid] = size;
@@ -315,7 +306,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
                 reserve_pages += size;
                 printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
                                 " node %d at %llx\n",
-                                size, nid, node_kva_final>>PAGE_SHIFT);
+                                size, nid, node_kva >> PAGE_SHIFT);
 
                 /*
                  *  prevent kva address below max_low_pfn want it on system
@@ -328,11 +319,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
                  *  to use it as free.
                  *  So memblock_x86_reserve_range here, hope we don't run out of that array
                  */
-                memblock_x86_reserve_range(node_kva_final,
-                                node_kva_final+(((u64)size)<<PAGE_SHIFT),
-                                "KVA RAM");
+                memblock_x86_reserve_range(node_kva,
+                                node_kva + (((u64)size)<<PAGE_SHIFT),
+                                "KVA RAM");
 
-                node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+                node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
         }
         printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
                         reserve_pages);
@@ -356,7 +347,6 @@ static void init_remap_allocator(int nid)
 void __init initmem_init(void)
 {
         int nid;
-        long kva_target_pfn;
 
         /*
          * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -371,15 +361,10 @@ void __init initmem_init(void)
 
         kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-        kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-        do {
-                kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
-                                        max_low_pfn<<PAGE_SHIFT,
-                                        kva_pages<<PAGE_SHIFT,
-                                        PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
-                kva_target_pfn -= PTRS_PER_PTE;
-        } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
+        kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+                                               max_low_pfn << PAGE_SHIFT,
+                                               kva_pages << PAGE_SHIFT,
+                                               PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
         if (kva_start_pfn == MEMBLOCK_ERROR)
                 panic("Can not get kva space\n");
 