author     Yinghai Lu <yinghai@kernel.org>    2010-10-05 19:15:15 -0400
committer  H. Peter Anvin <hpa@zytor.com>     2010-10-06 00:44:35 -0400
commit     1d931264af0f10649b35afa8fbd2e169da51ac08 (patch)
tree       01ccff1fd0777cf256aeef478357bda6fc178276 /arch/x86/mm/init_32.c
parent     9f4c13964b58608fbce05540743281ea3146c0e8 (diff)
x86-32, memblock: Make add_highpages honor early reserved ranges
Originally the only early reserved range that overlapped with high pages was "KVA RAM", and we already remove that from the active ranges. However, it turns out Xen can have that kind of overlap to support memory ballooning.

So we need to make add_highpages_with_active_regions() subtract the memblock-reserved ranges, just as we do for low RAM; this is the proper design anyway.

This patch refactors get_free_all_memory_range() so that it can be used by add_highpages_with_active_regions(). With that in place, we also no longer need to remove "KVA RAM" from the active ranges.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CABB183.1040607@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
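The operation the message describes is plain interval subtraction: walk the sorted, non-overlapping reserved ranges and emit the free gaps left inside [start_pfn, end_pfn). A minimal user-space sketch of the idea follows; struct pfn_range, subtract_reserved(), and the example hole are illustrative stand-ins, not the kernel's code:

/*
 * Illustrative sketch (not kernel code): how subtracting reserved
 * ranges from an active PFN range yields the free subranges.
 */
#include <stdio.h>

struct pfn_range {
	unsigned long start;
	unsigned long end;	/* exclusive */
};

/*
 * Subtract each reserved range from [start, end) and emit what is
 * left. Assumes the reserved list is sorted and non-overlapping.
 */
static int subtract_reserved(unsigned long start, unsigned long end,
			     const struct pfn_range *reserved, int nr_reserved,
			     struct pfn_range *out)
{
	int i, nr_out = 0;

	for (i = 0; i < nr_reserved; i++) {
		unsigned long rs = reserved[i].start;
		unsigned long re = reserved[i].end;

		if (re <= start || rs >= end)
			continue;		/* no overlap */
		if (rs > start) {		/* free gap before this reservation */
			out[nr_out].start = start;
			out[nr_out].end = rs;
			nr_out++;
		}
		if (re > start)
			start = re;		/* skip past the reservation */
		if (start >= end)
			return nr_out;
	}
	if (start < end) {			/* trailing free piece */
		out[nr_out].start = start;
		out[nr_out].end = end;
		nr_out++;
	}
	return nr_out;
}

int main(void)
{
	/* A high range with a ballooning-style reserved hole in it. */
	struct pfn_range reserved[] = { { 0x3a000, 0x3c000 } };
	struct pfn_range out[4];
	int i, n = subtract_reserved(0x38000, 0x40000, reserved, 1, out);

	for (i = 0; i < n; i++)
		printf("free: [%#lx, %#lx)\n", out[i].start, out[i].end);
	return 0;
}

In the patch itself this job is delegated to __get_free_all_memory_range(), which returns the free subranges as a struct range array that the rewritten loop in the diff below walks.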
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--  arch/x86/mm/init_32.c  53
1 file changed, 16 insertions(+), 37 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c2385d7ae31..85467099d6d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -423,49 +423,28 @@ static void __init add_one_highpage_init(struct page *page)
 	totalhigh_pages++;
 }
 
-struct add_highpages_data {
-	unsigned long start_pfn;
-	unsigned long end_pfn;
-};
-
-static int __init add_highpages_work_fn(unsigned long start_pfn,
-					 unsigned long end_pfn, void *datax)
+void __init add_highpages_with_active_regions(int nid,
+			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	int node_pfn;
-	struct page *page;
-	unsigned long final_start_pfn, final_end_pfn;
-	struct add_highpages_data *data;
+	struct range *range;
+	int nr_range;
+	int i;
 
-	data = (struct add_highpages_data *)datax;
+	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
 
-	final_start_pfn = max(start_pfn, data->start_pfn);
-	final_end_pfn = min(end_pfn, data->end_pfn);
-	if (final_start_pfn >= final_end_pfn)
-		return 0;
+	for (i = 0; i < nr_range; i++) {
+		struct page *page;
+		int node_pfn;
 
-	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
-	     node_pfn++) {
-		if (!pfn_valid(node_pfn))
-			continue;
-		page = pfn_to_page(node_pfn);
-		add_one_highpage_init(page);
+		for (node_pfn = range[i].start; node_pfn < range[i].end;
+		     node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			add_one_highpage_init(page);
+		}
 	}
-
-	return 0;
-
 }
-
-void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-					      unsigned long end_pfn)
-{
-	struct add_highpages_data data;
-
-	data.start_pfn = start_pfn;
-	data.end_pfn = end_pfn;
-
-	work_with_active_regions(nid, add_highpages_work_fn, &data);
-}
-
 #else
 static inline void permanent_kmaps_init(pgd_t *pgd_base)
 {
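For context on how this entry point is driven: in kernels of this era the caller, set_highmem_pages_init() in arch/x86/mm/highmem_32.c, walks the highmem zones and hands each zone's PFN window to the function reworked above. A condensed sketch of that caller (not part of this patch; details may differ from the exact tree):

/* Condensed from the era's arch/x86/mm/highmem_32.c. */
void __init set_highmem_pages_init(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		/* After this patch, the callee subtracts memblock
		 * reservations itself, so overlapping reserved ranges
		 * (e.g. Xen ballooning) are skipped here too. */
		add_highpages_with_active_regions(zone_to_nid(zone),
						  zone_start_pfn, zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}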