author		Yinghai Lu <yinghai@kernel.org>	2010-10-05 19:15:15 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2010-10-06 00:44:35 -0400
commit		1d931264af0f10649b35afa8fbd2e169da51ac08 (patch)
tree		01ccff1fd0777cf256aeef478357bda6fc178276 /arch
parent		9f4c13964b58608fbce05540743281ea3146c0e8 (diff)
x86-32, memblock: Make add_highpages honor early reserved ranges
Originally the only early reserved range that overlapped with high pages was "KVA RAM", and we already remove that from the active ranges. However, it turns out Xen can have that kind of overlap to support memory ballooning.

So make add_highpages_with_active_regions() subtract memblock reserved ranges just like low RAM; this is the proper design anyway.

In this patch, refactor get_free_all_memory_range() so it can be used by add_highpages_with_active_regions(). With that in place, we also no longer need to remove "KVA RAM" from the active ranges.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CABB183.1040607@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
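In short, the per-PFN callback walk over active regions is replaced by a walk over a precomputed list of free ranges from which memblock-reserved areas have already been subtracted. A condensed sketch of the resulting flow (simplified from the diff below; struct range and the helper functions are the kernel's own, shown here purely for illustration):

	void __init add_highpages_with_active_regions(int nid,
			unsigned long start_pfn, unsigned long end_pfn)
	{
		struct range *range;
		int nr_range, i;

		/* free RAM in [start_pfn, end_pfn), memblock.reserved subtracted */
		nr_range = __get_free_all_memory_range(&range, nid,
						       start_pfn, end_pfn);

		for (i = 0; i < nr_range; i++) {
			unsigned long pfn;

			for (pfn = range[i].start; pfn < range[i].end; pfn++)
				if (pfn_valid(pfn))
					add_one_highpage_init(pfn_to_page(pfn));
		}
	}

This way a reserved highmem range (such as one Xen sets aside for ballooning) is never registered as a free high page, matching how reserved low RAM is already handled.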
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/memblock.h	|  2
-rw-r--r--	arch/x86/mm/init_32.c		| 53
-rw-r--r--	arch/x86/mm/memblock.c		| 19
-rw-r--r--	arch/x86/mm/numa_32.c		|  2
4 files changed, 33 insertions, 43 deletions
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 2c304bb6e07..19ae14ba697 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -9,6 +9,8 @@ void memblock_x86_to_bootmem(u64 start, u64 end);
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
 struct range;
+int __get_free_all_memory_range(struct range **range, int nodeid,
+			 unsigned long start_pfn, unsigned long end_pfn);
 int get_free_all_memory_range(struct range **rangep, int nodeid);
 
 void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c2385d7ae31..85467099d6d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -423,49 +423,28 @@ static void __init add_one_highpage_init(struct page *page)
 	totalhigh_pages++;
 }
 
-struct add_highpages_data {
-	unsigned long start_pfn;
-	unsigned long end_pfn;
-};
-
-static int __init add_highpages_work_fn(unsigned long start_pfn,
-					 unsigned long end_pfn, void *datax)
+void __init add_highpages_with_active_regions(int nid,
+			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	int node_pfn;
-	struct page *page;
-	unsigned long final_start_pfn, final_end_pfn;
-	struct add_highpages_data *data;
+	struct range *range;
+	int nr_range;
+	int i;
 
-	data = (struct add_highpages_data *)datax;
+	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
 
-	final_start_pfn = max(start_pfn, data->start_pfn);
-	final_end_pfn = min(end_pfn, data->end_pfn);
-	if (final_start_pfn >= final_end_pfn)
-		return 0;
+	for (i = 0; i < nr_range; i++) {
+		struct page *page;
+		int node_pfn;
 
-	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
-	     node_pfn++) {
-		if (!pfn_valid(node_pfn))
-			continue;
-		page = pfn_to_page(node_pfn);
-		add_one_highpage_init(page);
+		for (node_pfn = range[i].start; node_pfn < range[i].end;
+		     node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			add_one_highpage_init(page);
+		}
 	}
-
-	return 0;
-
 }
-
-void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-					      unsigned long end_pfn)
-{
-	struct add_highpages_data data;
-
-	data.start_pfn = start_pfn;
-	data.end_pfn = end_pfn;
-
-	work_with_active_regions(nid, add_highpages_work_fn, &data);
-}
-
 #else
 static inline void permanent_kmaps_init(pgd_t *pgd_base)
 {
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index 50ecbc59757..fd7a0404945 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -156,7 +156,8 @@ static int __init count_early_node_map(int nodeid)
 	return data.nr;
 }
 
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
+			 unsigned long start_pfn, unsigned long end_pfn)
 {
 	int count;
 	struct range *range;
@@ -172,9 +173,9 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	 * at first
 	 */
 	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-#ifdef CONFIG_X86_32
-	subtract_range(range, count, max_low_pfn, -1ULL);
-#endif
+	subtract_range(range, count, 0, start_pfn);
+	subtract_range(range, count, end_pfn, -1ULL);
+
 	memblock_x86_subtract_reserved(range, count);
 	nr_range = clean_sort_range(range, count);
 
@@ -182,6 +183,16 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	return nr_range;
 }
 
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+	unsigned long end_pfn = -1UL;
+
+#ifdef CONFIG_X86_32
+	end_pfn = max_low_pfn;
+#endif
+	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
+}
+
 static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
 {
 	int i, count;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 70ddeb75ba2..84a3e4c9f27 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -326,8 +326,6 @@ static __init unsigned long calculate_numa_remap_pages(void)
326 "KVA RAM"); 326 "KVA RAM");
327 327
328 node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT; 328 node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
329 remove_active_range(nid, node_remap_start_pfn[nid],
330 node_remap_start_pfn[nid] + size);
331 } 329 }
332 printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n", 330 printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
333 reserve_pages); 331 reserve_pages);