author    Yinghai Lu <yhlu.kernel@gmail.com>	2008-06-14 21:32:52 -0400
committer Ingo Molnar <mingo@elte.hu>	2008-07-08 04:37:25 -0400
commit    b5bc6c0e55000dab86b73f838f5ad02908b23755 (patch)
tree      4895117f5e69ed8648f102dccc895c80c14fbae5 /arch/x86/mm
parent    d0be6bdea103b8d04c8a3495538b7c0011ae4129 (diff)
x86, mm: use add_highpages_with_active_regions() for high pages init v2
Use early_node_map to init the high pages, so the page_is_ram() and page_is_reserved_early() checks in the big loop around add_one_highpage_init() can be dropped.

Also remove page_is_reserved_early(); it is not needed anymore.

v2: fix the build on other platforms.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
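The core of the change is the early_node_map callback pattern: work_with_active_regions() calls a handler once for each registered RAM range on a node, and the handler clips that range against the requested highmem pfn window before touching any pages. The following is a minimal, hypothetical userspace sketch of that control flow; for_each_active_region(), highpages_work_fn() and the region table are illustrative stand-ins, not the kernel API:

#include <stdio.h>

/* stand-in for an early_node_map entry: a registered RAM range on a node */
struct region { int nid; unsigned long start_pfn, end_pfn; };

static const struct region active_regions[] = {
	{ 0, 0x000, 0x09f },	/* low RAM on node 0 */
	{ 0, 0x100, 0x800 },	/* more RAM on node 0, partly highmem */
	{ 1, 0x800, 0xc00 },	/* RAM on node 1 */
};

typedef void (*work_fn_t)(unsigned long start_pfn, unsigned long end_pfn,
			  void *data);

/* stand-in for work_with_active_regions(): invoke fn per range on nid */
static void for_each_active_region(int nid, work_fn_t fn, void *data)
{
	unsigned int i;

	for (i = 0; i < sizeof(active_regions) / sizeof(active_regions[0]); i++)
		if (active_regions[i].nid == nid)
			fn(active_regions[i].start_pfn,
			   active_regions[i].end_pfn, data);
}

struct highpages_window { unsigned long start_pfn, end_pfn; };

/* clip the RAM range to the highmem window, like add_highpages_work_fn() */
static void highpages_work_fn(unsigned long start_pfn, unsigned long end_pfn,
			      void *data)
{
	const struct highpages_window *w = data;
	unsigned long s = start_pfn > w->start_pfn ? start_pfn : w->start_pfn;
	unsigned long e = end_pfn < w->end_pfn ? end_pfn : w->end_pfn;

	if (s >= e)
		return;	/* range lies entirely outside the highmem window */
	printf("would init highmem pfns [%#lx, %#lx)\n", s, e);
}

int main(void)
{
	/* pretend highmem on node 0 spans pfns 0x380..0x800 */
	struct highpages_window w = { 0x380, 0x800 };

	for_each_active_region(0, highpages_work_fn, &w);
	return 0;
}

Because the iterator only hands over ranges that were registered as RAM, the per-page page_is_ram() and page_is_reserved_early() tests become redundant, which is what the init_32.c hunk below removes (pfn_valid() is still checked to skip sparsemem holes).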
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/discontig_32.c | 19
-rw-r--r--  arch/x86/mm/init_32.c      | 62
2 files changed, 58 insertions(+), 23 deletions(-)
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index c3f119e99e0d..7c4d0255f8d8 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -100,7 +100,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 #endif
 
 extern unsigned long find_max_low_pfn(void);
-extern void add_one_highpage_init(struct page *, int, int);
 extern unsigned long highend_pfn, highstart_pfn;
 
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
@@ -432,10 +431,10 @@ void __init set_highmem_pages_init(int bad_ppro)
 {
 #ifdef CONFIG_HIGHMEM
 	struct zone *zone;
-	struct page *page;
+	int nid;
 
 	for_each_zone(zone) {
-		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+		unsigned long zone_start_pfn, zone_end_pfn;
 
 		if (!is_highmem(zone))
 			continue;
@@ -443,16 +442,12 @@ void __init set_highmem_pages_init(int bad_ppro)
 		zone_start_pfn = zone->zone_start_pfn;
 		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
+		nid = zone_to_nid(zone);
 		printk("Initializing %s for node %d (%08lx:%08lx)\n",
-				zone->name, zone_to_nid(zone),
-				zone_start_pfn, zone_end_pfn);
-
-		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
-			if (!pfn_valid(node_pfn))
-				continue;
-			page = pfn_to_page(node_pfn);
-			add_one_highpage_init(page, node_pfn, bad_ppro);
-		}
+				zone->name, nid, zone_start_pfn, zone_end_pfn);
+
+		add_highpages_with_active_regions(nid, zone_start_pfn,
+				zone_end_pfn, bad_ppro);
 	}
 	totalram_pages += totalhigh_pages;
 #endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index abadb1da70df..ba07a489230e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -287,10 +287,10 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;
 }
 
-void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+static void __init
+add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
-	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn)) &&
-	    !page_is_reserved_early(pfn)) {
+	if (!(bad_ppro && page_kills_ppro(pfn))) {
 		ClearPageReserved(page);
 		init_page_count(page);
 		__free_page(page);
@@ -299,18 +299,58 @@ void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 		SetPageReserved(page);
 }
 
+struct add_highpages_data {
+	unsigned long start_pfn;
+	unsigned long end_pfn;
+	int bad_ppro;
+};
+
+static void __init add_highpages_work_fn(unsigned long start_pfn,
+					 unsigned long end_pfn, void *datax)
+{
+	int node_pfn;
+	struct page *page;
+	unsigned long final_start_pfn, final_end_pfn;
+	struct add_highpages_data *data;
+	int bad_ppro;
+
+	data = (struct add_highpages_data *)datax;
+	bad_ppro = data->bad_ppro;
+
+	final_start_pfn = max(start_pfn, data->start_pfn);
+	final_end_pfn = min(end_pfn, data->end_pfn);
+	if (final_start_pfn >= final_end_pfn)
+		return;
+
+	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
+	     node_pfn++) {
+		if (!pfn_valid(node_pfn))
+			continue;
+		page = pfn_to_page(node_pfn);
+		add_one_highpage_init(page, node_pfn, bad_ppro);
+	}
+
+}
+
+void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
+					      unsigned long end_pfn,
+					      int bad_ppro)
+{
+	struct add_highpages_data data;
+
+	data.start_pfn = start_pfn;
+	data.end_pfn = end_pfn;
+	data.bad_ppro = bad_ppro;
+
+	work_with_active_regions(nid, add_highpages_work_fn, &data);
+}
+
 #ifndef CONFIG_NUMA
 static void __init set_highmem_pages_init(int bad_ppro)
 {
-	int pfn;
-
-	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
-		/*
-		 * Holes under sparsemem might not have no mem_map[]:
-		 */
-		if (pfn_valid(pfn))
-			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-	}
+	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn,
+					  bad_ppro);
+
 	totalram_pages += totalhigh_pages;
 }
 #endif /* !CONFIG_NUMA */