author     Yinghai Lu <yhlu.kernel@gmail.com>        2008-06-23 06:05:30 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-07-08 06:50:20 -0400
commit     2ec65f8b89ea003c27ff7723525a2ee335a2b393
tree       9b8718be2017f619b2a0185492315b85ea1731fa /arch/x86
parent     bef1568d9714f1162086c32583ba7984a7ca8e3e
x86: clean up using max_low_pfn on 32-bit
Make max_low_pfn final as soon as it is set, so it is never changed afterwards. That lets the calculation move early and out of initmem_init(): find_low_pfn_range() can be called right after max_pfn is set, and reserve_initrd() can move out of setup_bootmem_allocator(), so 32-bit behaves more like 64-bit.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/setup_32.c | 16
-rw-r--r--  arch/x86/mm/discontig_32.c | 22
-rw-r--r--  arch/x86/mm/init_32.c      | 25
3 files changed, 26 insertions(+), 37 deletions(-)
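In short, after this patch the 32-bit setup_arch() fixes max_low_pfn immediately after max_pfn, then reserves the initrd, and initmem_init() only consumes the already-final value. The following is a minimal userspace sketch of that ordering; the function names mirror the kernel ones but the bodies are stand-ins and the pfn values are made up for illustration, not the kernel implementation.

#include <stdio.h>

#define MAXMEM_PFN 0x38000UL	/* hypothetical lowmem ceiling: 896 MB of 4 KB pages */

static unsigned long max_pfn;
static unsigned long max_low_pfn;

/* stand-in for e820_end_of_ram(): pretend the machine has 2 GB of RAM */
static unsigned long e820_end_of_ram(void)
{
	return 0x80000UL;
}

/* stand-in for find_low_pfn_range(): set max_low_pfn once, never change it again */
static void find_low_pfn_range(void)
{
	max_low_pfn = (max_pfn > MAXMEM_PFN) ? MAXMEM_PFN : max_pfn;
}

/* the later stages only read the already-final max_low_pfn */
static void reserve_initrd(void)
{
	printf("reserve_initrd: lowmem ends at pfn %#lx\n", max_low_pfn);
}

static void initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	printf("initmem_init:   pfns %#lx-%#lx, lowmem limit %#lx\n",
	       start_pfn, end_pfn, max_low_pfn);
}

int main(void)
{
	max_pfn = e820_end_of_ram();
	find_low_pfn_range();		/* right after max_pfn, as in the patched setup_arch() */
	reserve_initrd();		/* no longer buried in setup_bootmem_allocator() */
	initmem_init(0, max_pfn);	/* void now; it no longer returns max_low_pfn */
	return 0;
}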
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 9a08490a3889..b42f570a5a56 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -188,13 +188,14 @@ static inline void copy_edd(void)
 
 static bool do_relocate_initrd = false;
 
-void __init reserve_initrd(void)
+static void __init reserve_initrd(void)
 {
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size = boot_params.hdr.ramdisk_size;
 	u64 ramdisk_end = ramdisk_image + ramdisk_size;
 	u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT;
 	u64 ramdisk_here;
+	u64 ramdisk_target;
 
 	if (!boot_params.hdr.type_of_loader ||
 	    !ramdisk_image || !ramdisk_size)
@@ -202,7 +203,7 @@ void __init reserve_initrd(void)
 
 	initrd_start = 0;
 
-	if (ramdisk_size >= end_of_lowmem/2) {
+	if (ramdisk_size >= (end_of_lowmem>>1)) {
 		free_early(ramdisk_image, ramdisk_end);
 		printk(KERN_ERR "initrd too large to handle, "
 		       "disabling initrd\n");
@@ -225,7 +226,8 @@ void __init reserve_initrd(void)
 	}
 
 	/* We need to move the initrd down into lowmem */
-	ramdisk_here = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+	ramdisk_target = max_pfn_mapped<<PAGE_SHIFT;
+	ramdisk_here = find_e820_area(min(ramdisk_target, end_of_lowmem>>1),
 				      end_of_lowmem, ramdisk_size,
 				      PAGE_SIZE);
 
@@ -346,8 +348,6 @@ static void set_mca_bus(int x) { }
  */
 void __init setup_arch(char **cmdline_p)
 {
-	unsigned long max_low_pfn;
-
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	pre_setup_arch_hook();
 	early_cpu_init();
@@ -450,6 +450,10 @@ void __init setup_arch(char **cmdline_p)
 		max_pfn = e820_end_of_ram();
 	}
 
+	find_low_pfn_range();
+
+	reserve_initrd();
+
 	dmi_scan_machine();
 
 	io_delay_init();
@@ -466,7 +470,7 @@ void __init setup_arch(char **cmdline_p)
 	acpi_numa_init();
 #endif
 
-	max_low_pfn = initmem_init(0, max_pfn);
+	initmem_init(0, max_pfn);
 
 #ifdef CONFIG_ACPI_SLEEP
 	/*
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 3e75be46c4f2..1dfff700264c 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -309,11 +309,10 @@ static void init_remap_allocator(int nid)
 		(ulong) node_remap_end_vaddr[nid]);
 }
 
-unsigned long __init initmem_init(unsigned long start_pfn,
+void __init initmem_init(unsigned long start_pfn,
 				  unsigned long end_pfn)
 {
 	int nid;
-	unsigned long system_start_pfn, system_max_low_pfn;
 	long kva_target_pfn;
 
 	/*
@@ -324,17 +323,11 @@ unsigned long __init initmem_init(unsigned long start_pfn,
 	 * and ZONE_HIGHMEM.
 	 */
 
-	/* call find_max_low_pfn at first, it could update max_pfn */
-	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
-
 	remove_all_active_ranges();
 	get_memcfg_numa();
 
 	kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-	/* partially used pages are not usable - thus round upwards */
-	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
-
 	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
 	do {
 		kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
@@ -357,19 +350,19 @@ unsigned long __init initmem_init(unsigned long start_pfn,
357 "KVA PG"); 350 "KVA PG");
358#ifdef CONFIG_HIGHMEM 351#ifdef CONFIG_HIGHMEM
359 highstart_pfn = highend_pfn = max_pfn; 352 highstart_pfn = highend_pfn = max_pfn;
360 if (max_pfn > system_max_low_pfn) 353 if (max_pfn > max_low_pfn)
361 highstart_pfn = system_max_low_pfn; 354 highstart_pfn = max_low_pfn;
362 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", 355 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
363 pages_to_mb(highend_pfn - highstart_pfn)); 356 pages_to_mb(highend_pfn - highstart_pfn));
364 num_physpages = highend_pfn; 357 num_physpages = highend_pfn;
365 high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; 358 high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
366#else 359#else
367 num_physpages = system_max_low_pfn; 360 num_physpages = max_low_pfn;
368 high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1; 361 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
369#endif 362#endif
370 printk(KERN_NOTICE "%ldMB LOWMEM available.\n", 363 printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
371 pages_to_mb(system_max_low_pfn)); 364 pages_to_mb(max_low_pfn));
372 printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n", 365 printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
373 min_low_pfn, max_low_pfn, highstart_pfn); 366 min_low_pfn, max_low_pfn, highstart_pfn);
374 367
375 printk("Low memory ends at vaddr %08lx\n", 368 printk("Low memory ends at vaddr %08lx\n",
@@ -387,7 +380,6 @@ unsigned long __init initmem_init(unsigned long start_pfn,
 	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
 	NODE_DATA(0)->bdata = &node0_bdata;
 	setup_bootmem_allocator();
-	return max_low_pfn;
 }
 
 void __init zone_sizes_init(void)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d1017336f1b5..27b829312944 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -561,9 +561,15 @@ early_param("highmem", parse_highmem);
 /*
  * Determine low and high memory ranges:
  */
-unsigned long __init find_max_low_pfn(void)
+void __init find_low_pfn_range(void)
 {
-	unsigned long max_low_pfn;
+	/* it could update max_pfn */
+
+	/*
+	 * partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	min_low_pfn = PFN_UP(init_pg_tables_end);
 
 	max_low_pfn = max_pfn;
 	if (max_low_pfn > MAXMEM_PFN) {
@@ -625,21 +631,12 @@ unsigned long __init find_max_low_pfn(void)
625 " kernel!\n"); 631 " kernel!\n");
626#endif 632#endif
627 } 633 }
628 return max_low_pfn;
629} 634}
630 635
631#ifndef CONFIG_NEED_MULTIPLE_NODES 636#ifndef CONFIG_NEED_MULTIPLE_NODES
632unsigned long __init initmem_init(unsigned long start_pfn, 637void __init initmem_init(unsigned long start_pfn,
633 unsigned long end_pfn) 638 unsigned long end_pfn)
634{ 639{
635 /*
636 * partially used pages are not usable - thus
637 * we are rounding upwards:
638 */
639 min_low_pfn = PFN_UP(init_pg_tables_end);
640
641 max_low_pfn = find_max_low_pfn();
642
643#ifdef CONFIG_HIGHMEM 640#ifdef CONFIG_HIGHMEM
644 highstart_pfn = highend_pfn = max_pfn; 641 highstart_pfn = highend_pfn = max_pfn;
645 if (max_pfn > max_low_pfn) 642 if (max_pfn > max_low_pfn)
@@ -661,8 +658,6 @@ unsigned long __init initmem_init(unsigned long start_pfn,
 	       pages_to_mb(max_low_pfn));
 
 	setup_bootmem_allocator();
-
-	return max_low_pfn;
 }
 
 void __init zone_sizes_init(void)
@@ -699,8 +694,6 @@ void __init setup_bootmem_allocator(void)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
 	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 
-	reserve_initrd();
-
 	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, max_low_pfn);
 	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
 	       max_pfn_mapped<<PAGE_SHIFT);