-rw-r--r--   arch/x86/kernel/acpi/boot.c     44
-rw-r--r--   arch/x86/kernel/e820.c          26
-rw-r--r--   arch/x86/kernel/io_apic_32.c    10
-rw-r--r--   arch/x86/kernel/io_apic_64.c    10
-rw-r--r--   arch/x86/kernel/setup.c         13
-rw-r--r--   arch/x86/mm/init_64.c          175
-rw-r--r--   include/asm-x86/e820.h           2
-rw-r--r--   include/asm-x86/genapic_32.h     1
-rw-r--r--   include/asm-x86/genapic_64.h     2
9 files changed, 213 insertions(+), 70 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 5c0107602b62..bf7b4f7f60e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -37,6 +37,7 @@
 #include <asm/pgtable.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/genapic.h>
 #include <asm/io.h>
 #include <asm/mpspec.h>
 #include <asm/smp.h>
@@ -1373,8 +1374,6 @@ static void __init acpi_process_madt(void)
         return;
 }
 
-#ifdef __i386__
-
 static int __init disable_acpi_irq(const struct dmi_system_id *d)
 {
         if (!acpi_force) {
@@ -1436,6 +1435,17 @@ dmi_disable_irq0_through_ioapic(const struct dmi_system_id *d)
 }
 
 /*
+ * Force ignoring BIOS IRQ0 pin2 override
+ */
+static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+{
+        pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n", d->ident);
+        acpi_skip_timer_override = 1;
+        force_mask_ioapic_irq_2();
+        return 0;
+}
+
+/*
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact acpi-devel@sourceforge.net
  */
@@ -1628,11 +1638,35 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
                 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
                 },
         },
+        /*
+         * HP laptops which use a DSDT reporting as HP/SB400/10000,
+         * which includes some code which overrides all temperature
+         * trip points to 16C if the INTIN2 input of the I/O APIC
+         * is enabled.  This input is incorrectly designated the
+         * ISA IRQ 0 via an interrupt source override even though
+         * it is wired to the output of the master 8259A and INTIN0
+         * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+         * override in such cases.
+         */
+        {
+                .callback = dmi_ignore_irq0_timer_override,
+                .ident = "HP NX6125 laptop",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"),
+                },
+        },
+        {
+                .callback = dmi_ignore_irq0_timer_override,
+                .ident = "HP NX6325 laptop",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
+                },
+        },
         {}
 };
 
-#endif  /* __i386__ */
-
 /*
  * acpi_boot_table_init() and acpi_boot_init()
  * called from setup_arch(), always.
@@ -1660,9 +1694,7 @@ int __init acpi_boot_table_init(void)
 {
         int error;
 
-#ifdef __i386__
         dmi_check_system(acpi_dmi_table);
-#endif
 
         /*
          * If acpi_disabled, bail out
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e07d4019e266..66fd5bd78318 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1056,12 +1056,25 @@ unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
 /*
  * Find the highest page frame number we have available
  */
-unsigned long __init e820_end_of_ram(void)
+unsigned long __init e820_end(void)
 {
-        unsigned long last_pfn;
+        int i;
+        unsigned long last_pfn = 0;
         unsigned long max_arch_pfn = MAX_ARCH_PFN;
 
-        last_pfn = find_max_pfn_with_active_regions();
+        for (i = 0; i < e820.nr_map; i++) {
+                struct e820entry *ei = &e820.map[i];
+                unsigned long end_pfn;
+
+#ifdef CONFIG_X86_32
+                if (ei->type != E820_RAM)
+                        continue;
+#endif
+
+                end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
+                if (end_pfn > last_pfn)
+                        last_pfn = end_pfn;
+        }
 
         if (last_pfn > max_arch_pfn)
                 last_pfn = max_arch_pfn;
@@ -1185,6 +1198,9 @@ static int __init parse_memmap_opt(char *p)
         char *oldp;
         u64 start_at, mem_size;
 
+        if (!p)
+                return -EINVAL;
+
         if (!strcmp(p, "exactmap")) {
 #ifdef CONFIG_CRASH_DUMP
                 /*
@@ -1192,9 +1208,7 @@ static int __init parse_memmap_opt(char *p)
                  * the real mem size before original memory map is
                  * reset.
                  */
-                e820_register_active_regions(0, 0, -1UL);
-                saved_max_pfn = e820_end_of_ram();
-                remove_all_active_ranges();
+                saved_max_pfn = e820_end();
 #endif
                 e820.nr_map = 0;
                 userdef = 1;
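
For illustration, a minimal standalone sketch (not code from this patch) of what the new e820_end() computes: the highest end PFN over the e820 entries, capped at MAX_ARCH_PFN. The struct, the sample map and the cap value here are made up for the example.

#include <stdio.h>

/* simplified stand-in for struct e820entry (illustrative only) */
struct entry { unsigned long long addr, size; };

int main(void)
{
        /* hypothetical map: 0-640K and 1M-2G of usable RAM */
        struct entry map[] = { { 0x0, 0xa0000 }, { 0x100000, 0x7ff00000 } };
        unsigned long long max_arch_pfn = 1ULL << 20;   /* example cap */
        unsigned long long last_pfn = 0, end_pfn;
        int i;

        for (i = 0; i < 2; i++) {
                end_pfn = (map[i].addr + map[i].size) >> 12;    /* PAGE_SHIFT */
                if (end_pfn > last_pfn)
                        last_pfn = end_pfn;
        }
        if (last_pfn > max_arch_pfn)
                last_pfn = max_arch_pfn;

        printf("last_pfn = %#llx\n", last_pfn);         /* 0x80000 for this map */
        return 0;
}
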
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 337ec3438a8f..6b220b9dcbb3 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -59,6 +59,13 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 static DEFINE_SPINLOCK(ioapic_lock);
 static DEFINE_SPINLOCK(vector_lock);
 
+static bool mask_ioapic_irq_2 __initdata;
+
+void __init force_mask_ioapic_irq_2(void)
+{
+        mask_ioapic_irq_2 = true;
+}
+
 int timer_through_8259 __initdata;
 
 /*
@@ -2172,6 +2179,9 @@ static inline void __init check_timer(void)
         printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
                 vector, apic1, pin1, apic2, pin2);
 
+        if (mask_ioapic_irq_2)
+                mask_IO_APIC_irq(2);
+
         /*
          * Some BIOS writers are clueless and report the ExtINTA
          * I/O APIC input from the cascaded 8259A as the timer
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 2b4c40bc12c9..0494cdb270c5 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -94,6 +94,13 @@ static int no_timer_check;
 
 static int disable_timer_pin_1 __initdata;
 
+static bool mask_ioapic_irq_2 __initdata;
+
+void __init force_mask_ioapic_irq_2(void)
+{
+        mask_ioapic_irq_2 = true;
+}
+
 int timer_through_8259 __initdata;
 
 /* Where if anywhere is the i8259 connect in external int mode */
@@ -1698,6 +1705,9 @@ static inline void __init check_timer(void)
         apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
                 cfg->vector, apic1, pin1, apic2, pin2);
 
+        if (mask_ioapic_irq_2)
+                mask_IO_APIC_irq(2);
+
         /*
          * Some BIOS writers are clueless and report the ExtINTA
          * I/O APIC input from the cascaded 8259A as the timer
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bea8ae77d059..a7c3471ea17c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -709,22 +709,18 @@ void __init setup_arch(char **cmdline_p)
         early_gart_iommu_check();
 #endif
 
-        e820_register_active_regions(0, 0, -1UL);
         /*
          * partially used pages are not usable - thus
          * we are rounding upwards:
          */
-        max_pfn = e820_end_of_ram();
+        max_pfn = e820_end();
 
         /* preallocate 4k for mptable mpc */
         early_reserve_e820_mpc_new();
         /* update e820 for memory not covered by WB MTRRs */
         mtrr_bp_init();
-        if (mtrr_trim_uncached_memory(max_pfn)) {
-                remove_all_active_ranges();
-                e820_register_active_regions(0, 0, -1UL);
-                max_pfn = e820_end_of_ram();
-        }
+        if (mtrr_trim_uncached_memory(max_pfn))
+                max_pfn = e820_end();
 
 #ifdef CONFIG_X86_32
         /* max_low_pfn get updated here */
@@ -767,9 +763,6 @@ void __init setup_arch(char **cmdline_p)
          */
         acpi_boot_table_init();
 
-        /* Remove active ranges so rediscovery with NUMA-awareness happens */
-        remove_all_active_ranges();
-
 #ifdef CONFIG_ACPI_NUMA
         /*
          * Parse SRAT to discover nodes.
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 57d5eff754c9..51f69b39b752 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -340,7 +340,8 @@ phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+              unsigned long page_size_mask)
 {
         unsigned long pages = 0;
 
@@ -365,7 +366,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
                         continue;
                 }
 
-                if (cpu_has_pse) {
+                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                         pages++;
                         set_pte((pte_t *)pmd,
                                 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -383,20 +384,22 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
+                unsigned long page_size_mask)
 {
         pmd_t *pmd = pmd_offset(pud, 0);
         unsigned long last_map_addr;
 
         spin_lock(&init_mm.page_table_lock);
-        last_map_addr = phys_pmd_init(pmd, address, end);
+        last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
         spin_unlock(&init_mm.page_table_lock);
         __flush_tlb_all();
         return last_map_addr;
 }
 
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+              unsigned long page_size_mask)
 {
         unsigned long pages = 0;
         unsigned long last_map_addr = end;
@@ -418,11 +421,12 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 
                 if (pud_val(*pud)) {
                         if (!pud_large(*pud))
-                                last_map_addr = phys_pmd_update(pud, addr, end);
+                                last_map_addr = phys_pmd_update(pud, addr, end,
+                                                         page_size_mask);
                         continue;
                 }
 
-                if (direct_gbpages) {
+                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                         pages++;
                         set_pte((pte_t *)pud,
                                 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -433,7 +437,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
                 pmd = alloc_low_page(&pmd_phys);
 
                 spin_lock(&init_mm.page_table_lock);
-                last_map_addr = phys_pmd_init(pmd, addr, end);
+                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
                 unmap_low_page(pmd);
                 pud_populate(&init_mm, pud, __va(pmd_phys));
                 spin_unlock(&init_mm.page_table_lock);
@@ -446,29 +450,37 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end)
+phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
+                unsigned long page_size_mask)
 {
         pud_t *pud;
 
         pud = (pud_t *)pgd_page_vaddr(*pgd);
 
-        return phys_pud_init(pud, addr, end);
+        return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
 static void __init find_early_table_space(unsigned long end)
 {
-        unsigned long puds, tables, start;
+        unsigned long puds, pmds, ptes, tables, start;
 
         puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
         tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
-        if (!direct_gbpages) {
-                unsigned long pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-                tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
-        }
-        if (!cpu_has_pse) {
-                unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-                tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
-        }
+        if (direct_gbpages) {
+                unsigned long extra;
+                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+        } else
+                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+        tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+        if (cpu_has_pse) {
+                unsigned long extra;
+                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        } else
+                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
 
         /*
          * RED-PEN putting page tables only on node 0 could
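
As an aside, the revised estimate only reserves PMD/PTE space for the part of the range that the next-larger page size cannot cover. A standalone sketch of the arithmetic (not code from this patch; the constants, feature flags and the 4 GiB example are assumptions, and a 64-bit build is assumed):

#include <stdio.h>

#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)
#define PUD_SIZE  (1UL << 30)

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        unsigned long end = 4UL << 30;                  /* hypothetical: map 4 GiB */
        int direct_gbpages = 0, cpu_has_pse = 1;        /* example CPU features */
        unsigned long puds, pmds, ptes, tables, extra;

        puds = (end + PUD_SIZE - 1) / PUD_SIZE;
        tables = round_up_to(puds * 8, PAGE_SIZE);      /* 8 bytes per entry */

        if (direct_gbpages) {
                extra = end - (end / PUD_SIZE) * PUD_SIZE;      /* only the 1G-unaligned tail needs PMDs */
                pmds = (extra + PMD_SIZE - 1) / PMD_SIZE;
        } else
                pmds = (end + PMD_SIZE - 1) / PMD_SIZE;
        tables += round_up_to(pmds * 8, PAGE_SIZE);

        if (cpu_has_pse) {
                extra = end - (end / PMD_SIZE) * PMD_SIZE;      /* only the 2M-unaligned tail needs PTEs */
                ptes = (extra + PAGE_SIZE - 1) / PAGE_SIZE;
        } else
                ptes = (end + PAGE_SIZE - 1) / PAGE_SIZE;
        tables += round_up_to(ptes * 8, PAGE_SIZE);

        printf("estimated early page-table space: %lu bytes\n", tables);  /* 20480 here */
        return 0;
}
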
@@ -608,29 +620,12 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+                                                unsigned long end,
+                                                unsigned long page_size_mask)
 {
-        unsigned long next, last_map_addr = end;
-        unsigned long start_phys = start, end_phys = end;
-
-        printk(KERN_INFO "init_memory_mapping\n");
 
-        /*
-         * Find space for the kernel direct mapping tables.
-         *
-         * Later we should allocate these tables in the local node of the
-         * memory mapped. Unfortunately this is done currently before the
-         * nodes are discovered.
-         */
-        if (!after_bootmem) {
-                init_gbpages();
-                find_early_table_space(end);
-        }
+        unsigned long next, last_map_addr = end;
 
         start = (unsigned long)__va(start);
         end = (unsigned long)__va(end);
@@ -645,7 +640,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
                         next = end;
 
                 if (pgd_val(*pgd)) {
-                        last_map_addr = phys_pud_update(pgd, __pa(start), __pa(end));
+                        last_map_addr = phys_pud_update(pgd, __pa(start),
+                                                 __pa(end), page_size_mask);
                         continue;
                 }
 
@@ -654,22 +650,107 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
                 else
                         pud = alloc_low_page(&pud_phys);
 
-                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
+                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+                                                 page_size_mask);
                 unmap_low_page(pud);
                 pgd_populate(&init_mm, pgd_offset_k(start),
                              __va(pud_phys));
         }
 
+        return last_map_addr;
+}
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+                                               unsigned long end)
+{
+        unsigned long last_map_addr = end;
+        unsigned long page_size_mask = 0;
+        unsigned long start_pfn, end_pfn;
+
+        printk(KERN_INFO "init_memory_mapping\n");
+
+        /*
+         * Find space for the kernel direct mapping tables.
+         *
+         * Later we should allocate these tables in the local node of the
+         * memory mapped. Unfortunately this is done currently before the
+         * nodes are discovered.
+         */
+        if (!after_bootmem) {
+                init_gbpages();
+                find_early_table_space(end);
+        }
+
+        if (direct_gbpages)
+                page_size_mask |= 1 << PG_LEVEL_1G;
+        if (cpu_has_pse)
+                page_size_mask |= 1 << PG_LEVEL_2M;
+
+        /* head if not big page alignment ? */
+        start_pfn = start >> PAGE_SHIFT;
+        end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < end_pfn)
+                last_map_addr = kernel_physical_mapping_init(
+                                        start_pfn<<PAGE_SHIFT,
+                                        end_pfn<<PAGE_SHIFT, 0);
+
+        /* big page (2M) range */
+        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+                         << (PMD_SHIFT - PAGE_SHIFT);
+        end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+                         << (PUD_SHIFT - PAGE_SHIFT);
+        if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
+                end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
+        if (start_pfn < end_pfn)
+                last_map_addr = kernel_physical_mapping_init(
+                                        start_pfn<<PAGE_SHIFT,
+                                        end_pfn<<PAGE_SHIFT,
+                                        page_size_mask & (1<<PG_LEVEL_2M));
+
+        /* big page (1G) range */
+        start_pfn = end_pfn;
+        end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < end_pfn)
+                last_map_addr = kernel_physical_mapping_init(
+                                        start_pfn<<PAGE_SHIFT,
+                                        end_pfn<<PAGE_SHIFT,
+                                        page_size_mask & ((1<<PG_LEVEL_2M)
+                                                 | (1<<PG_LEVEL_1G)));
+
+        /* tail is not big page (1G) alignment */
+        start_pfn = end_pfn;
+        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < end_pfn)
+                last_map_addr = kernel_physical_mapping_init(
+                                        start_pfn<<PAGE_SHIFT,
+                                        end_pfn<<PAGE_SHIFT,
+                                        page_size_mask & (1<<PG_LEVEL_2M));
+        /* tail is not big page (2M) alignment */
+        start_pfn = end_pfn;
+        end_pfn = end>>PAGE_SHIFT;
+        if (start_pfn < end_pfn)
+                last_map_addr = kernel_physical_mapping_init(
+                                        start_pfn<<PAGE_SHIFT,
+                                        end_pfn<<PAGE_SHIFT, 0);
+
         if (!after_bootmem)
                 mmu_cr4_features = read_cr4();
         __flush_tlb_all();
 
-        if (!after_bootmem)
+        if (!after_bootmem && table_end > table_start)
                 reserve_early(table_start << PAGE_SHIFT,
                               table_end << PAGE_SHIFT, "PGTABLE");
 
+        printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
+                         last_map_addr, end);
+
         if (!after_bootmem)
-                early_memtest(start_phys, end_phys);
+                early_memtest(start, end);
 
         return last_map_addr >> PAGE_SHIFT;
 }
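
For illustration, a sketch (not code from this patch) of how the reworked init_memory_mapping() splits a request into head / 2M / 1G / 2M-tail / 4k-tail pieces, so each call to kernel_physical_mapping_init() only uses page sizes that cover its piece exactly. The shift values are the usual x86-64 ones; the sample range and the piece() helper are made up for the example.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21   /* 2 MiB */
#define PUD_SHIFT  30   /* 1 GiB */

static void piece(const char *what, unsigned long long s_pfn,
                  unsigned long long e_pfn)
{
        if (s_pfn < e_pfn)
                printf("%-9s pfn %#llx - %#llx\n", what, s_pfn, e_pfn);
}

int main(void)
{
        /* hypothetical request: map 0x00500000 .. 0x8ab00000 */
        unsigned long long start = 0x00500000ULL, end = 0x8ab00000ULL;
        unsigned long long s, e;

        /* 4k head up to the first 2M boundary */
        s = start >> PAGE_SHIFT;
        e = ((start + (1ULL << PMD_SHIFT) - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        piece("4k head:", s, e);

        /* 2M pages up to the first 1G boundary (clamped to end) */
        s = e;
        e = ((start + (1ULL << PUD_SHIFT) - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        if (e > (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT))
                e = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        piece("2M range:", s, e);

        /* 1G pages over the 1G-aligned middle */
        s = e;
        e = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        piece("1G range:", s, e);

        /* 2M pages down to the last 2M boundary */
        s = e;
        e = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        piece("2M tail:", s, e);

        /* 4k tail */
        s = e;
        e = end >> PAGE_SHIFT;
        piece("4k tail:", s, e);
        return 0;
}
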
@@ -1076,9 +1157,6 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                                                 PAGE_KERNEL_LARGE);
                         set_pmd(pmd, __pmd(pte_val(entry)));
 
-                        addr_end = addr + PMD_SIZE;
-                        p_end = p + PMD_SIZE;
-
                         /* check to see if we have contiguous blocks */
                         if (p_end != p || node_start != node) {
                                 if (p_start)
@@ -1088,6 +1166,9 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                                 node_start = node;
                                 p_start = p;
                         }
+
+                        addr_end = addr + PMD_SIZE;
+                        p_end = p + PMD_SIZE;
                 } else
                         vmemmap_verify((pte_t *)pmd, node, addr, next);
         }
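
The hunk above moves the addr_end/p_end updates after the contiguity check, so the check compares against the end of the previous block rather than a freshly advanced one. A sketch of the same pattern in isolation (not code from this patch; it coalesces consecutive integers into printed runs):

#include <stdio.h>

int main(void)
{
        int vals[] = { 10, 11, 12, 20, 21, 30 };
        int run_start = -1, run_end = -1;
        int i, n = sizeof(vals) / sizeof(vals[0]);

        for (i = 0; i < n; i++) {
                /* check against the *previous* end before moving it;
                 * updating run_end first would make every element start a new run */
                if (run_end != vals[i]) {
                        if (run_start != -1)
                                printf("run: %d..%d\n", run_start, run_end - 1);
                        run_start = vals[i];
                }
                run_end = vals[i] + 1;  /* only now extend the run */
        }
        if (run_start != -1)
                printf("run: %d..%d\n", run_start, run_end - 1);
        return 0;
}
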
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index a20d0a7f5892..78c03d7bf441 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -99,7 +99,7 @@ extern void free_early(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
 
-extern unsigned long e820_end_of_ram(void);
+extern unsigned long e820_end(void);
 extern int e820_find_active_region(const struct e820entry *ei,
                                    unsigned long start_pfn,
                                    unsigned long last_pfn,
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index b02ea6e17de8..8d4c8bdb9065 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -119,5 +119,6 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define is_uv_system() 0
 #define uv_wakeup_secondary(a, b) 1
 
+extern void force_mask_ioapic_irq_2(void);
 
 #endif
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 0f8504627c41..082ad020e412 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -46,4 +46,6 @@ extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
 
 extern void setup_apic_routing(void);
 
+extern void force_mask_ioapic_irq_2(void);
+
 #endif