Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/e820.c       |  6
-rw-r--r--  arch/x86_64/kernel/io_apic.c    |  5
-rw-r--r--  arch/x86_64/kernel/kprobes.c    |  6
-rw-r--r--  arch/x86_64/kernel/mpparse.c    | 12
-rw-r--r--  arch/x86_64/kernel/pci-gart.c   |  8
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c  |  7
-rw-r--r--  arch/x86_64/kernel/setup.c      | 30
-rw-r--r--  arch/x86_64/kernel/traps.c      | 33
-rw-r--r--  arch/x86_64/mm/srat.c           | 15
9 files changed, 98 insertions, 24 deletions
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 62776c07cff1..222b5b46d2b2 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -76,6 +76,12 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 		*addrp = __pa_symbol(&_end);
 		return 1;
 	}
+
+	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
+		*addrp = ebda_addr + ebda_size;
+		return 1;
+	}
+
 	/* XXX ramdisk image here? */
 	return 0;
 }
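
For illustration, here is a minimal user-space sketch of the interval check this hunk adds to bad_addr(): if a candidate range [addr, addr+size) would overlap the EBDA, the start address is bumped past it. The helper name and the ebda_addr/ebda_size values below are hypothetical, not taken from the kernel.

/* Standalone sketch of the overlap-and-bump check; all values are made up. */
#include <stdio.h>

static unsigned long ebda_addr = 0x9fc00;   /* hypothetical EBDA base */
static unsigned long ebda_size = 0x400;     /* hypothetical EBDA size */

/* Return 1 and move *addrp past the EBDA when [*addrp, *addrp + size)
 * would overlap it, mirroring the check added above. */
static int bumps_past_ebda(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp, last = addr + size;

	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
		*addrp = ebda_addr + ebda_size;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long addr = 0x9f800;

	if (bumps_past_ebda(&addr, 0x1000))
		printf("candidate moved to %#lx\n", addr);   /* prints 0xa0000 */
	return 0;
}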
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 77b4c608cca0..0de3ea938830 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1777,6 +1777,8 @@ static inline void unlock_ExtINT_logic(void)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+int timer_uses_ioapic_pin_0;
+
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
  * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
@@ -1814,6 +1816,9 @@ static inline void check_timer(void)
 	pin2 = ioapic_i8259.pin;
 	apic2 = ioapic_i8259.apic;
 
+	if (pin1 == 0)
+		timer_uses_ioapic_pin_0 = 1;
+
 	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
 		vector, apic1, pin1, apic2, pin2);
 
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 1eaa5dae6174..fa1d19ca700a 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -514,13 +514,13 @@ static void __kprobes resume_execution(struct kprobe *p,
 		*tos = orig_rip + (*tos - copy_rip);
 		break;
 	case 0xff:
-		if ((*insn & 0x30) == 0x10) {
+		if ((insn[1] & 0x30) == 0x10) {
 			/* call absolute, indirect */
 			/* Fix return addr; rip is correct. */
 			next_rip = regs->rip;
 			*tos = orig_rip + (*tos - copy_rip);
-		} else if (((*insn & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
-			   ((*insn & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
+		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
+			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
 			/* rip is correct. */
 			next_rip = regs->rip;
 		}
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index b17cf3eba359..083da7e606b1 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -968,7 +968,17 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 		 */
 		int irq = gsi;
 		if (gsi < MAX_GSI_NUM) {
-			if (gsi > 15)
+			/*
+			 * Retain the VIA chipset work-around (gsi > 15), but
+			 * avoid a problem where the 8254 timer (IRQ0) is setup
+			 * via an override (so it's not on pin 0 of the ioapic),
+			 * and at the same time, the pin 0 interrupt is a PCI
+			 * type.  The gsi > 15 test could cause these two pins
+			 * to be shared as IRQ0, and they are not shareable.
+			 * So test for this condition, and if necessary, avoid
+			 * the pin collision.
+			 */
+			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
 				gsi = pci_irq++;
 			/*
 			 * Don't assign IRQ used by ACPI SCI
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 9d3d76c85ae7..2480d3f08a47 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -639,6 +639,14 @@ static int __init pci_iommu_init(void)
 		return -1;
 	}
 
+	i = 0;
+	for_all_nb(dev)
+		i++;
+	if (i > MAX_NB) {
+		printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
+		return -1;
+	}
+
 	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
 	aper_size = info.aper_size * 1024 * 1024;
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 44adcc2d5e5b..1f6ecc62061d 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -12,9 +12,10 @@ static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
 	if (hwdev && bus + size > *hwdev->dma_mask) {
-		printk(KERN_ERR
-		    "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
-			name, (long long)bus, size, (long long)*hwdev->dma_mask);
+		if (*hwdev->dma_mask >= 0xffffffffULL)
+			printk(KERN_ERR
+			    "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+				name, (long long)bus, size, (long long)*hwdev->dma_mask);
 		return 0;
 	}
 	return 1;
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index ebc3c33b1c6c..f0870bef24d1 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -571,17 +571,28 @@ static inline void copy_edd(void)
 #endif
 
 #define EBDA_ADDR_POINTER 0x40E
-static void __init reserve_ebda_region(void)
+
+unsigned __initdata ebda_addr;
+unsigned __initdata ebda_size;
+
+static void discover_ebda(void)
 {
-	unsigned int addr;
-	/**
+	/*
 	 * there is a real-mode segmented pointer pointing to the
 	 * 4K EBDA area at 0x40E
 	 */
-	addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
-	addr <<= 4;
-	if (addr)
-		reserve_bootmem_generic(addr, PAGE_SIZE);
+	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
+	ebda_addr <<= 4;
+
+	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
+
+	/* Round EBDA up to pages */
+	if (ebda_size == 0)
+		ebda_size = 1;
+	ebda_size <<= 10;
+	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
+	if (ebda_size > 64*1024)
+		ebda_size = 64*1024;
 }
 
 void __init setup_arch(char **cmdline_p)
@@ -627,6 +638,8 @@ void __init setup_arch(char **cmdline_p)
 
 	check_efer();
 
+	discover_ebda();
+
 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 
 	dmi_scan_machine();
@@ -669,7 +682,8 @@ void __init setup_arch(char **cmdline_p)
 	reserve_bootmem_generic(0, PAGE_SIZE);
 
 	/* reserve ebda region */
-	reserve_ebda_region();
+	if (ebda_addr)
+		reserve_bootmem_generic(ebda_addr, ebda_size);
 
 #ifdef CONFIG_SMP
 	/*
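
The new discover_ebda() boils down to a little real-mode pointer arithmetic. Below is a standalone sketch of that arithmetic using hypothetical BIOS values instead of real memory reads at 0x40E; round_up_to stands in for the kernel's round_up().

/* Sketch of the EBDA address/size computation, on plain integers. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long round_up_to(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned short ebda_segment = 0x9fc0; /* hypothetical word at 0x40E */
	unsigned short ebda_kib     = 1;      /* hypothetical size word at EBDA base */

	unsigned long ebda_addr = (unsigned long)ebda_segment << 4;  /* 0x9fc00 */
	unsigned long ebda_size = ebda_kib ? ebda_kib : 1;           /* at least 1 KiB */

	ebda_size <<= 10;                                            /* KiB -> bytes */
	ebda_size = round_up_to(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64 * 1024)                                   /* clamp to 64 KiB */
		ebda_size = 64 * 1024;

	printf("EBDA at %#lx, reserving %lu bytes\n", ebda_addr, ebda_size);
	return 0;
}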
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 2700b1375c1f..cea335e8746c 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 {
 	if (regs->eflags & X86_EFLAGS_IF)
 		local_irq_disable();
+	/* Make sure to not schedule here because we could be running
+	   on an exception stack. */
 	preempt_enable_no_resched();
 }
 
@@ -385,6 +387,7 @@ void out_of_line_bug(void)
 
 static DEFINE_SPINLOCK(die_lock);
 static int die_owner = -1;
+static unsigned int die_nest_count;
 
 unsigned __kprobes long oops_begin(void)
 {
@@ -399,6 +402,7 @@ unsigned __kprobes long oops_begin(void)
 		else
 			spin_lock(&die_lock);
 	}
+	die_nest_count++;
 	die_owner = cpu;
 	console_verbose();
 	bust_spinlocks(1);
@@ -409,7 +413,13 @@ void __kprobes oops_end(unsigned long flags)
 {
 	die_owner = -1;
 	bust_spinlocks(0);
-	spin_unlock_irqrestore(&die_lock, flags);
+	die_nest_count--;
+	if (die_nest_count)
+		/* We still own the lock */
+		local_irq_restore(flags);
+	else
+		/* Nest count reaches zero, release the lock. */
+		spin_unlock_irqrestore(&die_lock, flags);
 	if (panic_on_oops)
 		panic("Oops");
 }
@@ -464,6 +474,8 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs)
 		panic("nmi watchdog");
 	printk("console shuts up ...\n");
 	oops_end(flags);
+	nmi_exit();
+	local_irq_enable();
 	do_exit(SIGSEGV);
 }
 
@@ -473,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
 {
 	struct task_struct *tsk = current;
 
-	conditional_sti(regs);
-
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = trapnr;
 
@@ -511,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
 							== NOTIFY_STOP) \
 		return; \
+	conditional_sti(regs); \
 	do_trap(trapnr, signr, str, regs, error_code, NULL); \
 }
 
@@ -525,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
 							== NOTIFY_STOP) \
 		return; \
+	conditional_sti(regs); \
 	do_trap(trapnr, signr, str, regs, error_code, &info); \
 }
 
@@ -538,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 DO_ERROR(18, SIGSEGV, "reserved", reserved)
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+
+/* Runs on IST stack */
+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+			12, SIGBUS) == NOTIFY_STOP)
+		return;
+	preempt_conditional_sti(regs);
+	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+	preempt_conditional_cli(regs);
+}
 
 asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
 {
@@ -672,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
 		return;
 	}
+	preempt_conditional_sti(regs);
 	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-	return;
+	preempt_conditional_cli(regs);
 }
 
 /* Help handler running on IST stack to switch back to user stack
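
The die_nest_count change above lets a nested oops (an oops taken while already inside die()) reuse the lock and release it only when the outermost caller finishes. A tiny single-threaded sketch of that counting pattern, with a plain flag standing in for die_lock and no interrupt handling; names mirror the patch but nothing here is kernel code.

/* Illustrative nest-counted lock release, not kernel code. */
#include <assert.h>
#include <stdio.h>

static int lock_held;           /* stand-in for die_lock */
static unsigned int nest_count; /* stand-in for die_nest_count */

static void oops_begin_sketch(void)
{
	if (!lock_held)
		lock_held = 1; /* outermost oops takes the lock */
	nest_count++;
}

static void oops_end_sketch(void)
{
	assert(nest_count > 0);
	nest_count--;
	if (!nest_count)
		lock_held = 0; /* only the outermost exit releases it */
}

int main(void)
{
	oops_begin_sketch();   /* first oops */
	oops_begin_sketch();   /* nested oops on the same CPU */
	oops_end_sketch();
	printf("after inner end: lock_held=%d\n", lock_held); /* still 1 */
	oops_end_sketch();
	printf("after outer end: lock_held=%d\n", lock_held); /* now 0 */
	return 0;
}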
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fcd65a7..e1513532df29 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0	/* Ignore all settings */
+#endif
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
 /* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
+	found_add_area = 0;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+		bad_srat();
 		return;
 	}
 	if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
+		unsigned long range;
 		/* Give them at least part of their hotadd memory upto hotadd_percent
 		   It would be better to spread the limit out
 		   over multiple hotplug areas, but that is too complicated
 		   right now */
 		if (allocated >= allowed)
 			return 0;
-		pages = (allowed - allocated + mem) / sizeof(struct page);
+		range = allowed - allocated;
+		pages = (range / PAGE_SIZE);
 		mem = pages * sizeof(struct page);
-		nd->end = nd->start + pages*PAGE_SIZE;
+		nd->end = nd->start + range;
 	}
 	/* Not completely fool proof, but a good sanity check */
 	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
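
The hotadd_enough_memory() fix above separates the byte range that still fits the budget from the size of the struct page array needed to cover it, instead of mixing the two in one expression. A short sketch of the corrected arithmetic with made-up numbers; the struct page size and budget values are assumptions for illustration only.

/* Sketch of the corrected hot-add clamp arithmetic; values are examples. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define STRUCT_PAGE_SIZE 64UL   /* assumed sizeof(struct page) */

int main(void)
{
	unsigned long allowed   = 512UL << 20;  /* hypothetical budget: 512 MB */
	unsigned long allocated = 384UL << 20;  /* already used: 384 MB */
	unsigned long start     = 0x100000000UL;

	unsigned long range = allowed - allocated;        /* bytes still allowed */
	unsigned long pages = range / PAGE_SIZE;          /* pages in that range */
	unsigned long mem   = pages * STRUCT_PAGE_SIZE;   /* mem_map bytes needed */
	unsigned long end   = start + range;              /* clamped node end */

	printf("range=%lu MB, pages=%lu, mem_map=%lu KB, end=%#lx\n",
	       range >> 20, pages, mem >> 10, end);
	return 0;
}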