aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/e820.c
diff options
context:
space:
mode:
authorYinghai Lu <yhlu.kernel@gmail.com>2008-05-18 04:18:57 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-25 04:55:11 -0400
commita4c81cf684350797939416c99effb9d3ae46bca6 (patch)
tree93f58c28e1dae637a5bb9fd6166968808f10e472 /arch/x86/kernel/e820.c
parent69c9189320c46b14e5ae3ad4b3a0d35cc63cba20 (diff)
x86: extend e820 early_res support 32bit
move early_res related from e820_64.c to e820.c make ebda detection to be done in head32.c remove smp_alloc_memory, because we have fixed trampoline address now. Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> arch/x86/kernel/e820.c | 214 ++++++++++++++++++++++++++++++++++++ arch/x86/kernel/e820_64.c | 196 -------------------------------- arch/x86/kernel/head32.c | 76 ++++++++++++ arch/x86/kernel/setup_32.c | 109 +++--------------- arch/x86/kernel/smpboot.c | 17 -- arch/x86/kernel/trampoline.c | 2 arch/x86/mach-voyager/voyager_smp.c | 9 - include/asm-x86/e820.h | 6 + include/asm-x86/e820_64.h | 9 - include/asm-x86/smp.h | 1 10 files changed, 320 insertions(+), 319 deletions(-) Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/e820.c')
-rw-r--r--arch/x86/kernel/e820.c214
1 files changed, 214 insertions, 0 deletions
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 41c480ae47df..35da8cdbe5e6 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -22,7 +22,9 @@
22#include <asm/pgtable.h> 22#include <asm/pgtable.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/e820.h> 24#include <asm/e820.h>
25#include <asm/proto.h>
25#include <asm/setup.h> 26#include <asm/setup.h>
27#include <asm/trampoline.h>
26 28
27struct e820map e820; 29struct e820map e820;
28 30
@@ -493,3 +495,215 @@ __init void e820_setup_gap(void)
493 pci_mem_start, gapstart, gapsize); 495 pci_mem_start, gapstart, gapsize);
494} 496}
495 497
498
499/*
500 * Early reserved memory areas.
501 */
502#define MAX_EARLY_RES 20
503
504struct early_res {
505 u64 start, end;
506 char name[16];
507};
508static struct early_res early_res[MAX_EARLY_RES] __initdata = {
509 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
510#if defined(CONFIG_X86_64) && defined(CONFIG_X86_TRAMPOLINE)
511 { TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
512#endif
513#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
514 /*
515 * But first pinch a few for the stack/trampoline stuff
516 * FIXME: Don't need the extra page at 4K, but need to fix
517 * trampoline before removing it. (see the GDT stuff)
518 */
519 { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE" },
520 /*
521 * Has to be in very low memory so we can execute
522 * real-mode AP code.
523 */
524 { TRAMPOLINE_BASE, TRAMPOLINE_BASE + PAGE_SIZE, "TRAMPOLINE" },
525#endif
526 {}
527};
528
529void __init reserve_early(u64 start, u64 end, char *name)
530{
531 int i;
532 struct early_res *r;
533 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
534 r = &early_res[i];
535 if (end > r->start && start < r->end)
536 panic("Overlapping early reservations %llx-%llx %s to %llx-%llx %s\n",
537 start, end - 1, name?name:"", r->start,
538 r->end - 1, r->name);
539 }
540 if (i >= MAX_EARLY_RES)
541 panic("Too many early reservations");
542 r = &early_res[i];
543 r->start = start;
544 r->end = end;
545 if (name)
546 strncpy(r->name, name, sizeof(r->name) - 1);
547}
548
549void __init free_early(u64 start, u64 end)
550{
551 struct early_res *r;
552 int i, j;
553
554 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
555 r = &early_res[i];
556 if (start == r->start && end == r->end)
557 break;
558 }
559 if (i >= MAX_EARLY_RES || !early_res[i].end)
560 panic("free_early on not reserved area: %llx-%llx!",
561 start, end);
562
563 for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
564 ;
565
566 memmove(&early_res[i], &early_res[i + 1],
567 (j - 1 - i) * sizeof(struct early_res));
568
569 early_res[j - 1].end = 0;
570}
571
572void __init early_res_to_bootmem(u64 start, u64 end)
573{
574 int i;
575 u64 final_start, final_end;
576 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
577 struct early_res *r = &early_res[i];
578 final_start = max(start, r->start);
579 final_end = min(end, r->end);
580 if (final_start >= final_end)
581 continue;
582 printk(KERN_INFO " early res: %d [%llx-%llx] %s\n", i,
583 final_start, final_end - 1, r->name);
584#ifdef CONFIG_X86_64
585 reserve_bootmem_generic(final_start, final_end - final_start);
586#else
587 reserve_bootmem(final_start, final_end - final_start,
588 BOOTMEM_DEFAULT);
589#endif
590 }
591}
592
593/* Check for already reserved areas */
594static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
595{
596 int i;
597 u64 addr = *addrp, last;
598 int changed = 0;
599again:
600 last = addr + size;
601 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
602 struct early_res *r = &early_res[i];
603 if (last >= r->start && addr < r->end) {
604 *addrp = addr = round_up(r->end, align);
605 changed = 1;
606 goto again;
607 }
608 }
609 return changed;
610}
611
612/* Check for already reserved areas */
613static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
614{
615 int i;
616 u64 addr = *addrp, last;
617 u64 size = *sizep;
618 int changed = 0;
619again:
620 last = addr + size;
621 for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
622 struct early_res *r = &early_res[i];
623 if (last > r->start && addr < r->start) {
624 size = r->start - addr;
625 changed = 1;
626 goto again;
627 }
628 if (last > r->end && addr < r->end) {
629 addr = round_up(r->end, align);
630 size = last - addr;
631 changed = 1;
632 goto again;
633 }
634 if (last <= r->end && addr >= r->start) {
635 (*sizep)++;
636 return 0;
637 }
638 }
639 if (changed) {
640 *addrp = addr;
641 *sizep = size;
642 }
643 return changed;
644}
645
646/*
647 * Find a free area with specified alignment in a specific range.
648 */
649u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
650{
651 int i;
652
653 for (i = 0; i < e820.nr_map; i++) {
654 struct e820entry *ei = &e820.map[i];
655 u64 addr, last;
656 u64 ei_last;
657
658 if (ei->type != E820_RAM)
659 continue;
660 addr = round_up(ei->addr, align);
661 ei_last = ei->addr + ei->size;
662 if (addr < start)
663 addr = round_up(start, align);
664 if (addr >= ei_last)
665 continue;
666 while (bad_addr(&addr, size, align) && addr+size <= ei_last)
667 ;
668 last = addr + size;
669 if (last > ei_last)
670 continue;
671 if (last > end)
672 continue;
673 return addr;
674 }
675 return -1ULL;
676}
677
678/*
679 * Find next free range after *start
680 */
681u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
682{
683 int i;
684
685 for (i = 0; i < e820.nr_map; i++) {
686 struct e820entry *ei = &e820.map[i];
687 u64 addr, last;
688 u64 ei_last;
689
690 if (ei->type != E820_RAM)
691 continue;
692 addr = round_up(ei->addr, align);
693 ei_last = ei->addr + ei->size;
694 if (addr < start)
695 addr = round_up(start, align);
696 if (addr >= ei_last)
697 continue;
698 *sizep = ei_last - addr;
699 while (bad_addr_size(&addr, sizep, align) &&
700 addr + *sizep <= ei_last)
701 ;
702 last = addr + *sizep;
703 if (last > ei_last)
704 continue;
705 return addr;
706 }
707 return -1UL;
708
709}