author      Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer   Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit      c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree        ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/kernel/e820.c
parent      ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent      6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/kernel/e820.c')
-rw-r--r--   arch/x86/kernel/e820.c   211
1 file changed, 62 insertions(+), 149 deletions(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0d6fc71bedb1..3e2ef8425316 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -11,10 +11,13 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/crash_dump.h>
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 #include <linux/firmware-map.h>
+#include <linux/memblock.h>
 
 #include <asm/e820.h>
 #include <asm/proto.h>
@@ -665,21 +668,15 @@ __init void e820_setup_gap(void)
  * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
  * linked list of struct setup_data, which is parsed here.
  */
-void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
+void __init parse_e820_ext(struct setup_data *sdata)
 {
-	u32 map_len;
 	int entries;
 	struct e820entry *extmap;
 
 	entries = sdata->len / sizeof(struct e820entry);
-	map_len = sdata->len + sizeof(struct setup_data);
-	if (map_len > PAGE_SIZE)
-		sdata = early_ioremap(pa_data, map_len);
 	extmap = (struct e820entry *)(sdata->data);
 	__append_e820_map(extmap, entries);
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-	if (map_len > PAGE_SIZE)
-		early_iounmap(sdata, map_len);
 	printk(KERN_INFO "extended physical RAM map:\n");
 	e820_print_map("extended");
 }
@@ -738,73 +735,7 @@ core_initcall(e820_mark_nvs_memory);
 #endif
 
 /*
- * Find a free area with specified alignment in a specific range.
- */
-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
-{
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		u64 addr;
-		u64 ei_start, ei_last;
-
-		if (ei->type != E820_RAM)
-			continue;
-
-		ei_last = ei->addr + ei->size;
-		ei_start = ei->addr;
-		addr = find_early_area(ei_start, ei_last, start, end,
-				       size, align);
-
-		if (addr != -1ULL)
-			return addr;
-	}
-	return -1ULL;
-}
-
-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
-	return find_e820_area(start, end, size, align);
-}
-
-u64 __init get_max_mapped(void)
-{
-	u64 end = max_pfn_mapped;
-
-	end <<= PAGE_SHIFT;
-
-	return end;
-}
-/*
- * Find next free range after *start
- */
-u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
-{
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		u64 addr;
-		u64 ei_start, ei_last;
-
-		if (ei->type != E820_RAM)
-			continue;
-
-		ei_last = ei->addr + ei->size;
-		ei_start = ei->addr;
-		addr = find_early_area_size(ei_start, ei_last, start,
-					    sizep, align);
-
-		if (addr != -1ULL)
-			return addr;
-	}
-
-	return -1ULL;
-}
-
-/*
- * pre allocated 4k and reserved it in e820
+ * pre allocated 4k and reserved it in memblock and e820_saved
  */
 u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 {
@@ -813,8 +744,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	u64 start;
 
 	for (start = startt; ; start += size) {
-		start = find_e820_area_size(start, &size, align);
-		if (!(start + 1))
+		start = memblock_x86_find_in_range_size(start, &size, align);
+		if (start == MEMBLOCK_ERROR)
 			return 0;
 		if (size >= sizet)
 			break;
@@ -830,10 +761,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	addr = round_down(start + size - sizet, align);
 	if (addr < start)
 		return 0;
-	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
+	memblock_x86_reserve_range(addr, addr + sizet, "new next");
 	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-	printk(KERN_INFO "update e820 for early_reserve_e820\n");
-	update_e820();
+	printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
 	update_e820_saved();
 
 	return addr;
@@ -895,74 +825,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
 {
 	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
 }
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
-				   unsigned long start_pfn,
-				   unsigned long last_pfn,
-				   unsigned long *ei_startpfn,
-				   unsigned long *ei_endpfn)
-{
-	u64 align = PAGE_SIZE;
-
-	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
-	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
-	/* Skip map entries smaller than a page */
-	if (*ei_startpfn >= *ei_endpfn)
-		return 0;
-
-	/* Skip if map is outside the node */
-	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
-				    *ei_startpfn >= last_pfn)
-		return 0;
-
-	/* Check for overlaps */
-	if (*ei_startpfn < start_pfn)
-		*ei_startpfn = start_pfn;
-	if (*ei_endpfn > last_pfn)
-		*ei_endpfn = last_pfn;
-
-	return 1;
-}
-
-/* Walk the e820 map and register active regions within a node */
-void __init e820_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long last_pfn)
-{
-	unsigned long ei_startpfn;
-	unsigned long ei_endpfn;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++)
-		if (e820_find_active_region(&e820.map[i],
-					    start_pfn, last_pfn,
-					    &ei_startpfn, &ei_endpfn))
-			add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init e820_hole_size(u64 start, u64 end)
-{
-	unsigned long start_pfn = start >> PAGE_SHIFT;
-	unsigned long last_pfn = end >> PAGE_SHIFT;
-	unsigned long ei_startpfn, ei_endpfn, ram = 0;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		if (e820_find_active_region(&e820.map[i],
-					    start_pfn, last_pfn,
-					    &ei_startpfn, &ei_endpfn))
-			ram += ei_endpfn - ei_startpfn;
-	}
-	return end - start - ((u64)ram << PAGE_SHIFT);
-}
 
 static void early_panic(char *msg)
 {
@@ -980,15 +842,21 @@ static int __init parse_memopt(char *p)
 	if (!p)
 		return -EINVAL;
 
-#ifdef CONFIG_X86_32
 	if (!strcmp(p, "nopentium")) {
+#ifdef CONFIG_X86_32
 		setup_clear_cpu_cap(X86_FEATURE_PSE);
 		return 0;
-	}
+#else
+		printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
+		return -EINVAL;
 #endif
+	}
 
 	userdef = 1;
 	mem_size = memparse(p, &p);
+	/* don't remove all of memory when handling "mem={invalid}" param */
+	if (mem_size == 0)
+		return -EINVAL;
 	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
 
 	return 0;
@@ -1210,3 +1078,48 @@ void __init setup_memory_map(void)
 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
 	e820_print_map(who);
 }
+
+void __init memblock_x86_fill(void)
+{
+	int i;
+	u64 end;
+
+	/*
+	 * EFI may have more than 128 entries
+	 * We are safe to enable resizing, beause memblock_x86_fill()
+	 * is rather later for x86
+	 */
+	memblock_can_resize = 1;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+
+		end = ei->addr + ei->size;
+		if (end != (resource_size_t)end)
+			continue;
+
+		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
+			continue;
+
+		memblock_add(ei->addr, ei->size);
+	}
+
+	memblock_analyze();
+	memblock_dump_all();
+}
+
+void __init memblock_find_dma_reserve(void)
+{
+#ifdef CONFIG_X86_64
+	u64 free_size_pfn;
+	u64 mem_size_pfn;
+	/*
+	 * need to find out used area below MAX_DMA_PFN
+	 * need to use memblock to get free size in [0, MAX_DMA_PFN]
+	 * at first, and assume boot_mem will not take below MAX_DMA_PFN
+	 */
+	mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
+	free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
+	set_dma_reserve(mem_size_pfn - free_size_pfn);
+#endif
+}