commit 72d7c3b33c980843e756681fb4867dc1efd62a76
tree   9607345d9fa055dd501aacf0772258fb72897035
parent 301ff3e88ef9ff4bdb92f36a3e6170fce4c9dd34
author    Yinghai Lu <yinghai@kernel.org>    2010-08-25 16:39:17 -0400
committer H. Peter Anvin <hpa@zytor.com>     2010-08-27 14:12:29 -0400
x86: Use memblock to replace early_res
1. Replace find_e820_area() with memblock_find_in_range().
2. Replace reserve_early() with memblock_x86_reserve_range().
3. Replace free_early() with memblock_x86_free_range().
4. NO_BOOTMEM will switch to memblock too.
5. Keep the _e820/_early wrappers in this patch; a following patch will
   replace all the callers.
6. Because memblock_x86_free_range() supports partial free, some special
   handling can be removed.
7. Make sure memblock_find_in_range() is only called after
   memblock_x86_fill(), so adjust some calls to run later in
   setup.c::setup_arch() -- corruption_check and mptable_update.

-v2: Move reserve_brk() early, before fill_memblock_area(), to avoid an
     overlap between brk and memblock_find_in_range(). The overlap can
     happen when there are more than 128 RAM entries in the E820 table,
     because memblock_x86_fill() may then use memblock_find_in_range()
     to find a new place for the memblock.memory.region array. We also
     no longer need extend_brk() after fill_memblock_area(), so move
     reserve_brk() early, before fill_memblock_area().
-v3: Move find_smp_config() early, so that memblock_find_in_range()
     cannot pick a wrong place if the BIOS did not put the mptable
     where it belongs.
-v4: Treat RESERVED_KERN as RAM in memblock.memory; those ranges are
     already in memblock.reserved. Use __NOT_KEEP_MEMBLOCK so the
     memblock-related code can be freed later.
-v5: The generic __memblock_find_in_range() searches from high to low,
     and on 32-bit the active regions do include high pages, so replace
     the limit with memblock.default_alloc_limit, aka get_max_mapped().
-v6: Use current_limit instead.
-v7: Check against MEMBLOCK_ERROR instead of -1ULL or -1L.
-v8: Set memblock_can_resize early to handle EFI with more RAM entries.
-v9: Update after the kmemleak changes in mainline.

Suggested-by: David S. Miller <davem@davemloft.net>
Suggested-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
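To make the API mapping in items 1-3 concrete, here is a minimal, hypothetical
caller sketch; alloc_early_buf() and the "early buf" label are invented for
illustration and are not part of this patch:

/* Hypothetical early caller: find a free range via the kept wrapper,
 * then reserve it. After this patch both calls are backed by memblock. */
static u64 __init alloc_early_buf(u64 size)
{
	/* upper limit: highest directly-mapped address, as computed by
	 * the get_max_mapped() helper this patch drops from e820.c */
	u64 limit = (u64)max_pfn_mapped << PAGE_SHIFT;
	u64 addr = find_e820_area(0, limit, size, PAGE_SIZE);

	if (addr == -1ULL)	/* wrapper maps MEMBLOCK_ERROR to -1ULL */
		return 0;

	/* now a thin wrapper around memblock_x86_reserve_range() */
	reserve_early(addr, addr + size, "early buf");
	return addr;
}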
Diffstat (limited to 'arch/x86/kernel/e820.c')
-rw-r--r--   arch/x86/kernel/e820.c   159
1 file changed, 53 insertions, 106 deletions
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0d6fc71bedb1..a9221d18a5ed 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
 #include <linux/pfn.h>
 #include <linux/suspend.h>
 #include <linux/firmware-map.h>
+#include <linux/memblock.h>
 
 #include <asm/e820.h>
 #include <asm/proto.h>
@@ -742,69 +743,29 @@ core_initcall(e820_mark_nvs_memory);
  */
 u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
 {
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		u64 addr;
-		u64 ei_start, ei_last;
+	u64 mem = memblock_find_in_range(start, end, size, align);
 
-		if (ei->type != E820_RAM)
-			continue;
-
-		ei_last = ei->addr + ei->size;
-		ei_start = ei->addr;
-		addr = find_early_area(ei_start, ei_last, start, end,
-					 size, align);
-
-		if (addr != -1ULL)
-			return addr;
-	}
-	return -1ULL;
-}
+	if (mem == MEMBLOCK_ERROR)
+		return -1ULL;
 
-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
-	return find_e820_area(start, end, size, align);
+	return mem;
 }
 
-u64 __init get_max_mapped(void)
-{
-	u64 end = max_pfn_mapped;
-
-	end <<= PAGE_SHIFT;
-
-	return end;
-}
 /*
  * Find next free range after *start
  */
 u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
 {
-	int i;
+	u64 mem = memblock_x86_find_in_range_size(start, sizep, align);
 
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		u64 addr;
-		u64 ei_start, ei_last;
-
-		if (ei->type != E820_RAM)
-			continue;
-
-		ei_last = ei->addr + ei->size;
-		ei_start = ei->addr;
-		addr = find_early_area_size(ei_start, ei_last, start,
-					 sizep, align);
+	if (mem == MEMBLOCK_ERROR)
+		return -1ULL;
 
-		if (addr != -1ULL)
-			return addr;
-	}
-
-	return -1ULL;
+	return mem;
 }
 
 /*
- * pre allocated 4k and reserved it in e820
+ * pre allocated 4k and reserved it in memblock and e820_saved
  */
 u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 {
@@ -813,8 +774,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	u64 start;
 
 	for (start = startt; ; start += size) {
-		start = find_e820_area_size(start, &size, align);
-		if (!(start + 1))
+		start = memblock_x86_find_in_range_size(start, &size, align);
+		if (start == MEMBLOCK_ERROR)
 			return 0;
 		if (size >= sizet)
 			break;
@@ -830,10 +791,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	addr = round_down(start + size - sizet, align);
 	if (addr < start)
 		return 0;
-	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
+	memblock_x86_reserve_range(addr, addr + sizet, "new next");
 	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-	printk(KERN_INFO "update e820 for early_reserve_e820\n");
-	update_e820();
+	printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
 	update_e820_saved();
 
 	return addr;
@@ -895,52 +855,12 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
 {
 	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
 }
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
-				   unsigned long start_pfn,
-				   unsigned long last_pfn,
-				   unsigned long *ei_startpfn,
-				   unsigned long *ei_endpfn)
-{
-	u64 align = PAGE_SIZE;
-
-	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
-	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
-	/* Skip map entries smaller than a page */
-	if (*ei_startpfn >= *ei_endpfn)
-		return 0;
-
-	/* Skip if map is outside the node */
-	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
-	    *ei_startpfn >= last_pfn)
-		return 0;
-
-	/* Check for overlaps */
-	if (*ei_startpfn < start_pfn)
-		*ei_startpfn = start_pfn;
-	if (*ei_endpfn > last_pfn)
-		*ei_endpfn = last_pfn;
-
-	return 1;
-}
 
 /* Walk the e820 map and register active regions within a node */
 void __init e820_register_active_regions(int nid, unsigned long start_pfn,
 					 unsigned long last_pfn)
 {
-	unsigned long ei_startpfn;
-	unsigned long ei_endpfn;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++)
-		if (e820_find_active_region(&e820.map[i],
-					    start_pfn, last_pfn,
-					    &ei_startpfn, &ei_endpfn))
-			add_active_range(nid, ei_startpfn, ei_endpfn);
+	memblock_x86_register_active_regions(nid, start_pfn, last_pfn);
 }
 
 /*
@@ -950,18 +870,16 @@ void __init e820_register_active_regions(int nid, unsigned long start_pfn,
  */
 u64 __init e820_hole_size(u64 start, u64 end)
 {
-	unsigned long start_pfn = start >> PAGE_SHIFT;
-	unsigned long last_pfn = end >> PAGE_SHIFT;
-	unsigned long ei_startpfn, ei_endpfn, ram = 0;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		if (e820_find_active_region(&e820.map[i],
-					    start_pfn, last_pfn,
-					    &ei_startpfn, &ei_endpfn))
-			ram += ei_endpfn - ei_startpfn;
-	}
-	return end - start - ((u64)ram << PAGE_SHIFT);
+	return memblock_x86_hole_size(start, end);
+}
+
+void reserve_early(u64 start, u64 end, char *name)
+{
+	memblock_x86_reserve_range(start, end, name);
+}
+void free_early(u64 start, u64 end)
+{
+	memblock_x86_free_range(start, end);
 }
 
 static void early_panic(char *msg)
@@ -1210,3 +1128,32 @@ void __init setup_memory_map(void)
 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
 	e820_print_map(who);
 }
+
+void __init memblock_x86_fill(void)
+{
+	int i;
+	u64 end;
+
+	/*
+	 * EFI may have more than 128 entries.
+	 * We are safe to enable resizing, because memblock_x86_fill()
+	 * runs rather late for x86.
+	 */
+	memblock_can_resize = 1;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+
+		end = ei->addr + ei->size;
+		if (end != (resource_size_t)end)
+			continue;
+
+		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
+			continue;
+
+		memblock_add(ei->addr, ei->size);
+	}
+
+	memblock_analyze();
+	memblock_dump_all();
+}
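The ordering constraints called out in the changelog (item 7, -v2, and -v3)
amount to a specific sequence in setup.c::setup_arch(). The outline below is
a simplified sketch of that sequence as the changelog describes it, not part
of this file's diff:

/* Sketch of the relevant setup_arch() ordering after this patch. */
void __init setup_arch(char **cmdline_p)
{
	/* ... e820 map parsed and sanitized ... */

	reserve_brk();		/* -v2: claim the brk area before memblock
				 * can allocate, so the two never overlap */
	find_smp_config();	/* -v3: locate the mptable before memblock
				 * starts handing out ranges */

	memblock_x86_fill();	/* populate memblock.memory from e820
				 * (E820_RAM and E820_RESERVED_KERN) */

	/* item 7: users of memblock_find_in_range(), such as the
	 * corruption check and the mptable update mentioned in the
	 * changelog, must run after memblock_x86_fill() */

	/* ... remainder of setup_arch() ... */
}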