Diffstat (limited to 'arch/x86/kernel/e820.c')

 -rw-r--r--  arch/x86/kernel/e820.c | 191
 1 file changed, 51 insertions(+), 140 deletions(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0d6fc71bedb1..0c2b7ef7a34d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
 #include <linux/pfn.h>
 #include <linux/suspend.h>
 #include <linux/firmware-map.h>
+#include <linux/memblock.h>
 
 #include <asm/e820.h>
 #include <asm/proto.h>
@@ -738,73 +739,7 @@ core_initcall(e820_mark_nvs_memory);
 #endif
 
 /*
- * Find a free area with specified alignment in a specific range.
- */
-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
-{
-        int i;
-
-        for (i = 0; i < e820.nr_map; i++) {
-                struct e820entry *ei = &e820.map[i];
-                u64 addr;
-                u64 ei_start, ei_last;
-
-                if (ei->type != E820_RAM)
-                        continue;
-
-                ei_last = ei->addr + ei->size;
-                ei_start = ei->addr;
-                addr = find_early_area(ei_start, ei_last, start, end,
-                                       size, align);
-
-                if (addr != -1ULL)
-                        return addr;
-        }
-        return -1ULL;
-}
-
-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
-        return find_e820_area(start, end, size, align);
-}
-
-u64 __init get_max_mapped(void)
-{
-        u64 end = max_pfn_mapped;
-
-        end <<= PAGE_SHIFT;
-
-        return end;
-}
-/*
- * Find next free range after *start
- */
-u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
-{
-        int i;
-
-        for (i = 0; i < e820.nr_map; i++) {
-                struct e820entry *ei = &e820.map[i];
-                u64 addr;
-                u64 ei_start, ei_last;
-
-                if (ei->type != E820_RAM)
-                        continue;
-
-                ei_last = ei->addr + ei->size;
-                ei_start = ei->addr;
-                addr = find_early_area_size(ei_start, ei_last, start,
-                                            sizep, align);
-
-                if (addr != -1ULL)
-                        return addr;
-        }
-
-        return -1ULL;
-}
-
-/*
- * pre allocated 4k and reserved it in e820
+ * pre allocated 4k and reserved it in memblock and e820_saved
  */
 u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 {
@@ -813,8 +748,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
         u64 start;
 
         for (start = startt; ; start += size) {
-                start = find_e820_area_size(start, &size, align);
-                if (!(start + 1))
+                start = memblock_x86_find_in_range_size(start, &size, align);
+                if (start == MEMBLOCK_ERROR)
                         return 0;
                 if (size >= sizet)
                         break;
@@ -830,10 +765,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
         addr = round_down(start + size - sizet, align);
         if (addr < start)
                 return 0;
-        e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
+        memblock_x86_reserve_range(addr, addr + sizet, "new next");
         e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-        printk(KERN_INFO "update e820 for early_reserve_e820\n");
-        update_e820();
+        printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
         update_e820_saved();
 
         return addr;
@@ -895,74 +829,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
 {
         return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
 }
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
-                                   unsigned long start_pfn,
-                                   unsigned long last_pfn,
-                                   unsigned long *ei_startpfn,
-                                   unsigned long *ei_endpfn)
-{
-        u64 align = PAGE_SIZE;
-
-        *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
-        *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
-        /* Skip map entries smaller than a page */
-        if (*ei_startpfn >= *ei_endpfn)
-                return 0;
-
-        /* Skip if map is outside the node */
-        if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
-                                    *ei_startpfn >= last_pfn)
-                return 0;
-
-        /* Check for overlaps */
-        if (*ei_startpfn < start_pfn)
-                *ei_startpfn = start_pfn;
-        if (*ei_endpfn > last_pfn)
-                *ei_endpfn = last_pfn;
-
-        return 1;
-}
-
-/* Walk the e820 map and register active regions within a node */
-void __init e820_register_active_regions(int nid, unsigned long start_pfn,
-                                         unsigned long last_pfn)
-{
-        unsigned long ei_startpfn;
-        unsigned long ei_endpfn;
-        int i;
-
-        for (i = 0; i < e820.nr_map; i++)
-                if (e820_find_active_region(&e820.map[i],
-                                            start_pfn, last_pfn,
-                                            &ei_startpfn, &ei_endpfn))
-                        add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init e820_hole_size(u64 start, u64 end)
-{
-        unsigned long start_pfn = start >> PAGE_SHIFT;
-        unsigned long last_pfn = end >> PAGE_SHIFT;
-        unsigned long ei_startpfn, ei_endpfn, ram = 0;
-        int i;
-
-        for (i = 0; i < e820.nr_map; i++) {
-                if (e820_find_active_region(&e820.map[i],
-                                            start_pfn, last_pfn,
-                                            &ei_startpfn, &ei_endpfn))
-                        ram += ei_endpfn - ei_startpfn;
-        }
-        return end - start - ((u64)ram << PAGE_SHIFT);
-}
 
 static void early_panic(char *msg)
 {
@@ -1210,3 +1076,48 @@ void __init setup_memory_map(void)
         printk(KERN_INFO "BIOS-provided physical RAM map:\n");
         e820_print_map(who);
 }
+
+void __init memblock_x86_fill(void)
+{
+        int i;
+        u64 end;
+
+        /*
+         * EFI may have more than 128 entries
+         * We are safe to enable resizing, because memblock_x86_fill()
+         * is rather late for x86
+         */
+        memblock_can_resize = 1;
+
+        for (i = 0; i < e820.nr_map; i++) {
+                struct e820entry *ei = &e820.map[i];
+
+                end = ei->addr + ei->size;
+                if (end != (resource_size_t)end)
+                        continue;
+
+                if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
+                        continue;
+
+                memblock_add(ei->addr, ei->size);
+        }
+
+        memblock_analyze();
+        memblock_dump_all();
+}
+
+void __init memblock_find_dma_reserve(void)
+{
+#ifdef CONFIG_X86_64
+        u64 free_size_pfn;
+        u64 mem_size_pfn;
+        /*
+         * need to find out used area below MAX_DMA_PFN
+         * need to use memblock to get free size in [0, MAX_DMA_PFN]
+         * at first, and assume boot_mem will not take below MAX_DMA_PFN
+         */
+        mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
+        free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
+        set_dma_reserve(mem_size_pfn - free_size_pfn);
+#endif
+}
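
A note on the allocation logic, for readers following the conversion: after this patch, early_reserve_e820() no longer scans the e820 map itself; it asks memblock for successive free ranges, stops at the first one large enough, and then carves an aligned block out of the top of that range with round_down(start + size - sizet, align). The sketch below is a minimal user-space model of that walk-and-carve loop, assuming power-of-two alignment. The free_ranges table, toy_find_in_range_size(), toy_early_reserve(), round_down_u64() and TOY_ERROR are invented stand-ins (TOY_ERROR merely plays the role of MEMBLOCK_ERROR; its value here is arbitrary). This is an illustration of the logic, not kernel code.

/* Toy user-space model of the early_reserve_e820() search loop shown above.
 * The range table, helper names and values are illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define TOY_ERROR (~0ULL)  /* local sentinel for "no free range found" */

struct range { uint64_t start, end; };  /* free RAM ranges, sorted, end exclusive */
static const struct range free_ranges[] = {
        { 0x00001000ULL, 0x0009f000ULL },
        { 0x00100000ULL, 0x20000000ULL },
};

/* round down to a power-of-two alignment */
static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
        return x & ~(align - 1);
}

/* Report the free range containing (or following) 'start':
 * return its base and the size remaining in it. */
static uint64_t toy_find_in_range_size(uint64_t start, uint64_t *sizep)
{
        size_t i;

        for (i = 0; i < sizeof(free_ranges) / sizeof(free_ranges[0]); i++) {
                uint64_t base = free_ranges[i].start > start ?
                                free_ranges[i].start : start;

                if (base < free_ranges[i].end) {
                        *sizep = free_ranges[i].end - base;
                        return base;
                }
        }
        return TOY_ERROR;
}

/* Same walk-and-carve strategy as the patched early_reserve_e820() */
static uint64_t toy_early_reserve(uint64_t startt, uint64_t sizet, uint64_t align)
{
        uint64_t size = 0;
        uint64_t start;
        uint64_t addr;

        for (start = startt; ; start += size) {
                start = toy_find_in_range_size(start, &size);
                if (start == TOY_ERROR)
                        return 0;
                if (size >= sizet)
                        break;
        }

        /* carve an aligned block out of the top of the winning range */
        addr = round_down_u64(start + size - sizet, align);
        if (addr < start)
                return 0;

        return addr;
}

int main(void)
{
        /* ask for one 4 KiB block, 4 KiB aligned, searching upward from 0x1000 */
        uint64_t addr = toy_early_reserve(0x1000, 4096, 4096);

        printf("toy reservation at %#llx\n", (unsigned long long)addr);
        return 0;
}

The same reading applies to the two helpers added at the end of the diff: memblock_x86_fill() hands memblock only the E820_RAM and E820_RESERVED_KERN entries whose end still fits in resource_size_t, enabling resizing first because an EFI-provided map may exceed the static entry array, and memblock_find_dma_reserve() derives the reserved page count below MAX_DMA_PFN as the total memory pages minus the free pages in that range.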
