Diffstat (limited to 'arch/x86/kernel/e820.c')
 arch/x86/kernel/e820.c | 58
 1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 303a0e48f076..8071e2f3d6eb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -738,35 +738,17 @@ core_initcall(e820_mark_nvs_memory);
 /*
  * pre allocated 4k and reserved it in memblock and e820_saved
  */
-u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
+u64 __init early_reserve_e820(u64 size, u64 align)
 {
-        u64 size = 0;
         u64 addr;
-        u64 start;
 
-        for (start = startt; ; start += size) {
-                start = memblock_x86_find_in_range_size(start, &size, align);
-                if (start == MEMBLOCK_ERROR)
-                        return 0;
-                if (size >= sizet)
-                        break;
+        addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+        if (addr) {
+                e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
+                printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+                update_e820_saved();
         }
 
-#ifdef CONFIG_X86_32
-        if (start >= MAXMEM)
-                return 0;
-        if (start + size > MAXMEM)
-                size = MAXMEM - start;
-#endif
-
-        addr = round_down(start + size - sizet, align);
-        if (addr < start)
-                return 0;
-        memblock_x86_reserve_range(addr, addr + sizet, "new next");
-        e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-        printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
-        update_e820_saved();
-
         return addr;
 }
 
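The rewritten early_reserve_e820() delegates placement to memblock instead of scanning for a suitable window by hand. For orientation, here is a minimal sketch of how an early-boot caller might use the new two-argument form; the helper name and the 64 KiB size/alignment are invented for illustration and are not taken from this patch.

/* Hypothetical caller sketch, not part of this patch */
static u64 __init reserve_early_scratch(void)
{
        /* 64 KiB, naturally aligned; memblock now chooses the location */
        u64 addr = early_reserve_e820(64 << 10, 64 << 10);

        if (!addr)
                printk(KERN_WARNING "early scratch reservation failed\n");

        /* nonzero: the range is reserved in memblock and marked in e820_saved */
        return addr;
}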
@@ -1090,7 +1072,7 @@ void __init memblock_x86_fill(void)
          * We are safe to enable resizing, beause memblock_x86_fill()
          * is rather later for x86
          */
-        memblock_can_resize = 1;
+        memblock_allow_resize();
 
         for (i = 0; i < e820.nr_map; i++) {
                 struct e820entry *ei = &e820.map[i];
@@ -1105,22 +1087,36 @@ void __init memblock_x86_fill(void)
                 memblock_add(ei->addr, ei->size);
         }
 
-        memblock_analyze();
         memblock_dump_all();
 }
 
 void __init memblock_find_dma_reserve(void)
 {
 #ifdef CONFIG_X86_64
-        u64 free_size_pfn;
-        u64 mem_size_pfn;
+        u64 nr_pages = 0, nr_free_pages = 0;
+        unsigned long start_pfn, end_pfn;
+        phys_addr_t start, end;
+        int i;
+        u64 u;
+
         /*
          * need to find out used area below MAX_DMA_PFN
          * need to use memblock to get free size in [0, MAX_DMA_PFN]
          * at first, and assume boot_mem will not take below MAX_DMA_PFN
          */
-        mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
-        free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
-        set_dma_reserve(mem_size_pfn - free_size_pfn);
+        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+                start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
+                end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
+                nr_pages += end_pfn - start_pfn;
+        }
+
+        for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+                start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
+                end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
+                if (start_pfn < end_pfn)
+                        nr_free_pages += end_pfn - start_pfn;
+        }
+
+        set_dma_reserve(nr_pages - nr_free_pages);
 #endif
 }
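The new memblock_find_dma_reserve() is plain accounting: count the pages present below MAX_DMA_PFN, subtract the pages memblock still reports as free in that window, and pass the difference to set_dma_reserve(). The standalone sketch below mirrors that clamp-and-count arithmetic outside the kernel; the PFN ranges are made-up example data, not values from this patch.

#include <stdio.h>
#include <stdint.h>

#define MAX_DMA_PFN 0x1000ULL   /* 16 MB limit with 4 KiB pages */

struct pfn_range { uint64_t start, end; };

/* Clamp each [start, end) PFN range to MAX_DMA_PFN and count the pages */
static uint64_t count_below_dma(const struct pfn_range *r, int n)
{
        uint64_t pages = 0;

        for (int i = 0; i < n; i++) {
                uint64_t s = r[i].start < MAX_DMA_PFN ? r[i].start : MAX_DMA_PFN;
                uint64_t e = r[i].end   < MAX_DMA_PFN ? r[i].end   : MAX_DMA_PFN;

                if (s < e)
                        pages += e - s;
        }
        return pages;
}

int main(void)
{
        /* memory present around the DMA limit (example data) */
        struct pfn_range mem[]   = { { 0x001, 0x09f }, { 0x100, 0x2000 } };
        /* portion of it still free after early reservations (example data) */
        struct pfn_range avail[] = { { 0x010, 0x09f }, { 0x200, 0x2000 } };

        uint64_t nr_pages      = count_below_dma(mem, 2);
        uint64_t nr_free_pages = count_below_dma(avail, 2);

        /* this difference is what the kernel hands to set_dma_reserve() */
        printf("dma reserve = %llu pages\n",
               (unsigned long long)(nr_pages - nr_free_pages));
        return 0;
}

In the kernel loop, PFN_UP() and PFN_DOWN() round the free-range boundaries inward so only wholly free pages are counted, and the start_pfn < end_pfn check guards against ranges that shrink to nothing after clamping and rounding.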