diff options
author | Laura Abbott <lauraa@codeaurora.org> | 2014-04-13 17:54:58 -0400 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-05-31 20:17:12 -0400 |
commit | 1c2f87c22566cd057bc8cde10c37ae9da1a1bb76 (patch) | |
tree | 6c135a3b2b6af6be3b6593ac6329d6d4f353b7dd /arch/arm/mm | |
parent | 1c8c3cf0b5239388e712508a85821f4718f4d889 (diff) |
ARM: 8025/1: Get rid of meminfo
memblock is now fully integrated into the kernel and is the preferred
method for tracking memory. Rather than reinvent the wheel with
meminfo, migrate to using memblock directly instead of meminfo as
an intermediate.
Acked-by: Jason Cooper <jason@lakedaemon.net>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/init.c | 72 | ||||
-rw-r--r-- | arch/arm/mm/mmu.c | 117 | ||||
-rw-r--r-- | arch/arm/mm/nommu.c | 66 |
3 files changed, 99 insertions, 156 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2a77ba8796ae..c8ab21dc2178 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -81,24 +81,21 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2); | |||
81 | * initialization functions, as well as show_mem() for the skipping | 81 | * initialization functions, as well as show_mem() for the skipping |
82 | * of holes in the memory map. It is populated by arm_add_memory(). | 82 | * of holes in the memory map. It is populated by arm_add_memory(). |
83 | */ | 83 | */ |
84 | struct meminfo meminfo; | ||
85 | |||
86 | void show_mem(unsigned int filter) | 84 | void show_mem(unsigned int filter) |
87 | { | 85 | { |
88 | int free = 0, total = 0, reserved = 0; | 86 | int free = 0, total = 0, reserved = 0; |
89 | int shared = 0, cached = 0, slab = 0, i; | 87 | int shared = 0, cached = 0, slab = 0; |
90 | struct meminfo * mi = &meminfo; | 88 | struct memblock_region *reg; |
91 | 89 | ||
92 | printk("Mem-info:\n"); | 90 | printk("Mem-info:\n"); |
93 | show_free_areas(filter); | 91 | show_free_areas(filter); |
94 | 92 | ||
95 | for_each_bank (i, mi) { | 93 | for_each_memblock (memory, reg) { |
96 | struct membank *bank = &mi->bank[i]; | ||
97 | unsigned int pfn1, pfn2; | 94 | unsigned int pfn1, pfn2; |
98 | struct page *page, *end; | 95 | struct page *page, *end; |
99 | 96 | ||
100 | pfn1 = bank_pfn_start(bank); | 97 | pfn1 = memblock_region_memory_base_pfn(reg); |
101 | pfn2 = bank_pfn_end(bank); | 98 | pfn2 = memblock_region_memory_end_pfn(reg); |
102 | 99 | ||
103 | page = pfn_to_page(pfn1); | 100 | page = pfn_to_page(pfn1); |
104 | end = pfn_to_page(pfn2 - 1) + 1; | 101 | end = pfn_to_page(pfn2 - 1) + 1; |
@@ -115,8 +112,9 @@ void show_mem(unsigned int filter) | |||
115 | free++; | 112 | free++; |
116 | else | 113 | else |
117 | shared += page_count(page) - 1; | 114 | shared += page_count(page) - 1; |
118 | page++; | 115 | pfn1++; |
119 | } while (page < end); | 116 | page = pfn_to_page(pfn1); |
117 | } while (pfn1 < pfn2); | ||
120 | } | 118 | } |
121 | 119 | ||
122 | printk("%d pages of RAM\n", total); | 120 | printk("%d pages of RAM\n", total); |
@@ -130,16 +128,9 @@ void show_mem(unsigned int filter) | |||
130 | static void __init find_limits(unsigned long *min, unsigned long *max_low, | 128 | static void __init find_limits(unsigned long *min, unsigned long *max_low, |
131 | unsigned long *max_high) | 129 | unsigned long *max_high) |
132 | { | 130 | { |
133 | struct meminfo *mi = &meminfo; | 131 | *max_low = PFN_DOWN(memblock_get_current_limit()); |
134 | int i; | 132 | *min = PFN_UP(memblock_start_of_DRAM()); |
135 | 133 | *max_high = PFN_DOWN(memblock_end_of_DRAM()); | |
136 | /* This assumes the meminfo array is properly sorted */ | ||
137 | *min = bank_pfn_start(&mi->bank[0]); | ||
138 | for_each_bank (i, mi) | ||
139 | if (mi->bank[i].highmem) | ||
140 | break; | ||
141 | *max_low = bank_pfn_end(&mi->bank[i - 1]); | ||
142 | *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); | ||
143 | } | 134 | } |
144 | 135 | ||
145 | #ifdef CONFIG_ZONE_DMA | 136 | #ifdef CONFIG_ZONE_DMA |
@@ -274,14 +265,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) | |||
274 | return phys; | 265 | return phys; |
275 | } | 266 | } |
276 | 267 | ||
277 | void __init arm_memblock_init(struct meminfo *mi, | 268 | void __init arm_memblock_init(const struct machine_desc *mdesc) |
278 | const struct machine_desc *mdesc) | ||
279 | { | 269 | { |
280 | int i; | ||
281 | |||
282 | for (i = 0; i < mi->nr_banks; i++) | ||
283 | memblock_add(mi->bank[i].start, mi->bank[i].size); | ||
284 | |||
285 | /* Register the kernel text, kernel data and initrd with memblock. */ | 270 | /* Register the kernel text, kernel data and initrd with memblock. */ |
286 | #ifdef CONFIG_XIP_KERNEL | 271 | #ifdef CONFIG_XIP_KERNEL |
287 | memblock_reserve(__pa(_sdata), _end - _sdata); | 272 | memblock_reserve(__pa(_sdata), _end - _sdata); |
@@ -413,54 +398,53 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
413 | /* | 398 | /* |
414 | * The mem_map array can get very big. Free the unused area of the memory map. | 399 | * The mem_map array can get very big. Free the unused area of the memory map. |
415 | */ | 400 | */ |
416 | static void __init free_unused_memmap(struct meminfo *mi) | 401 | static void __init free_unused_memmap(void) |
417 | { | 402 | { |
418 | unsigned long bank_start, prev_bank_end = 0; | 403 | unsigned long start, prev_end = 0; |
419 | unsigned int i; | 404 | struct memblock_region *reg; |
420 | 405 | ||
421 | /* | 406 | /* |
422 | * This relies on each bank being in address order. | 407 | * This relies on each bank being in address order. |
423 | * The banks are sorted previously in bootmem_init(). | 408 | * The banks are sorted previously in bootmem_init(). |
424 | */ | 409 | */ |
425 | for_each_bank(i, mi) { | 410 | for_each_memblock(memory, reg) { |
426 | struct membank *bank = &mi->bank[i]; | 411 | start = memblock_region_memory_base_pfn(reg); |
427 | |||
428 | bank_start = bank_pfn_start(bank); | ||
429 | 412 | ||
430 | #ifdef CONFIG_SPARSEMEM | 413 | #ifdef CONFIG_SPARSEMEM |
431 | /* | 414 | /* |
432 | * Take care not to free memmap entries that don't exist | 415 | * Take care not to free memmap entries that don't exist |
433 | * due to SPARSEMEM sections which aren't present. | 416 | * due to SPARSEMEM sections which aren't present. |
434 | */ | 417 | */ |
435 | bank_start = min(bank_start, | 418 | start = min(start, |
436 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | 419 | ALIGN(prev_end, PAGES_PER_SECTION)); |
437 | #else | 420 | #else |
438 | /* | 421 | /* |
439 | * Align down here since the VM subsystem insists that the | 422 | * Align down here since the VM subsystem insists that the |
440 | * memmap entries are valid from the bank start aligned to | 423 | * memmap entries are valid from the bank start aligned to |
441 | * MAX_ORDER_NR_PAGES. | 424 | * MAX_ORDER_NR_PAGES. |
442 | */ | 425 | */ |
443 | bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); | 426 | start = round_down(start, MAX_ORDER_NR_PAGES); |
444 | #endif | 427 | #endif |
445 | /* | 428 | /* |
446 | * If we had a previous bank, and there is a space | 429 | * If we had a previous bank, and there is a space |
447 | * between the current bank and the previous, free it. | 430 | * between the current bank and the previous, free it. |
448 | */ | 431 | */ |
449 | if (prev_bank_end && prev_bank_end < bank_start) | 432 | if (prev_end && prev_end < start) |
450 | free_memmap(prev_bank_end, bank_start); | 433 | free_memmap(prev_end, start); |
451 | 434 | ||
452 | /* | 435 | /* |
453 | * Align up here since the VM subsystem insists that the | 436 | * Align up here since the VM subsystem insists that the |
454 | * memmap entries are valid from the bank end aligned to | 437 | * memmap entries are valid from the bank end aligned to |
455 | * MAX_ORDER_NR_PAGES. | 438 | * MAX_ORDER_NR_PAGES. |
456 | */ | 439 | */ |
457 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); | 440 | prev_end = ALIGN(memblock_region_memory_end_pfn(reg), |
441 | MAX_ORDER_NR_PAGES); | ||
458 | } | 442 | } |
459 | 443 | ||
460 | #ifdef CONFIG_SPARSEMEM | 444 | #ifdef CONFIG_SPARSEMEM |
461 | if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) | 445 | if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) |
462 | free_memmap(prev_bank_end, | 446 | free_memmap(prev_end, |
463 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | 447 | ALIGN(prev_end, PAGES_PER_SECTION)); |
464 | #endif | 448 | #endif |
465 | } | 449 | } |
466 | 450 | ||
@@ -536,7 +520,7 @@ void __init mem_init(void) | |||
536 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); | 520 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); |
537 | 521 | ||
538 | /* this will put all unused low memory onto the freelists */ | 522 | /* this will put all unused low memory onto the freelists */ |
539 | free_unused_memmap(&meminfo); | 523 | free_unused_memmap(); |
540 | free_all_bootmem(); | 524 | free_all_bootmem(); |
541 | 525 | ||
542 | #ifdef CONFIG_SA1111 | 526 | #ifdef CONFIG_SA1111 |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index a991ce2f18d4..82ea2b3fb9b5 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -1062,74 +1062,47 @@ phys_addr_t arm_lowmem_limit __initdata = 0; | |||
1062 | void __init sanity_check_meminfo(void) | 1062 | void __init sanity_check_meminfo(void) |
1063 | { | 1063 | { |
1064 | phys_addr_t memblock_limit = 0; | 1064 | phys_addr_t memblock_limit = 0; |
1065 | int i, j, highmem = 0; | 1065 | int highmem = 0; |
1066 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; | 1066 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; |
1067 | struct memblock_region *reg; | ||
1067 | 1068 | ||
1068 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { | 1069 | for_each_memblock(memory, reg) { |
1069 | struct membank *bank = &meminfo.bank[j]; | 1070 | phys_addr_t block_start = reg->base; |
1070 | phys_addr_t size_limit; | 1071 | phys_addr_t block_end = reg->base + reg->size; |
1071 | 1072 | phys_addr_t size_limit = reg->size; | |
1072 | *bank = meminfo.bank[i]; | ||
1073 | size_limit = bank->size; | ||
1074 | 1073 | ||
1075 | if (bank->start >= vmalloc_limit) | 1074 | if (reg->base >= vmalloc_limit) |
1076 | highmem = 1; | 1075 | highmem = 1; |
1077 | else | 1076 | else |
1078 | size_limit = vmalloc_limit - bank->start; | 1077 | size_limit = vmalloc_limit - reg->base; |
1079 | 1078 | ||
1080 | bank->highmem = highmem; | ||
1081 | 1079 | ||
1082 | #ifdef CONFIG_HIGHMEM | 1080 | if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { |
1083 | /* | 1081 | |
1084 | * Split those memory banks which are partially overlapping | 1082 | if (highmem) { |
1085 | * the vmalloc area greatly simplifying things later. | 1083 | pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", |
1086 | */ | 1084 | &block_start, &block_end); |
1087 | if (!highmem && bank->size > size_limit) { | 1085 | memblock_remove(reg->base, reg->size); |
1088 | if (meminfo.nr_banks >= NR_BANKS) { | 1086 | continue; |
1089 | printk(KERN_CRIT "NR_BANKS too low, " | ||
1090 | "ignoring high memory\n"); | ||
1091 | } else { | ||
1092 | memmove(bank + 1, bank, | ||
1093 | (meminfo.nr_banks - i) * sizeof(*bank)); | ||
1094 | meminfo.nr_banks++; | ||
1095 | i++; | ||
1096 | bank[1].size -= size_limit; | ||
1097 | bank[1].start = vmalloc_limit; | ||
1098 | bank[1].highmem = highmem = 1; | ||
1099 | j++; | ||
1100 | } | 1087 | } |
1101 | bank->size = size_limit; | ||
1102 | } | ||
1103 | #else | ||
1104 | /* | ||
1105 | * Highmem banks not allowed with !CONFIG_HIGHMEM. | ||
1106 | */ | ||
1107 | if (highmem) { | ||
1108 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
1109 | "(!CONFIG_HIGHMEM).\n", | ||
1110 | (unsigned long long)bank->start, | ||
1111 | (unsigned long long)bank->start + bank->size - 1); | ||
1112 | continue; | ||
1113 | } | ||
1114 | 1088 | ||
1115 | /* | 1089 | if (reg->size > size_limit) { |
1116 | * Check whether this memory bank would partially overlap | 1090 | phys_addr_t overlap_size = reg->size - size_limit; |
1117 | * the vmalloc area. | 1091 | |
1118 | */ | 1092 | pr_notice("Truncating RAM at %pa-%pa to -%pa", |
1119 | if (bank->size > size_limit) { | 1093 | &block_start, &block_end, &vmalloc_limit); |
1120 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 1094 | memblock_remove(vmalloc_limit, overlap_size); |
1121 | "to -%.8llx (vmalloc region overlap).\n", | 1095 | block_end = vmalloc_limit; |
1122 | (unsigned long long)bank->start, | 1096 | } |
1123 | (unsigned long long)bank->start + bank->size - 1, | ||
1124 | (unsigned long long)bank->start + size_limit - 1); | ||
1125 | bank->size = size_limit; | ||
1126 | } | 1097 | } |
1127 | #endif | ||
1128 | if (!bank->highmem) { | ||
1129 | phys_addr_t bank_end = bank->start + bank->size; | ||
1130 | 1098 | ||
1131 | if (bank_end > arm_lowmem_limit) | 1099 | if (!highmem) { |
1132 | arm_lowmem_limit = bank_end; | 1100 | if (block_end > arm_lowmem_limit) { |
1101 | if (reg->size > size_limit) | ||
1102 | arm_lowmem_limit = vmalloc_limit; | ||
1103 | else | ||
1104 | arm_lowmem_limit = block_end; | ||
1105 | } | ||
1133 | 1106 | ||
1134 | /* | 1107 | /* |
1135 | * Find the first non-section-aligned page, and point | 1108 | * Find the first non-section-aligned page, and point |
@@ -1145,35 +1118,15 @@ void __init sanity_check_meminfo(void) | |||
1145 | * occurs before any free memory is mapped. | 1118 | * occurs before any free memory is mapped. |
1146 | */ | 1119 | */ |
1147 | if (!memblock_limit) { | 1120 | if (!memblock_limit) { |
1148 | if (!IS_ALIGNED(bank->start, SECTION_SIZE)) | 1121 | if (!IS_ALIGNED(block_start, SECTION_SIZE)) |
1149 | memblock_limit = bank->start; | 1122 | memblock_limit = block_start; |
1150 | else if (!IS_ALIGNED(bank_end, SECTION_SIZE)) | 1123 | else if (!IS_ALIGNED(block_end, SECTION_SIZE)) |
1151 | memblock_limit = bank_end; | 1124 | memblock_limit = arm_lowmem_limit; |
1152 | } | 1125 | } |
1153 | } | ||
1154 | j++; | ||
1155 | } | ||
1156 | #ifdef CONFIG_HIGHMEM | ||
1157 | if (highmem) { | ||
1158 | const char *reason = NULL; | ||
1159 | 1126 | ||
1160 | if (cache_is_vipt_aliasing()) { | ||
1161 | /* | ||
1162 | * Interactions between kmap and other mappings | ||
1163 | * make highmem support with aliasing VIPT caches | ||
1164 | * rather difficult. | ||
1165 | */ | ||
1166 | reason = "with VIPT aliasing cache"; | ||
1167 | } | ||
1168 | if (reason) { | ||
1169 | printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", | ||
1170 | reason); | ||
1171 | while (j > 0 && meminfo.bank[j - 1].highmem) | ||
1172 | j--; | ||
1173 | } | 1127 | } |
1174 | } | 1128 | } |
1175 | #endif | 1129 | |
1176 | meminfo.nr_banks = j; | ||
1177 | high_memory = __va(arm_lowmem_limit - 1) + 1; | 1130 | high_memory = __va(arm_lowmem_limit - 1) + 1; |
1178 | 1131 | ||
1179 | /* | 1132 | /* |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 55764a7ef1f0..da1874f9f8cf 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -88,30 +88,35 @@ static unsigned long irbar_read(void) | |||
88 | void __init sanity_check_meminfo_mpu(void) | 88 | void __init sanity_check_meminfo_mpu(void) |
89 | { | 89 | { |
90 | int i; | 90 | int i; |
91 | struct membank *bank = meminfo.bank; | ||
92 | phys_addr_t phys_offset = PHYS_OFFSET; | 91 | phys_addr_t phys_offset = PHYS_OFFSET; |
93 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; | 92 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; |
94 | 93 | struct memblock_region *reg; | |
95 | /* Initially only use memory continuous from PHYS_OFFSET */ | 94 | bool first = true; |
96 | if (bank_phys_start(&bank[0]) != phys_offset) | 95 | phys_addr_t mem_start; |
97 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | 96 | phys_addr_t mem_end; |
98 | 97 | ||
99 | /* Banks have already been sorted by start address */ | 98 | for_each_memblock(memory, reg) { |
100 | for (i = 1; i < meminfo.nr_banks; i++) { | 99 | if (first) { |
101 | if (bank[i].start <= bank_phys_end(&bank[0]) && | 100 | /* |
102 | bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { | 101 | * Initially only use memory continuous from |
103 | bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; | 102 | * PHYS_OFFSET */ |
103 | if (reg->base != phys_offset) | ||
104 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | ||
105 | |||
106 | mem_start = reg->base; | ||
107 | mem_end = reg->base + reg->size; | ||
108 | specified_mem_size = reg->size; | ||
109 | first = false; | ||
104 | } else { | 110 | } else { |
105 | pr_notice("Ignoring RAM after 0x%.8lx. " | 111 | /* |
106 | "First non-contiguous (ignored) bank start: 0x%.8lx\n", | 112 | * memblock auto merges contiguous blocks, remove |
107 | (unsigned long)bank_phys_end(&bank[0]), | 113 | * all blocks afterwards |
108 | (unsigned long)bank_phys_start(&bank[i])); | 114 | */ |
109 | break; | 115 | pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", |
116 | &mem_start, ®->base); | ||
117 | memblock_remove(reg->base, reg->size); | ||
110 | } | 118 | } |
111 | } | 119 | } |
112 | /* All contiguous banks are now merged in to the first bank */ | ||
113 | meminfo.nr_banks = 1; | ||
114 | specified_mem_size = bank[0].size; | ||
115 | 120 | ||
116 | /* | 121 | /* |
117 | * MPU has curious alignment requirements: Size must be power of 2, and | 122 | * MPU has curious alignment requirements: Size must be power of 2, and |
@@ -128,23 +133,24 @@ void __init sanity_check_meminfo_mpu(void) | |||
128 | */ | 133 | */ |
129 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); | 134 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); |
130 | /* Find the max power-of-two sized region that fits inside our bank */ | 135 | /* Find the max power-of-two sized region that fits inside our bank */ |
131 | rounded_mem_size = (1 << __fls(bank[0].size)) - 1; | 136 | rounded_mem_size = (1 << __fls(specified_mem_size)) - 1; |
132 | 137 | ||
133 | /* The actual region size is the smaller of the two */ | 138 | /* The actual region size is the smaller of the two */ |
134 | aligned_region_size = aligned_region_size < rounded_mem_size | 139 | aligned_region_size = aligned_region_size < rounded_mem_size |
135 | ? aligned_region_size + 1 | 140 | ? aligned_region_size + 1 |
136 | : rounded_mem_size + 1; | 141 | : rounded_mem_size + 1; |
137 | 142 | ||
138 | if (aligned_region_size != specified_mem_size) | 143 | if (aligned_region_size != specified_mem_size) { |
139 | pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", | 144 | pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", |
140 | (unsigned long)specified_mem_size, | 145 | &specified_mem_size, &aligned_region_size); |
141 | (unsigned long)aligned_region_size); | 146 | memblock_remove(mem_start + aligned_region_size, |
147 | specified_mem_size - aligned_region_size); | ||
148 | |||
149 | mem_end = mem_start + aligned_region_size; | ||
150 | } | ||
142 | 151 | ||
143 | meminfo.bank[0].size = aligned_region_size; | 152 | pr_debug("MPU Region from %pa size %pa (end %pa))\n", |
144 | pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx))\n", | 153 | &phys_offset, &aligned_region_size, &mem_end); |
145 | (unsigned long)phys_offset, | ||
146 | (unsigned long)aligned_region_size, | ||
147 | (unsigned long)bank_phys_end(&bank[0])); | ||
148 | 154 | ||
149 | } | 155 | } |
150 | 156 | ||
@@ -292,7 +298,7 @@ void __init sanity_check_meminfo(void) | |||
292 | { | 298 | { |
293 | phys_addr_t end; | 299 | phys_addr_t end; |
294 | sanity_check_meminfo_mpu(); | 300 | sanity_check_meminfo_mpu(); |
295 | end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | 301 | end = memblock_end_of_DRAM(); |
296 | high_memory = __va(end - 1) + 1; | 302 | high_memory = __va(end - 1) + 1; |
297 | } | 303 | } |
298 | 304 | ||