diff options
| author | Rob Herring <robh@kernel.org> | 2018-03-09 10:54:07 -0500 |
|---|---|---|
| committer | Michal Simek <michal.simek@xilinx.com> | 2018-03-16 07:51:27 -0400 |
| commit | 101646a24a2f9cdb61d7732459fbf068a7bbb542 (patch) | |
| tree | 679febdfa59103d83488dba3adabb41312b3bd64 | |
| parent | cd4dfee6a8bfbbe404e9905aff85e267ec99f5fa (diff) | |
microblaze: switch to NO_BOOTMEM
Microblaze doesn't set CONFIG_NO_BOOTMEM, and memblock_virt_alloc()
doesn't work when CONFIG_HAVE_MEMBLOCK is set but CONFIG_NO_BOOTMEM is not.
A similar change was already done by other architectures:
"ARM: mm: Remove bootmem code and switch to NO_BOOTMEM"
(sha1: 84f452b1e8fc73ac0e31254c66e3e2260ce5263d)
or
"openrisc: Consolidate setup to use memblock instead of bootmem"
(sha1: 266c7fad157265bb54d17db1c9545f2aaa488643)
or
"parisc: Drop bootmem and switch to memblock"
(sha1: 4fe9e1d957e45ad8eba9885ee860a0e93d13a7c7)
or
"powerpc: Remove bootmem allocator"
(sha1: 10239733ee8617bac3f1c1769af43a88ed979324)
or
"s390/mm: Convert bootmem to memblock"
(sha1: 50be634507284eea38df78154d22615d21200b42)
or
"sparc64: Convert over to NO_BOOTMEM."
(sha1: 625d693e9784f988371e69c2b41a2172c0be6c11)
or
"xtensa: drop sysmem and switch to memblock"
(sha1: 0e46c1115f5816949220d62dd3ff04aa68e7ac6b)
The issue was introduced by:
"of/fdt: use memblock_virt_alloc for early alloc"
(sha1: 0fa1c579349fdd90173381712ad78aa99c09d38b)
Signed-off-by: Rob Herring <robh@kernel.org>
Tested-by: Alvaro Gamez Machado <alvaro.gamez@hazent.com>
Tested-by: Michal Simek <michal.simek@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
| -rw-r--r-- | arch/microblaze/Kconfig | 1 | ||||
| -rw-r--r-- | arch/microblaze/mm/init.c | 56 |
2 files changed, 7 insertions, 50 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 4f798aa671dd..3817a3e2146c 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
| @@ -24,6 +24,7 @@ config MICROBLAZE | |||
| 24 | select HAVE_FTRACE_MCOUNT_RECORD | 24 | select HAVE_FTRACE_MCOUNT_RECORD |
| 25 | select HAVE_FUNCTION_GRAPH_TRACER | 25 | select HAVE_FUNCTION_GRAPH_TRACER |
| 26 | select HAVE_FUNCTION_TRACER | 26 | select HAVE_FUNCTION_TRACER |
| 27 | select NO_BOOTMEM | ||
| 27 | select HAVE_MEMBLOCK | 28 | select HAVE_MEMBLOCK |
| 28 | select HAVE_MEMBLOCK_NODE_MAP | 29 | select HAVE_MEMBLOCK_NODE_MAP |
| 29 | select HAVE_OPROFILE | 30 | select HAVE_OPROFILE |
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 5bc9c7fbb541..df6de7ccdc2e 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
| @@ -32,9 +32,6 @@ int mem_init_done; | |||
| 32 | #ifndef CONFIG_MMU | 32 | #ifndef CONFIG_MMU |
| 33 | unsigned int __page_offset; | 33 | unsigned int __page_offset; |
| 34 | EXPORT_SYMBOL(__page_offset); | 34 | EXPORT_SYMBOL(__page_offset); |
| 35 | |||
| 36 | #else | ||
| 37 | static int init_bootmem_done; | ||
| 38 | #endif /* CONFIG_MMU */ | 35 | #endif /* CONFIG_MMU */ |
| 39 | 36 | ||
| 40 | char *klimit = _end; | 37 | char *klimit = _end; |
| @@ -117,7 +114,6 @@ static void __init paging_init(void) | |||
| 117 | 114 | ||
| 118 | void __init setup_memory(void) | 115 | void __init setup_memory(void) |
| 119 | { | 116 | { |
| 120 | unsigned long map_size; | ||
| 121 | struct memblock_region *reg; | 117 | struct memblock_region *reg; |
| 122 | 118 | ||
| 123 | #ifndef CONFIG_MMU | 119 | #ifndef CONFIG_MMU |
| @@ -174,17 +170,6 @@ void __init setup_memory(void) | |||
| 174 | pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); | 170 | pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); |
| 175 | pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); | 171 | pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); |
| 176 | 172 | ||
| 177 | /* | ||
| 178 | * Find an area to use for the bootmem bitmap. | ||
| 179 | * We look for the first area which is at least | ||
| 180 | * 128kB in length (128kB is enough for a bitmap | ||
| 181 | * for 4GB of memory, using 4kB pages), plus 1 page | ||
| 182 | * (in case the address isn't page-aligned). | ||
| 183 | */ | ||
| 184 | map_size = init_bootmem_node(NODE_DATA(0), | ||
| 185 | PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); | ||
| 186 | memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); | ||
| 187 | |||
| 188 | /* Add active regions with valid PFNs */ | 173 | /* Add active regions with valid PFNs */ |
| 189 | for_each_memblock(memory, reg) { | 174 | for_each_memblock(memory, reg) { |
| 190 | unsigned long start_pfn, end_pfn; | 175 | unsigned long start_pfn, end_pfn; |
| @@ -196,32 +181,9 @@ void __init setup_memory(void) | |||
| 196 | &memblock.memory, 0); | 181 | &memblock.memory, 0); |
| 197 | } | 182 | } |
| 198 | 183 | ||
| 199 | /* free bootmem is whole main memory */ | ||
| 200 | free_bootmem_with_active_regions(0, max_low_pfn); | ||
| 201 | |||
| 202 | /* reserve allocate blocks */ | ||
| 203 | for_each_memblock(reserved, reg) { | ||
| 204 | unsigned long top = reg->base + reg->size - 1; | ||
| 205 | |||
| 206 | pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n", | ||
| 207 | (u32) reg->base, (u32) reg->size, top, | ||
| 208 | memory_start + lowmem_size - 1); | ||
| 209 | |||
| 210 | if (top <= (memory_start + lowmem_size - 1)) { | ||
| 211 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); | ||
| 212 | } else if (reg->base < (memory_start + lowmem_size - 1)) { | ||
| 213 | unsigned long trunc_size = memory_start + lowmem_size - | ||
| 214 | reg->base; | ||
| 215 | reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); | ||
| 216 | } | ||
| 217 | } | ||
| 218 | |||
| 219 | /* XXX need to clip this if using highmem? */ | 184 | /* XXX need to clip this if using highmem? */ |
| 220 | sparse_memory_present_with_active_regions(0); | 185 | sparse_memory_present_with_active_regions(0); |
| 221 | 186 | ||
| 222 | #ifdef CONFIG_MMU | ||
| 223 | init_bootmem_done = 1; | ||
| 224 | #endif | ||
| 225 | paging_init(); | 187 | paging_init(); |
| 226 | } | 188 | } |
| 227 | 189 | ||
| @@ -398,18 +360,12 @@ asmlinkage void __init mmu_init(void) | |||
| 398 | /* This is only called until mem_init is done. */ | 360 | /* This is only called until mem_init is done. */ |
| 399 | void __init *early_get_page(void) | 361 | void __init *early_get_page(void) |
| 400 | { | 362 | { |
| 401 | void *p; | 363 | /* |
| 402 | if (init_bootmem_done) { | 364 | * Mem start + kernel_tlb -> here is limit |
| 403 | p = alloc_bootmem_pages(PAGE_SIZE); | 365 | * because of mem mapping from head.S |
| 404 | } else { | 366 | */ |
| 405 | /* | 367 | return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, |
| 406 | * Mem start + kernel_tlb -> here is limit | 368 | memory_start + kernel_tlb)); |
| 407 | * because of mem mapping from head.S | ||
| 408 | */ | ||
| 409 | p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, | ||
| 410 | memory_start + kernel_tlb)); | ||
| 411 | } | ||
| 412 | return p; | ||
| 413 | } | 369 | } |
| 414 | 370 | ||
| 415 | #endif /* CONFIG_MMU */ | 371 | #endif /* CONFIG_MMU */ |
