author	Anton Blanchard <anton@samba.org>	2014-09-17 08:15:33 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2014-11-09 17:59:25 -0500
commit	10239733ee8617bac3f1c1769af43a88ed979324 (patch)
tree	c449cc34dd59b4971f6a8fd1f89c3ae6b04596d9
parent	16d0f5c4af76b0c3424290937bf1ac22adf439b1 (diff)
powerpc: Remove bootmem allocator
At the moment we transition from the memblock allocator to the bootmem
allocator. Getting rid of the bootmem allocator removes a bunch of
complicated code (most of which I owe the dubious honour of being
responsible for writing).

Signed-off-by: Anton Blanchard <anton@samba.org>
Tested-by: Emil Medve <Emilian.Medve@Freescale.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
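For context, the before/after shape of the change is easiest to see in
early_alloc_pgtable() further down. This is a condensed sketch, not code
from the tree; the names mirror the pgtable_64.c hunk in this patch:

	/* Before: early code had to know which allocator was live. */
	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
				__pa(MAX_DMA_ADDRESS)));

	/* After: memblock is the only early allocator, so the check goes. */
	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));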
-rw-r--r--	arch/powerpc/Kconfig	1
-rw-r--r--	arch/powerpc/include/asm/setup.h	3
-rw-r--r--	arch/powerpc/kernel/setup_32.c	5
-rw-r--r--	arch/powerpc/kernel/setup_64.c	3
-rw-r--r--	arch/powerpc/mm/init_32.c	9
-rw-r--r--	arch/powerpc/mm/mem.c	62
-rw-r--r--	arch/powerpc/mm/numa.c	224
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	3
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	6
-rw-r--r--	arch/powerpc/platforms/512x/mpc512x_shared.c	9
10 files changed, 47 insertions, 278 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 88eace4e28c3..98e9c548bd75 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -148,6 +148,7 @@ config PPC
 	select HAVE_ARCH_AUDITSYSCALL
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+	select NO_BOOTMEM
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 11ba86e17631..fbdf18cf954c 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -8,7 +8,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
-extern int init_bootmem_done;	/* set once bootmem is available */
 extern unsigned long long memory_limit;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -24,7 +23,7 @@ extern void reloc_got2(unsigned long);
 #define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))
 
 void check_for_initrd(void);
-void do_init_bootmem(void);
+void initmem_init(void);
 void setup_panic(void);
 #define ARCH_PANIC_TIMEOUT 180
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 07831ed0d9ef..88a36e64bf9b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -311,9 +311,8 @@ void __init setup_arch(char **cmdline_p)
 
 	irqstack_early_init();
 
-	/* set up the bootmem stuff with available memory */
-	do_init_bootmem();
-	if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
+	initmem_init();
+	if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab);
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 615aa904b216..91d9cfaffe6c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -689,8 +689,7 @@ void __init setup_arch(char **cmdline_p)
 	exc_lvl_early_init();
 	emergency_stack_init();
 
-	/* set up the bootmem stuff with available memory */
-	do_init_bootmem();
+	initmem_init();
 	sparse_init();
 
 #ifdef CONFIG_DUMMY_CONSOLE
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 415a51b028b9..3b3100433c18 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -195,15 +195,6 @@ void __init MMU_init(void)
 	memblock_set_current_limit(lowmem_end_addr);
 }
 
-/* This is only called until mem_init is done. */
-void __init *early_get_page(void)
-{
-	if (init_bootmem_done)
-		return alloc_bootmem_pages(PAGE_SIZE);
-	else
-		return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
-}
-
 #ifdef CONFIG_8xx	/* No 8xx specific .c file to put that in ... */
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2add0b7b9f6d..e076d19c095b 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,7 +61,6 @@
 #define CPU_FTR_NOEXECUTE	0
 #endif
 
-int init_bootmem_done;
 int mem_init_done;
 unsigned long long memory_limit;
 
@@ -190,70 +189,22 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 }
 EXPORT_SYMBOL_GPL(walk_system_ram_range);
 
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.  If we are using highmem, we only put the
- * lowmem into the bootmem system.
- */
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init do_init_bootmem(void)
+void __init initmem_init(void)
 {
-	unsigned long start, bootmap_pages;
-	unsigned long total_pages;
-	struct memblock_region *reg;
-	int boot_mapsize;
-
 	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
-	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
-	total_pages = total_lowmem >> PAGE_SHIFT;
 	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
 #endif
 
-	/*
-	 * Find an area to use for the bootmem bitmap.  Calculate the size of
-	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
-	 * Add 1 additional page in case the address isn't page-aligned.
-	 */
-	bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
-	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
-	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
-
 	/* Place all memblock_regions in the same node and merge contiguous
 	 * memblock_regions
 	 */
 	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 
-	/* Add all physical memory to the bootmem map, mark each area
-	 * present.
-	 */
-#ifdef CONFIG_HIGHMEM
-	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
-
-	/* reserve the sections we're already using */
-	for_each_memblock(reserved, reg) {
-		unsigned long top = reg->base + reg->size - 1;
-		if (top < lowmem_end_addr)
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-		else if (reg->base < lowmem_end_addr) {
-			unsigned long trunc_size = lowmem_end_addr - reg->base;
-			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
-		}
-	}
-#else
-	free_bootmem_with_active_regions(0, max_pfn);
-
-	/* reserve the sections we're already using */
-	for_each_memblock(reserved, reg)
-		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-#endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
-
-	init_bootmem_done = 1;
 }
 
 /* mark pages that don't exist as nosave */
@@ -369,14 +320,6 @@ void __init paging_init(void)
 	mark_nonram_nosave();
 }
 
-static void __init register_page_bootmem_info(void)
-{
-	int i;
-
-	for_each_online_node(i)
-		register_page_bootmem_info_node(NODE_DATA(i));
-}
-
 void __init mem_init(void)
 {
 	/*
@@ -389,7 +332,6 @@ void __init mem_init(void)
 	swiotlb_init(0);
 #endif
 
-	register_page_bootmem_info();
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	set_max_mapnr(max_pfn);
 	free_all_bootmem();
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b9d1dfdbe5bb..2e9ad2d76b75 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -134,28 +134,6 @@ static int __init fake_numa_create_new_node(unsigned long end_pfn,
 	return 0;
 }
 
-/*
- * get_node_active_region - Return active region containing pfn
- * Active range returned is empty if none found.
- * @pfn: The page to return the region for
- * @node_ar: Returned set to the active region containing @pfn
- */
-static void __init get_node_active_region(unsigned long pfn,
-					  struct node_active_region *node_ar)
-{
-	unsigned long start_pfn, end_pfn;
-	int i, nid;
-
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
-		if (pfn >= start_pfn && pfn < end_pfn) {
-			node_ar->nid = nid;
-			node_ar->start_pfn = start_pfn;
-			node_ar->end_pfn = end_pfn;
-			break;
-		}
-	}
-}
-
 static void reset_numa_cpu_lookup_table(void)
 {
 	unsigned int cpu;
@@ -928,134 +906,48 @@ static void __init dump_numa_memory_topology(void)
 	}
 }
 
-/*
- * Allocate some memory, satisfying the memblock or bootmem allocator where
- * required.  nid is the preferred node and end is the physical address of
- * the highest address in the node.
- *
- * Returns the virtual address of the memory.
- */
-static void __init *careful_zallocation(int nid, unsigned long size,
-					unsigned long align,
-					unsigned long end_pfn)
-{
-	void *ret;
-	int new_nid;
-	unsigned long ret_paddr;
-
-	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
-
-	/* retry over all memory */
-	if (!ret_paddr)
-		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
-
-	if (!ret_paddr)
-		panic("numa.c: cannot allocate %lu bytes for node %d",
-		      size, nid);
-
-	ret = __va(ret_paddr);
-
-	/*
-	 * We initialize the nodes in numeric order: 0, 1, 2...
-	 * and hand over control from the MEMBLOCK allocator to the
-	 * bootmem allocator.  If this function is called for
-	 * node 5, then we know that all nodes <5 are using the
-	 * bootmem allocator instead of the MEMBLOCK allocator.
-	 *
-	 * So, check the nid from which this allocation came
-	 * and double check to see if we need to use bootmem
-	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
-	 * since it would be useless.
-	 */
-	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
-	if (new_nid < nid) {
-		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
-				size, align, 0);
-
-		dbg("alloc_bootmem %p %lx\n", ret, size);
-	}
-
-	memset(ret, 0, size);
-	return ret;
-}
-
 static struct notifier_block ppc64_numa_nb = {
 	.notifier_call = cpu_numa_callback,
 	.priority = 1 /* Must run before sched domains notifier. */
 };
 
-static void __init mark_reserved_regions_for_nid(int nid)
+/* Initialize NODE_DATA for a node on the local memory */
+static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 {
-	struct pglist_data *node = NODE_DATA(nid);
-	struct memblock_region *reg;
-
-	for_each_memblock(reserved, reg) {
-		unsigned long physbase = reg->base;
-		unsigned long size = reg->size;
-		unsigned long start_pfn = physbase >> PAGE_SHIFT;
-		unsigned long end_pfn = PFN_UP(physbase + size);
-		struct node_active_region node_ar;
-		unsigned long node_end_pfn = pgdat_end_pfn(node);
-
-		/*
-		 * Check to make sure that this memblock.reserved area is
-		 * within the bounds of the node that we care about.
-		 * Checking the nid of the start and end points is not
-		 * sufficient because the reserved area could span the
-		 * entire node.
-		 */
-		if (end_pfn <= node->node_start_pfn ||
-		    start_pfn >= node_end_pfn)
-			continue;
-
-		get_node_active_region(start_pfn, &node_ar);
-		while (start_pfn < end_pfn &&
-		       node_ar.start_pfn < node_ar.end_pfn) {
-			unsigned long reserve_size = size;
-			/*
-			 * if reserved region extends past active region
-			 * then trim size to active region
-			 */
-			if (end_pfn > node_ar.end_pfn)
-				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-					- physbase;
-			/*
-			 * Only worry about *this* node, others may not
-			 * yet have valid NODE_DATA().
-			 */
-			if (node_ar.nid == nid) {
-				dbg("reserve_bootmem %lx %lx nid=%d\n",
-				    physbase, reserve_size, node_ar.nid);
-				reserve_bootmem_node(NODE_DATA(node_ar.nid),
-						     physbase, reserve_size,
-						     BOOTMEM_DEFAULT);
-			}
-			/*
-			 * if reserved region is contained in the active region
-			 * then done.
-			 */
-			if (end_pfn <= node_ar.end_pfn)
-				break;
-
-			/*
-			 * reserved region extends past the active region
-			 * get next active region that contains this
-			 * reserved region
-			 */
-			start_pfn = node_ar.end_pfn;
-			physbase = start_pfn << PAGE_SHIFT;
-			size = size - reserve_size;
-			get_node_active_region(start_pfn, &node_ar);
-		}
-	}
-}
+	u64 spanned_pages = end_pfn - start_pfn;
+	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+	u64 nd_pa;
+	void *nd;
+	int tnid;
+
+	if (spanned_pages)
+		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+			nid, start_pfn << PAGE_SHIFT,
+			(end_pfn << PAGE_SHIFT) - 1);
+	else
+		pr_info("Initmem setup node %d\n", nid);
+
+	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd = __va(nd_pa);
+
+	/* report and initialize */
+	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
+		nd_pa, nd_pa + nd_size - 1);
+	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+	if (tnid != nid)
+		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
+
+	node_data[nid] = nd;
+	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+	NODE_DATA(nid)->node_id = nid;
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
+}
 
-
-void __init do_init_bootmem(void)
+void __init initmem_init(void)
 {
 	int nid, cpu;
 
-	min_low_pfn = 0;
 	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn;
 
@@ -1064,64 +956,16 @@ void __init do_init_bootmem(void)
 	else
 		dump_numa_memory_topology();
 
+	memblock_dump_all();
+
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
-		void *bootmem_vaddr;
-		unsigned long bootmap_pages;
 
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-
-		/*
-		 * Allocate the node structure node local if possible
-		 *
-		 * Be careful moving this around, as it relies on all
-		 * previous nodes' bootmem to be initialized and have
-		 * all reserved areas marked.
-		 */
-		NODE_DATA(nid) = careful_zallocation(nid,
-					sizeof(struct pglist_data),
-					SMP_CACHE_BYTES, end_pfn);
-
-		dbg("node %d\n", nid);
-		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
-
-		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
-		NODE_DATA(nid)->node_start_pfn = start_pfn;
-		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
-
-		if (NODE_DATA(nid)->node_spanned_pages == 0)
-			continue;
-
-		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
-		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
-
-		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-		bootmem_vaddr = careful_zallocation(nid,
-					bootmap_pages << PAGE_SHIFT,
-					PAGE_SIZE, end_pfn);
-
-		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
-
-		init_bootmem_node(NODE_DATA(nid),
-				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
-				  start_pfn, end_pfn);
-
-		free_bootmem_with_active_regions(nid, end_pfn);
-		/*
-		 * Be very careful about moving this around.  Future
-		 * calls to careful_zallocation() depend on this getting
-		 * done correctly.
-		 */
-		mark_reserved_regions_for_nid(nid);
+		setup_node_data(nid, start_pfn, end_pfn);
 		sparse_memory_present_with_active_regions(nid);
 	}
 
-	init_bootmem_done = 1;
-
-	/*
-	 * Now bootmem is initialised we can create the node to cpumask
-	 * lookup tables and setup the cpu callback to populate them.
-	 */
 	setup_node_to_cpumask_map();
 
 	reset_numa_cpu_lookup_table();
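
Condensing the new NUMA path above into a sketch (the names are taken from
the hunk; the fallback behaviour of memblock_alloc_try_nid() is an
assumption about the memblock API of this era, which retries on any node
before panicking):

	for_each_online_node(nid) {
		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Prefer node-local memory for the pg_data_t itself;
		 * memblock falls back to another node if nid is full.
		 */
		nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
		node_data[nid] = __va(nd_pa);
	}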
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cf11342bf519..d545b1231594 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -100,12 +100,11 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
 {
 	pte_t *pte;
 	extern int mem_init_done;
-	extern void *early_get_page(void);
 
 	if (mem_init_done) {
 		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	} else {
-		pte = (pte_t *)early_get_page();
+		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 		if (pte)
 			clear_page(pte);
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c8d709ab489d..cdb19ab859d3 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -75,11 +75,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
 {
 	void *pt;
 
-	if (init_bootmem_done)
-		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
-	else
-		pt = __va(memblock_alloc_base(size, size,
-				__pa(MAX_DMA_ADDRESS)));
+	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
 	memset(pt, 0, size);
 
 	return pt;
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index e996e007bc44..711f3d352af7 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -18,7 +18,7 @@
 #include <linux/irq.h>
 #include <linux/of_platform.h>
 #include <linux/fsl-diu-fb.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <sysdev/fsl_soc.h>
 
 #include <asm/cacheflush.h>
@@ -297,14 +297,13 @@ static void __init mpc512x_setup_diu(void)
 	 * and so negatively affect boot time. Instead we reserve the
 	 * already configured frame buffer area so that it won't be
 	 * destroyed. The starting address of the area to reserve and
-	 * also it's length is passed to reserve_bootmem(). It will be
+	 * also it's length is passed to memblock_reserve(). It will be
 	 * freed later on first open of fbdev, when splash image is not
 	 * needed any more.
 	 */
 	if (diu_shared_fb.in_use) {
-		ret = reserve_bootmem(diu_shared_fb.fb_phys,
-				      diu_shared_fb.fb_len,
-				      BOOTMEM_EXCLUSIVE);
+		ret = memblock_reserve(diu_shared_fb.fb_phys,
+				       diu_shared_fb.fb_len);
 		if (ret) {
 			pr_err("%s: reserve bootmem failed\n", __func__);
 			diu_shared_fb.in_use = false;
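
A note on the error check kept above: like reserve_bootmem(),
memblock_reserve() returns 0 on success and a negative errno on failure,
which is why the caller's "if (ret)" path survives unchanged. A minimal
usage sketch (fb_phys and fb_len are placeholders for the values used
above):

	/* Keep the firmware-configured splash frame buffer out of the
	 * page allocator until fbdev takes ownership of it.
	 */
	if (memblock_reserve(fb_phys, fb_len))
		pr_err("failed to reserve shared framebuffer\n");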