path: root/arch/powerpc/mm
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-03 20:26:03 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-03 20:26:03 -0400
commit		412a4ac5e9cf7fdeb6af562c25547a9b9da7674f (patch)
tree		a8ce13cbc9c47c99799e5e3e3ad26ba78274ee73 /arch/powerpc/mm
parent		e8e5c2155b0035b6e04f29be67f6444bc914005b (diff)
parent		0c2daaafcdec726e89cbccca61d576de8429c537 (diff)
Merge commit 'gcl/next' into next
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c        |  2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  | 26
-rw-r--r--  arch/powerpc/mm/init_32.c        | 16
-rw-r--r--  arch/powerpc/mm/init_64.c        |  2
-rw-r--r--  arch/powerpc/mm/mem.c            | 78
-rw-r--r--  arch/powerpc/mm/numa.c           | 84
-rw-r--r--  arch/powerpc/mm/pgtable_32.c     |  6
-rw-r--r--  arch/powerpc/mm/pgtable_64.c     |  4
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c     |  4
-rw-r--r--  arch/powerpc/mm/stab.c           |  4
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c     |  4
11 files changed, 115 insertions(+), 115 deletions(-)
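The substance of this merge for arch/powerpc/mm is the tree-wide LMB -> MEMBLOCK rename: <linux/lmb.h> becomes <linux/memblock.h>, the global "lmb" structure becomes "memblock", and each lmb_*() call becomes its memblock_*() counterpart with an unchanged signature. A minimal sketch of the pattern from a caller's point of view -- illustrative only, not a hunk from this commit, and example_early_alloc() is a made-up name:

#include <linux/memblock.h>	/* formerly <linux/lmb.h> */

/* Illustrative fragment (not part of this diff): a boot-time allocation
 * using the renamed API.  memblock_alloc_base() returns the physical
 * address of a reserved block at or below the given limit, exactly as
 * lmb_alloc_base() did before the rename.
 */
static void __init example_early_alloc(void)
{
	phys_addr_t pa;

	/* one PAGE_SIZE-aligned page below the top of RAM */
	pa = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				 memblock_end_of_DRAM());
	memset(__va(pa), 0, PAGE_SIZE);
}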
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 65abfcfaaa9e..1dc2fa5ce1bd 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 16 and 4 MiB
 	 * pages. Consequently, restrict the top end of RAM currently
-	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3ecdcec0a39e..98f262de5585 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 		printk(KERN_INFO "Huge page(16GB) memory: "
 			"addr = 0x%lX size = 0x%lX pages = %d\n",
 			phys_addr, block_size, expected_pages);
-		if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-			lmb_reserve(phys_addr, block_size * expected_pages);
+		if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+			memblock_reserve(phys_addr, block_size * expected_pages);
 			add_gpage(phys_addr, block_size, expected_pages);
 		}
 		return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
 	 * and we have at least 1G of RAM at boot
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-	    lmb_phys_mem_size() >= 0x40000000)
+	    memblock_phys_mem_size() >= 0x40000000)
 		mmu_vmemmap_psize = MMU_PAGE_16M;
 	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
 		mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
 		return 1UL << ppc64_pft_size;
 
 	/* round mem_size up to next power of 2 */
-	mem_size = lmb_phys_mem_size();
+	mem_size = memblock_phys_mem_size();
 	rnd_mem_size = 1UL << __ilog2(mem_size);
 	if (rnd_mem_size < mem_size)
 		rnd_mem_size <<= 1;
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
 	else
 		limit = 0;
 
-	table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+	table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
 	DBG("Hash table allocated at %lx, size: %lx\n", table,
 	    htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-						    1, lmb.rmo_size));
+	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+						    1, memblock.rmo_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < lmb.memory.cnt; i++) {
-		base = (unsigned long)__va(lmb.memory.region[i].base);
-		size = lmb.memory.region[i].size;
+	for (i=0; i < memblock.memory.cnt; i++) {
+		base = (unsigned long)__va(memblock.memory.region[i].base);
+		size = memblock.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
-		 * in such a way that it will not cross two lmb regions and
+		 * in such a way that it will not cross two memblock regions and
 		 * will fit within a single 16Mb page.
 		 * The DART space is assumed to be a full 16Mb region even if
 		 * we only use 2Mb of that space. We will use more of it later
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 767333005eb4..6a6975dc2654 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -30,7 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
@@ -136,17 +136,17 @@ void __init MMU_init(void)
 	/* parse args from command line */
 	MMU_setup();
 
-	if (lmb.memory.cnt > 1) {
+	if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-		lmb.memory.cnt = 1;
-		lmb_analyze();
+		memblock.memory.cnt = 1;
+		memblock_analyze();
 		printk(KERN_WARNING "Only using first contiguous memory region");
 #else
 		wii_memory_fixups();
 #endif
 	}
 
-	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+	total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
 	lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
@@ -161,8 +161,8 @@ void __init MMU_init(void)
 		lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
-		lmb_enforce_memory_limit(lowmem_end_addr);
-		lmb_analyze();
+		memblock_enforce_memory_limit(lowmem_end_addr);
+		memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
 
@@ -200,7 +200,7 @@ void __init *early_get_page(void)
 	if (init_bootmem_done) {
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
-		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
 					__initial_memory_limit_addr));
 	}
 	return p;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e267f223fdff..71f1415e2472 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -40,7 +40,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/poison.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f594d774bf7..1a84a8d00005 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
 	int i;
-	for (i=0; i < lmb.memory.cnt; i++) {
+	for (i=0; i < memblock.memory.cnt; i++) {
 		unsigned long base;
 
-		base = lmb.memory.region[i].base;
+		base = memblock.memory.region[i].base;
 
 		if ((paddr >= base) &&
-			(paddr < (base + lmb.memory.region[i].size))) {
+			(paddr < (base + memblock.memory.region[i].size))) {
 			return 1;
 		}
 	}
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
  * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures. Walk through the
+ * Instead it maintains it in memblock.memory structures. Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct lmb_property res;
+	struct memblock_property res;
 	unsigned long pfn, len;
 	u64 end;
 	int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 	res.size = (u64) nr_pages << PAGE_SHIFT;
 
 	end = res.base + res.size - 1;
-	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+	while ((res.base < end) && (memblock_find(&res) >= 0)) {
 		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
 		len = (unsigned long)(res.size >> PAGE_SHIFT);
 		ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
 	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
 	 */
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
 	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < lmb.memory.cnt; i++) {
+	for (i = 0; i < memblock.memory.cnt; i++) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long addr = lmb.reserved.region[i].base +
-				     lmb_size_bytes(&lmb.reserved, i) - 1;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long addr = memblock.reserved.region[i].base +
+				     memblock_size_bytes(&memblock.reserved, i) - 1;
 		if (addr < lowmem_end_addr)
-			reserve_bootmem(lmb.reserved.region[i].base,
-					lmb_size_bytes(&lmb.reserved, i),
+			reserve_bootmem(memblock.reserved.region[i].base,
+					memblock_size_bytes(&memblock.reserved, i),
 					BOOTMEM_DEFAULT);
-		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
 			unsigned long adjusted_size = lowmem_end_addr -
-				      lmb.reserved.region[i].base;
-			reserve_bootmem(lmb.reserved.region[i].base,
+				      memblock.reserved.region[i].base;
+			reserve_bootmem(memblock.reserved.region[i].base,
 					adjusted_size, BOOTMEM_DEFAULT);
 		}
 	}
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
+	for (i = 0; i < memblock.reserved.cnt; i++)
+		reserve_bootmem(memblock.reserved.region[i].base,
+				memblock_size_bytes(&memblock.reserved, i),
 				BOOTMEM_DEFAULT);
 
 #endif
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long lmb_next_region_start_pfn,
-		      lmb_region_max_pfn;
+	unsigned long memblock_next_region_start_pfn,
+		      memblock_region_max_pfn;
 	int i;
 
-	for (i = 0; i < lmb.memory.cnt - 1; i++) {
-		lmb_region_max_pfn =
-			(lmb.memory.region[i].base >> PAGE_SHIFT) +
-			(lmb.memory.region[i].size >> PAGE_SHIFT);
-		lmb_next_region_start_pfn =
-			lmb.memory.region[i+1].base >> PAGE_SHIFT;
+	for (i = 0; i < memblock.memory.cnt - 1; i++) {
+		memblock_region_max_pfn =
+			(memblock.memory.region[i].base >> PAGE_SHIFT) +
+			(memblock.memory.region[i].size >> PAGE_SHIFT);
+		memblock_next_region_start_pfn =
+			memblock.memory.region[i+1].base >> PAGE_SHIFT;
 
-		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
-			register_nosave_region(lmb_region_max_pfn,
-					       lmb_next_region_start_pfn);
+		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
+			register_nosave_region(memblock_region_max_pfn,
+					       memblock_next_region_start_pfn);
 	}
 
 	return 0;
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-	unsigned long total_ram = lmb_phys_mem_size();
-	phys_addr_t top_of_ram = lmb_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
+	phys_addr_t top_of_ram = memblock_end_of_DRAM();
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
 	swiotlb_init(1);
 #endif
 
-	num_physpages = lmb.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
 	highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
 	for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 		struct page *page = pfn_to_page(pfn);
-		if (lmb_is_reserved(pfn << PAGE_SHIFT))
+		if (memblock_is_reserved(pfn << PAGE_SHIFT))
 			continue;
 		ClearPageReserved(page);
 		init_page_count(page);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f78f19e0a2a4..338c6f39eab2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -407,7 +407,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED	0x00000080
 
 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -428,8 +428,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retreive and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries.  Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a number N of memblock
+ * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as layed out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
@@ -454,15 +454,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retreive and validate the ibm,memblock-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_lmb_size(struct device_node *memory)
+static u64 of_get_memblock_size(struct device_node *memory)
 {
 	const u32 *prop;
 	u32 len;
 
-	prop = of_get_property(memory, "ibm,lmb-size", &len);
+	prop = of_get_property(memory, "ibm,memblock-size", &len);
 	if (!prop || len < sizeof(unsigned int))
 		return 0;
 
@@ -596,19 +596,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 					 unsigned long size)
 {
 	/*
-	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
+	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 	 * we've already adjusted it for the limit and it takes care of
 	 * having memory holes below the limit.  Also, in the case of
 	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 	 */
 
-	if (start + size <= lmb_end_of_DRAM())
+	if (start + size <= memblock_end_of_DRAM())
 		return size;
 
-	if (start >= lmb_end_of_DRAM())
+	if (start >= memblock_end_of_DRAM())
 		return 0;
 
-	return lmb_end_of_DRAM() - start;
+	return memblock_end_of_DRAM() - start;
 }
 
 /*
@@ -618,7 +618,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
 	/*
-	 * For each lmb in ibm,dynamic-memory a corresponding
+	 * For each memblock in ibm,dynamic-memory a corresponding
 	 * entry in linux,drconf-usable-memory property contains
 	 * a counter followed by that many (base, size) duple.
 	 * read the counter from linux,drconf-usable-memory
@@ -634,7 +634,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
 	const u32 *dm, *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
-	unsigned long lmb_size, base, size, sz;
+	unsigned long memblock_size, base, size, sz;
 	int nid;
 	struct assoc_arrays aa;
 
@@ -642,8 +642,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	if (!n)
 		return;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -667,7 +667,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 			continue;
 
 		base = drmem.base_addr;
-		size = lmb_size;
+		size = memblock_size;
 		ranges = 1;
 
 		if (is_kexec_kdump) {
@@ -787,7 +787,7 @@ new_range:
 	}
 
 	/*
-	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
 	 * property in the ibm,dynamic-reconfiguration-memory node.
 	 */
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -799,8 +799,8 @@ new_range:
 
 static void __init setup_nonnuma(void)
 {
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = memblock_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
 	unsigned int i, nid = 0;
 
@@ -809,9 +809,9 @@ static void __init setup_nonnuma(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < lmb.memory.cnt; ++i) {
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+	for (i = 0; i < memblock.memory.cnt; ++i) {
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -869,7 +869,7 @@ static void __init dump_numa_memory_topology(void)
 
 		count = 0;
 
-		for (i = 0; i < lmb_end_of_DRAM();
+		for (i = 0; i < memblock_end_of_DRAM();
 		     i += (1 << SECTION_SIZE_BITS)) {
 			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
@@ -889,7 +889,7 @@ static void __init dump_numa_memory_topology(void)
 }
 
 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, satisfying the memblock or bootmem allocator where
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
@@ -903,11 +903,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 	int new_nid;
 	unsigned long ret_paddr;
 
-	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret_paddr)
-		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 
 	if (!ret_paddr)
 		panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -917,14 +917,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 
 	/*
 	 * We initialize the nodes in numeric order: 0, 1, 2...
-	 * and hand over control from the LMB allocator to the
+	 * and hand over control from the MEMBLOCK allocator to the
 	 * bootmem allocator.  If this function is called for
 	 * node 5, then we know that all nodes <5 are using the
-	 * bootmem allocator instead of the LMB allocator.
+	 * bootmem allocator instead of the MEMBLOCK allocator.
 	 *
 	 * So, check the nid from which this allocation came
 	 * and double check to see if we need to use bootmem
-	 * instead of the LMB.  We don't free the LMB memory
+	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
 	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
@@ -949,9 +949,9 @@ static void mark_reserved_regions_for_nid(int nid)
 	struct pglist_data *node = NODE_DATA(nid);
 	int i;
 
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long physbase = memblock.reserved.region[i].base;
+		unsigned long size = memblock.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
@@ -959,7 +959,7 @@ static void mark_reserved_regions_for_nid(int nid)
 				 node->node_spanned_pages;
 
 		/*
-		 * Check to make sure that this lmb.reserved area is
+		 * Check to make sure that this memblock.reserved area is
 		 * within the bounds of the node that we care about.
 		 * Checking the nid of the start and end points is not
 		 * sufficient because the reserved area could span the
@@ -1017,7 +1017,7 @@ void __init do_init_bootmem(void)
 	int nid;
 
 	min_low_pfn = 0;
-	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn;
 
 	if (parse_numa_properties())
@@ -1094,7 +1094,7 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	free_area_init_nodes(max_zone_pfns);
 }
 
@@ -1128,7 +1128,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
 	const u32 *dm;
 	unsigned int drconf_cell_cnt, rc;
-	unsigned long lmb_size;
+	unsigned long memblock_size;
 	struct assoc_arrays aa;
 	int nid = -1;
 
@@ -1136,8 +1136,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 	if (!drconf_cell_cnt)
 		return -1;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return -1;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -1156,7 +1156,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 			continue;
 
 		if ((scn_addr < drmem.base_addr)
-		    || (scn_addr >= (drmem.base_addr + lmb_size)))
+		    || (scn_addr >= (drmem.base_addr + memblock_size)))
 			continue;
 
 		nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1169,7 +1169,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
  */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1210,8 +1210,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
 
 /*
  * Find the node associated with a hot added memory section.  Section
- * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
  */
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 34347b2e7e31..a87ead0138b4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgtable.h>
@@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
 	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
-	    !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
+	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
 		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
@@ -331,7 +331,7 @@ void __init mapin_ram(void)
 		s = mmu_mapin_ram(top);
 		__mapin_ram_chunk(s, top);
 
-		top = lmb_end_of_DRAM();
+		top = memblock_end_of_DRAM();
 		s = wii_mmu_mapin_mem2(top);
 		__mapin_ram_chunk(s, top);
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d050fc8d9714..21d6dfab7942 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -34,7 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgalloc.h>
@@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size)
 	if (init_bootmem_done)
 		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
 	else
-		pt = __va(lmb_alloc_base(size, size,
+		pt = __va(memblock_alloc_base(size, size,
 					 __pa(MAX_DMA_ADDRESS)));
 	memset(pt, 0, size);
 
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f11c2cdcb0fe..f8a01829d64f 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/mmu.h>
@@ -223,7 +223,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
 				   __initial_memory_limit_addr));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 687fddaa24c5..446a01842a73 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -12,7 +12,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -252,7 +252,7 @@ void __init stabs_alloc(void)
 		if (cpu == 0)
 			continue; /* stab for CPU 0 is statically allocated */
 
-		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
 		newstab = (unsigned long)__va(newstab);
 
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 3b10f804b735..fe391e942521 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -34,7 +34,7 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -503,7 +503,7 @@ static void __early_init_mmu(int boot_cpu)
 	/* Set the global containing the top of the linear mapping
 	 * for use by the TLB miss code
 	 */
-	linear_map_top = lmb_end_of_DRAM();
+	linear_map_top = memblock_end_of_DRAM();
 
 	/* A sync won't hurt us after mucking around with
 	 * the MMU configuration