-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  arch/microblaze/Kconfig | 2
-rw-r--r--  arch/microblaze/include/asm/memblock.h (renamed from arch/microblaze/include/asm/lmb.h) | 10
-rw-r--r--  arch/microblaze/kernel/prom.c | 14
-rw-r--r--  arch/microblaze/mm/init.c | 40
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/abs_addr.h | 2
-rw-r--r--  arch/powerpc/include/asm/lmb.h | 15
-rw-r--r--  arch/powerpc/include/asm/memblock.h | 15
-rw-r--r--  arch/powerpc/kernel/btext.c | 2
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c | 2
-rw-r--r--  arch/powerpc/kernel/dma.c | 4
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 12
-rw-r--r--  arch/powerpc/kernel/paca.c | 8
-rw-r--r--  arch/powerpc/kernel/prom.c | 62
-rw-r--r--  arch/powerpc/kernel/rtas.c | 6
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 2
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 16
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 20
-rw-r--r--  arch/powerpc/kernel/vdso.c | 4
-rw-r--r--  arch/powerpc/mm/40x_mmu.c | 2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 26
-rw-r--r--  arch/powerpc/mm/init_32.c | 16
-rw-r--r--  arch/powerpc/mm/init_64.c | 2
-rw-r--r--  arch/powerpc/mm/mem.c | 78
-rw-r--r--  arch/powerpc/mm/numa.c | 84
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 6
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 4
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 4
-rw-r--r--  arch/powerpc/mm/stab.c | 4
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 4
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 10
-rw-r--r--  arch/powerpc/platforms/embedded6xx/wii.c | 12
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 6
-rw-r--r--  arch/powerpc/platforms/ps3/os-area.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 38
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/phyp_dump.c | 4
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c | 8
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 4
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/include/asm/lmb.h | 6
-rw-r--r--  arch/sh/include/asm/memblock.h | 6
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 18
-rw-r--r--  arch/sh/kernel/setup.c | 8
-rw-r--r--  arch/sh/mm/init.c | 40
-rw-r--r--  arch/sh/mm/numa.c | 8
-rw-r--r--  arch/sparc/Kconfig | 2
-rw-r--r--  arch/sparc/include/asm/lmb.h | 10
-rw-r--r--  arch/sparc/include/asm/memblock.h | 10
-rw-r--r--  arch/sparc/kernel/mdesc.c | 16
-rw-r--r--  arch/sparc/kernel/prom_64.c | 4
-rw-r--r--  arch/sparc/mm/init_64.c | 54
-rw-r--r--  include/linux/lmb.h | 89
-rw-r--r--  include/linux/memblock.h | 89
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/lmb.c | 541
-rw-r--r--  mm/Kconfig | 3
-rw-r--r--  mm/Makefile | 2
-rw-r--r--  mm/memblock.c | 541
72 files changed, 1025 insertions(+), 1025 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 82d6aeb5228f..4ddb58df081e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1265,7 +1265,7 @@ and is between 256 and 4096 characters. It is defined in the file
 	If there are multiple matching configurations changing
 	the same attribute, the last one is used.
 
-	lmb=debug	[KNL] Enable lmb debug messages.
+	memblock=debug	[KNL] Enable memblock debug messages.
 
 	load_ramdisk=	[RAM] List of ramdisks to load from floppy
 			See Documentation/blockdev/ramdisk.txt.
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 76818f926539..505a08592423 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -5,7 +5,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
 
 config MICROBLAZE
 	def_bool y
-	select HAVE_LMB
+	select HAVE_MEMBLOCK
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/microblaze/include/asm/lmb.h b/arch/microblaze/include/asm/memblock.h
index a0a0a929c293..f9c2fa331d2a 100644
--- a/arch/microblaze/include/asm/lmb.h
+++ b/arch/microblaze/include/asm/memblock.h
@@ -6,12 +6,12 @@
  * for more details.
  */
 
-#ifndef _ASM_MICROBLAZE_LMB_H
-#define _ASM_MICROBLAZE_LMB_H
+#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
+#define _ASM_MICROBLAZE_MEMBLOCK_H
 
-/* LMB limit is OFF */
-#define LMB_REAL_LIMIT 0xFFFFFFFF
+/* MEMBLOCK limit is OFF */
+#define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF
 
-#endif /* _ASM_MICROBLAZE_LMB_H */
+#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
 
 
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a15ef6d67ca9..427b13b4740f 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -29,7 +29,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/page.h>
@@ -49,12 +49,12 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node)
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-	lmb_add(base, size);
+	memblock_add(base, size);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return lmb_alloc(size, align);
+	return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -104,8 +104,8 @@ void __init early_init_devtree(void *params)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-	/* Scan memory nodes and rebuild LMBs */
-	lmb_init();
+	/* Scan memory nodes and rebuild MEMBLOCKs */
+	memblock_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
@@ -113,9 +113,9 @@ void __init early_init_devtree(void *params)
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 	parse_early_param();
 
-	lmb_analyze();
+	memblock_analyze();
 
-	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
+	pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
 
 	pr_debug(" <- early_init_devtree()\n");
 }
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index cca3579d4268..db5934989926 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -10,7 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
@@ -76,10 +76,10 @@ void __init setup_memory(void)
 	u32 kernel_align_start, kernel_align_size;
 
 	/* Find main memory where is the kernel */
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		memory_start = (u32) lmb.memory.region[i].base;
-		memory_end = (u32) lmb.memory.region[i].base
-				+ (u32) lmb.memory.region[i].size;
+	for (i = 0; i < memblock.memory.cnt; i++) {
+		memory_start = (u32) memblock.memory.region[i].base;
+		memory_end = (u32) memblock.memory.region[i].base
+				+ (u32) memblock.memory.region[i].size;
 		if ((memory_start <= (u32)_text) &&
 			((u32)_text <= memory_end)) {
 			memory_size = memory_end - memory_start;
@@ -100,7 +100,7 @@ void __init setup_memory(void)
 	kernel_align_start = PAGE_DOWN((u32)_text);
 	/* ALIGN can be remove because _end in vmlinux.lds.S is align */
 	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-	lmb_reserve(kernel_align_start, kernel_align_size);
+	memblock_reserve(kernel_align_start, kernel_align_size);
 	printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
 		__func__, kernel_align_start, kernel_align_start
 			+ kernel_align_size, kernel_align_size);
@@ -141,18 +141,18 @@ void __init setup_memory(void)
 	map_size = init_bootmem_node(&contig_page_data,
 			PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
 #endif
-	lmb_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
+	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
 
 	/* free bootmem is whole main memory */
 	free_bootmem(memory_start, memory_size);
 
 	/* reserve allocate blocks */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
-			(u32) lmb.reserved.region[i].base,
-			(u32) lmb_size_bytes(&lmb.reserved, i));
-		reserve_bootmem(lmb.reserved.region[i].base,
-			lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
+			(u32) memblock.reserved.region[i].base,
+			(u32) memblock_size_bytes(&memblock.reserved, i));
+		reserve_bootmem(memblock.reserved.region[i].base,
+			memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT);
 	}
 #ifdef CONFIG_MMU
 	init_bootmem_done = 1;
@@ -235,7 +235,7 @@ static void mm_cmdline_setup(void)
 		if (maxmem && memory_size > maxmem) {
 			memory_size = maxmem;
 			memory_end = memory_start + memory_size;
-			lmb.memory.region[0].size = memory_size;
+			memblock.memory.region[0].size = memory_size;
 		}
 	}
 }
@@ -273,19 +273,19 @@ asmlinkage void __init mmu_init(void)
 {
 	unsigned int kstart, ksize;
 
-	if (!lmb.reserved.cnt) {
+	if (!memblock.reserved.cnt) {
 		printk(KERN_EMERG "Error memory count\n");
 		machine_restart(NULL);
 	}
 
-	if ((u32) lmb.memory.region[0].size < 0x1000000) {
+	if ((u32) memblock.memory.region[0].size < 0x1000000) {
 		printk(KERN_EMERG "Memory must be greater than 16MB\n");
 		machine_restart(NULL);
 	}
 	/* Find main memory where the kernel is */
-	memory_start = (u32) lmb.memory.region[0].base;
-	memory_end = (u32) lmb.memory.region[0].base +
-				(u32) lmb.memory.region[0].size;
+	memory_start = (u32) memblock.memory.region[0].base;
+	memory_end = (u32) memblock.memory.region[0].base +
+				(u32) memblock.memory.region[0].size;
 	memory_size = memory_end - memory_start;
 
 	mm_cmdline_setup(); /* FIXME parse args from command line - not used */
@@ -297,7 +297,7 @@ asmlinkage void __init mmu_init(void)
 	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
 	/* kernel size */
 	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
-	lmb_reserve(kstart, ksize);
+	memblock_reserve(kstart, ksize);
 
 #if defined(CONFIG_BLK_DEV_INITRD)
 	/* Remove the init RAM disk from the available memory. */
@@ -335,7 +335,7 @@ void __init *early_get_page(void)
 		 * Mem start + 32MB -> here is limit
 		 * because of mem mapping from head.S
 		 */
-		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
 				memory_start + 0x2000000));
 	}
 	return p;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6506bf4fbff1..2031a2846865 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -132,7 +132,7 @@ config PPC
 	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_LMB
+	select HAVE_MEMBLOCK
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select USE_GENERIC_SMP_HELPERS if SMP
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
index 98324c5a8286..9a846efe6382 100644
--- a/arch/powerpc/include/asm/abs_addr.h
+++ b/arch/powerpc/include/asm/abs_addr.h
@@ -12,7 +12,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/types.h>
 #include <asm/page.h>
diff --git a/arch/powerpc/include/asm/lmb.h b/arch/powerpc/include/asm/lmb.h
deleted file mode 100644
index 6f5fdf0a19ae..000000000000
--- a/arch/powerpc/include/asm/lmb.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_POWERPC_LMB_H
-#define _ASM_POWERPC_LMB_H
-
-#include <asm/udbg.h>
-
-#define LMB_DBG(fmt...) udbg_printf(fmt)
-
-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define LMB_REAL_LIMIT lowmem_end_addr
-#else
-#define LMB_REAL_LIMIT 0
-#endif
-
-#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
new file mode 100644
index 000000000000..3c29728b56b1
--- /dev/null
+++ b/arch/powerpc/include/asm/memblock.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_POWERPC_MEMBLOCK_H
+#define _ASM_POWERPC_MEMBLOCK_H
+
+#include <asm/udbg.h>
+
+#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
+
+#ifdef CONFIG_PPC32
+extern phys_addr_t lowmem_end_addr;
+#define MEMBLOCK_REAL_LIMIT lowmem_end_addr
+#else
+#define MEMBLOCK_REAL_LIMIT 0
+#endif
+
+#endif /* _ASM_POWERPC_MEMBLOCK_H */
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 26e58630ed7b..625942ae5585 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -7,7 +7,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/prom.h>
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 29df48f2b61a..417f7b05a9ce 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,7 +24,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5fb667a60894..40f524643ba6 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -13,7 +13,7 @@
 
 #include <linux/crash_dump.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/code-patching.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -33,7 +33,7 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
-	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
 }
 
 static void __init create_trampoline(unsigned long addr)
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e7fe218b8697..02f724f36753 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -71,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
 	sd->max_direct_dma_addr = 0;
 
 	/* May need to bounce if the device can't address all of DRAM */
-	if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
+	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
 		set_dma_ops(dev, &swiotlb_dma_ops);
 
 	return NOTIFY_DONE;
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8d1de6f31d5a..84d6367ec003 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -9,7 +9,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/gfp.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
 
@@ -89,7 +89,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 	/* Could be improved so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
-	return mask >= (lmb_end_of_DRAM() - 1);
+	return mask >= (memblock_end_of_DRAM() - 1);
 #else
 	return 1;
 #endif
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index bb3d893a8353..89f005116aac 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -12,7 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/reboot.h>
 #include <linux/threads.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -66,11 +66,11 @@ void __init reserve_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	/* this is necessary because of lmb_phys_mem_size() */
-	lmb_analyze();
+	/* this is necessary because of memblock_phys_mem_size() */
+	memblock_analyze();
 
 	/* use common parsing */
-	ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 			&crash_size, &crash_base);
 	if (ret == 0 && crash_size > 0) {
 		crashk_res.start = crash_base;
@@ -133,9 +133,9 @@ void __init reserve_crashkernel(void)
133 "for crashkernel (System RAM: %ldMB)\n", 133 "for crashkernel (System RAM: %ldMB)\n",
134 (unsigned long)(crash_size >> 20), 134 (unsigned long)(crash_size >> 20),
135 (unsigned long)(crashk_res.start >> 20), 135 (unsigned long)(crashk_res.start >> 20),
136 (unsigned long)(lmb_phys_mem_size() >> 20)); 136 (unsigned long)(memblock_phys_mem_size() >> 20));
137 137
138 lmb_reserve(crashk_res.start, crash_size); 138 memblock_reserve(crashk_res.start, crash_size);
139} 139}
140 140
141int overlaps_crashkernel(unsigned long start, unsigned long size) 141int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f88acf0218db..139a773853f4 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -9,7 +9,7 @@
 
 #include <linux/threads.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/firmware.h>
 #include <asm/lppaca.h>
@@ -117,7 +117,7 @@ void __init allocate_pacas(void)
 	 * the first segment. On iSeries they must be within the area mapped
 	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
 	 */
-	limit = min(0x10000000ULL, lmb.rmo_size);
+	limit = min(0x10000000ULL, memblock.rmo_size);
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
@@ -128,7 +128,7 @@ void __init allocate_pacas(void)
 
 	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
 
-	paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
 	memset(paca, 0, paca_size);
 
 	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
@@ -148,7 +148,7 @@ void __init free_unused_pacas(void)
 	if (new_size >= paca_size)
 		return;
 
-	lmb_free(__pa(paca) + new_size, paca_size - new_size);
+	memblock_free(__pa(paca) + new_size, paca_size - new_size);
 
 	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
 		paca_size - new_size);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05131d634e73..9d3953983fb7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -31,7 +31,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -98,7 +98,7 @@ static void __init move_device_tree(void)
 
 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
-		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+		p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -411,13 +411,13 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
 	__be32 *dm, *ls, *usm;
 	unsigned long l, n, flags;
-	u64 base, size, lmb_size;
+	u64 base, size, memblock_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
-	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
-	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
 	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
 	if (dm == NULL || l < sizeof(__be32))
@@ -442,11 +442,11 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 		   or if the block is not assigned to this partition (0x8) */
 		if ((flags & 0x80) || !(flags & 0x8))
 			continue;
-		size = lmb_size;
+		size = memblock_size;
 		rngs = 1;
 		if (is_kexec_kdump) {
 			/*
-			 * For each lmb in ibm,dynamic-memory, a corresponding
+			 * For each memblock in ibm,dynamic-memory, a corresponding
 			 * entry in linux,drconf-usable-memory property contains
 			 * a counter 'p' followed by 'p' (base, size) duple.
 			 * Now read the counter from
@@ -469,10 +469,10 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 				if ((base + size) > 0x80000000ul)
 					size = 0x80000000ul - base;
 			}
-			lmb_add(base, size);
+			memblock_add(base, size);
 		} while (--rngs);
 	}
-	lmb_dump_all();
+	memblock_dump_all();
 	return 0;
 }
 #else
@@ -501,14 +501,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	}
 #endif
 
-	lmb_add(base, size);
+	memblock_add(base, size);
 
 	memstart_addr = min((u64)memstart_addr, base);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return lmb_alloc(size, align);
+	return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -534,12 +534,12 @@ static void __init early_reserve_mem(void)
 	/* before we do anything, lets reserve the dt blob */
 	self_base = __pa((unsigned long)initial_boot_params);
 	self_size = initial_boot_params->totalsize;
-	lmb_reserve(self_base, self_size);
+	memblock_reserve(self_base, self_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* then reserve the initrd, if any */
 	if (initrd_start && (initrd_end > initrd_start))
-		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
+		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
@@ -560,7 +560,7 @@ static void __init early_reserve_mem(void)
 			if (base_32 == self_base && size_32 == self_size)
 				continue;
 			DBG("reserving: %x -> %x\n", base_32, size_32);
-			lmb_reserve(base_32, size_32);
+			memblock_reserve(base_32, size_32);
 		}
 		return;
 	}
@@ -571,7 +571,7 @@ static void __init early_reserve_mem(void)
 		if (size == 0)
 			break;
 		DBG("reserving: %llx -> %llx\n", base, size);
-		lmb_reserve(base, size);
+		memblock_reserve(base, size);
 	}
 }
 
@@ -594,7 +594,7 @@ static inline unsigned long phyp_dump_calculate_reserve_size(void)
 		return phyp_dump_info->reserve_bootvar;
 
 	/* divide by 20 to get 5% of value */
-	tmp = lmb_end_of_DRAM();
+	tmp = memblock_end_of_DRAM();
 	do_div(tmp, 20);
 
 	/* round it down in multiples of 256 */
@@ -633,11 +633,11 @@ static void __init phyp_dump_reserve_mem(void)
 	if (phyp_dump_info->phyp_dump_is_active) {
 		/* Reserve *everything* above RMR.Area freed by userland tools*/
 		base = variable_reserve_size;
-		size = lmb_end_of_DRAM() - base;
+		size = memblock_end_of_DRAM() - base;
 
 		/* XXX crashed_ram_end is wrong, since it may be beyond
 		 * the memory_limit, it will need to be adjusted. */
-		lmb_reserve(base, size);
+		memblock_reserve(base, size);
 
 		phyp_dump_info->init_reserve_start = base;
 		phyp_dump_info->init_reserve_size = size;
@@ -645,8 +645,8 @@ static void __init phyp_dump_reserve_mem(void)
 		size = phyp_dump_info->cpu_state_size +
 			phyp_dump_info->hpte_region_size +
 			variable_reserve_size;
-		base = lmb_end_of_DRAM() - size;
-		lmb_reserve(base, size);
+		base = memblock_end_of_DRAM() - size;
+		memblock_reserve(base, size);
 		phyp_dump_info->init_reserve_start = base;
 		phyp_dump_info->init_reserve_size = size;
 	}
@@ -681,8 +681,8 @@ void __init early_init_devtree(void *params)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-	/* Scan memory nodes and rebuild LMBs */
-	lmb_init();
+	/* Scan memory nodes and rebuild MEMBLOCKs */
+	memblock_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
@@ -690,11 +690,11 @@ void __init early_init_devtree(void *params)
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 	parse_early_param();
 
-	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
-	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
+	memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
 	/* If relocatable, reserve first 32k for interrupt vectors etc. */
 	if (PHYSICAL_START > MEMORY_START)
-		lmb_reserve(MEMORY_START, 0x8000);
+		memblock_reserve(MEMORY_START, 0x8000);
 	reserve_kdump_trampoline();
 	reserve_crashkernel();
 	early_reserve_mem();
@@ -706,17 +706,17 @@ void __init early_init_devtree(void *params)
 
 		/* Ensure that total memory size is page-aligned, because
 		 * otherwise mark_bootmem() gets upset. */
-		lmb_analyze();
-		memsize = lmb_phys_mem_size();
+		memblock_analyze();
+		memsize = memblock_phys_mem_size();
 		if ((memsize & PAGE_MASK) != memsize)
 			limit = memsize & PAGE_MASK;
 	}
-	lmb_enforce_memory_limit(limit);
+	memblock_enforce_memory_limit(limit);
 
-	lmb_analyze();
-	lmb_dump_all();
+	memblock_analyze();
+	memblock_dump_all();
 
-	DBG("Phys. mem: %llx\n", lmb_phys_mem_size());
+	DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
 
 	/* We may need to relocate the flat tree, do it now.
 	 * FIXME .. and the initrd too? */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0e1ec6f746f6..d0516dbee762 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -22,7 +22,7 @@
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/prom.h>
@@ -934,11 +934,11 @@ void __init rtas_initialize(void)
 	 */
 #ifdef CONFIG_PPC64
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-		rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+		rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif
-	rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
 
 #ifdef CONFIG_RTAS_ERROR_LOGGING
 	rtas_last_error_token = rtas_token("rtas-last-error");
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 5e4d852f640c..b7e6c7e193ae 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,7 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
 #include <asm/paca.h>
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 7d84b210f168..a10ffc85ada7 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,7 +16,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -246,12 +246,12 @@ static void __init irqstack_early_init(void)
 	unsigned int i;
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
 
@@ -261,15 +261,15 @@ static void __init exc_lvl_early_init(void)
 	unsigned int i;
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
 	}
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 643dcac40fcb..d135f93cb0f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
 
 	printk("-----------------------------------------------------\n");
 	printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-	printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
+	printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
 	if (ppc64_caches.dline_size != 0x80)
 		printk("ppc64_caches.dcache_line_size = 0x%x\n",
 			ppc64_caches.dline_size);
@@ -443,10 +443,10 @@ static void __init irqstack_early_init(void)
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					THREAD_SIZE, limit));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					THREAD_SIZE, limit));
 	}
 }
@@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
 #else
@@ -487,11 +487,11 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), lmb.rmo_size);
+	limit = min(slb0_limit(), memblock.rmo_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
-		sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
 		sp += THREAD_SIZE;
 		paca[i].emergency_sp = __va(sp);
 	}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index d84d19224a95..13002fe206e7 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -22,7 +22,7 @@
 #include <linux/elf.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -734,7 +734,7 @@ static int __init vdso_init(void)
 	vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		vdso_data->platform |= 1;
-	vdso_data->physicalMemorySize = lmb_phys_mem_size();
+	vdso_data->physicalMemorySize = memblock_phys_mem_size();
 	vdso_data->dcache_size = ppc64_caches.dsize;
 	vdso_data->dcache_line_size = ppc64_caches.dline_size;
 	vdso_data->icache_size = ppc64_caches.isize;
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 65abfcfaaa9e..1dc2fa5ce1bd 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 16 and 4 MiB
 	 * pages. Consequently, restrict the top end of RAM currently
-	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3ecdcec0a39e..98f262de5585 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	printk(KERN_INFO "Huge page(16GB) memory: "
 			"addr = 0x%lX size = 0x%lX pages = %d\n",
 			phys_addr, block_size, expected_pages);
-	if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-		lmb_reserve(phys_addr, block_size * expected_pages);
+	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+		memblock_reserve(phys_addr, block_size * expected_pages);
 		add_gpage(phys_addr, block_size, expected_pages);
 	}
 	return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
 	 * and we have at least 1G of RAM at boot
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-	    lmb_phys_mem_size() >= 0x40000000)
+	    memblock_phys_mem_size() >= 0x40000000)
 		mmu_vmemmap_psize = MMU_PAGE_16M;
 	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
 		mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
 		return 1UL << ppc64_pft_size;
 
 	/* round mem_size up to next power of 2 */
-	mem_size = lmb_phys_mem_size();
+	mem_size = memblock_phys_mem_size();
 	rnd_mem_size = 1UL << __ilog2(mem_size);
 	if (rnd_mem_size < mem_size)
 		rnd_mem_size <<= 1;
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
 	else
 		limit = 0;
 
-	table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+	table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
 	DBG("Hash table allocated at %lx, size: %lx\n", table,
 	    htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-						    1, lmb.rmo_size));
+	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+						    1, memblock.rmo_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < lmb.memory.cnt; i++) {
-		base = (unsigned long)__va(lmb.memory.region[i].base);
-		size = lmb.memory.region[i].size;
+	for (i=0; i < memblock.memory.cnt; i++) {
+		base = (unsigned long)__va(memblock.memory.region[i].base);
+		size = memblock.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
-		 * in such a way that it will not cross two lmb regions and
+		 * in such a way that it will not cross two memblock regions and
 		 * will fit within a single 16Mb page.
 		 * The DART space is assumed to be a full 16Mb region even if
 		 * we only use 2Mb of that space. We will use more of it later
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 767333005eb4..6a6975dc2654 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -30,7 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
@@ -136,17 +136,17 @@ void __init MMU_init(void)
 	/* parse args from command line */
 	MMU_setup();
 
-	if (lmb.memory.cnt > 1) {
+	if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-		lmb.memory.cnt = 1;
-		lmb_analyze();
+		memblock.memory.cnt = 1;
+		memblock_analyze();
 		printk(KERN_WARNING "Only using first contiguous memory region");
 #else
 		wii_memory_fixups();
 #endif
 	}
 
-	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+	total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
 	lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
@@ -161,8 +161,8 @@ void __init MMU_init(void)
 		lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
-		lmb_enforce_memory_limit(lowmem_end_addr);
-		lmb_analyze();
+		memblock_enforce_memory_limit(lowmem_end_addr);
+		memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
 
@@ -200,7 +200,7 @@ void __init *early_get_page(void)
 	if (init_bootmem_done) {
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
-		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
 				__initial_memory_limit_addr));
 	}
 	return p;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e267f223fdff..71f1415e2472 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -40,7 +40,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/poison.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f594d774bf7..1a84a8d00005 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
 	int i;
-	for (i=0; i < lmb.memory.cnt; i++) {
+	for (i=0; i < memblock.memory.cnt; i++) {
 		unsigned long base;
 
-		base = lmb.memory.region[i].base;
+		base = memblock.memory.region[i].base;
 
 		if ((paddr >= base) &&
-			(paddr < (base + lmb.memory.region[i].size))) {
+			(paddr < (base + memblock.memory.region[i].size))) {
 			return 1;
 		}
 	}
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
  * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures. Walk through the
+ * Instead it maintains it in memblock.memory structures. Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct lmb_property res;
+	struct memblock_property res;
 	unsigned long pfn, len;
 	u64 end;
 	int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 	res.size = (u64) nr_pages << PAGE_SHIFT;
 
 	end = res.base + res.size - 1;
-	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+	while ((res.base < end) && (memblock_find(&res) >= 0)) {
 		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
 		len = (unsigned long)(res.size >> PAGE_SHIFT);
 		ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
 	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
198 */ 198 */
199 bootmap_pages = bootmem_bootmap_pages(total_pages); 199 bootmap_pages = bootmem_bootmap_pages(total_pages);
200 200
201 start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); 201 start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
202 202
203 min_low_pfn = MEMORY_START >> PAGE_SHIFT; 203 min_low_pfn = MEMORY_START >> PAGE_SHIFT;
204 boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); 204 boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
205 205
206 /* Add active regions with valid PFNs */ 206 /* Add active regions with valid PFNs */
207 for (i = 0; i < lmb.memory.cnt; i++) { 207 for (i = 0; i < memblock.memory.cnt; i++) {
208 unsigned long start_pfn, end_pfn; 208 unsigned long start_pfn, end_pfn;
209 start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; 209 start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
210 end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); 210 end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
211 add_active_range(0, start_pfn, end_pfn); 211 add_active_range(0, start_pfn, end_pfn);
212 } 212 }
213 213
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
218 free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); 218 free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
219 219
220 /* reserve the sections we're already using */ 220 /* reserve the sections we're already using */
221 for (i = 0; i < lmb.reserved.cnt; i++) { 221 for (i = 0; i < memblock.reserved.cnt; i++) {
222 unsigned long addr = lmb.reserved.region[i].base + 222 unsigned long addr = memblock.reserved.region[i].base +
223 lmb_size_bytes(&lmb.reserved, i) - 1; 223 memblock_size_bytes(&memblock.reserved, i) - 1;
224 if (addr < lowmem_end_addr) 224 if (addr < lowmem_end_addr)
225 reserve_bootmem(lmb.reserved.region[i].base, 225 reserve_bootmem(memblock.reserved.region[i].base,
226 lmb_size_bytes(&lmb.reserved, i), 226 memblock_size_bytes(&memblock.reserved, i),
227 BOOTMEM_DEFAULT); 227 BOOTMEM_DEFAULT);
228 else if (lmb.reserved.region[i].base < lowmem_end_addr) { 228 else if (memblock.reserved.region[i].base < lowmem_end_addr) {
229 unsigned long adjusted_size = lowmem_end_addr - 229 unsigned long adjusted_size = lowmem_end_addr -
230 lmb.reserved.region[i].base; 230 memblock.reserved.region[i].base;
231 reserve_bootmem(lmb.reserved.region[i].base, 231 reserve_bootmem(memblock.reserved.region[i].base,
232 adjusted_size, BOOTMEM_DEFAULT); 232 adjusted_size, BOOTMEM_DEFAULT);
233 } 233 }
234 } 234 }
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
236 free_bootmem_with_active_regions(0, max_pfn); 236 free_bootmem_with_active_regions(0, max_pfn);
237 237
238 /* reserve the sections we're already using */ 238 /* reserve the sections we're already using */
239 for (i = 0; i < lmb.reserved.cnt; i++) 239 for (i = 0; i < memblock.reserved.cnt; i++)
240 reserve_bootmem(lmb.reserved.region[i].base, 240 reserve_bootmem(memblock.reserved.region[i].base,
241 lmb_size_bytes(&lmb.reserved, i), 241 memblock_size_bytes(&memblock.reserved, i),
242 BOOTMEM_DEFAULT); 242 BOOTMEM_DEFAULT);
243 243
244#endif 244#endif
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
251/* mark pages that don't exist as nosave */ 251/* mark pages that don't exist as nosave */
252static int __init mark_nonram_nosave(void) 252static int __init mark_nonram_nosave(void)
253{ 253{
254 unsigned long lmb_next_region_start_pfn, 254 unsigned long memblock_next_region_start_pfn,
255 lmb_region_max_pfn; 255 memblock_region_max_pfn;
256 int i; 256 int i;
257 257
258 for (i = 0; i < lmb.memory.cnt - 1; i++) { 258 for (i = 0; i < memblock.memory.cnt - 1; i++) {
259 lmb_region_max_pfn = 259 memblock_region_max_pfn =
260 (lmb.memory.region[i].base >> PAGE_SHIFT) + 260 (memblock.memory.region[i].base >> PAGE_SHIFT) +
261 (lmb.memory.region[i].size >> PAGE_SHIFT); 261 (memblock.memory.region[i].size >> PAGE_SHIFT);
262 lmb_next_region_start_pfn = 262 memblock_next_region_start_pfn =
263 lmb.memory.region[i+1].base >> PAGE_SHIFT; 263 memblock.memory.region[i+1].base >> PAGE_SHIFT;
264 264
265 if (lmb_region_max_pfn < lmb_next_region_start_pfn) 265 if (memblock_region_max_pfn < memblock_next_region_start_pfn)
266 register_nosave_region(lmb_region_max_pfn, 266 register_nosave_region(memblock_region_max_pfn,
267 lmb_next_region_start_pfn); 267 memblock_next_region_start_pfn);
268 } 268 }
269 269
270 return 0; 270 return 0;
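mark_nonram_nosave() relies on memblock.memory being kept sorted by base address: any gap between the end of region i and the start of region i+1 is a hole, and its pages are registered as nosave. The same walk as a self-contained sketch, with invented region values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct region { uint64_t base, size; };

int main(void)
{
    /* Sorted, non-overlapping regions with one hole between them. */
    const struct region mem[] = {
        { 0x00000000, 0x10000000 },
        { 0x20000000, 0x10000000 },
    };
    const int cnt = 2;

    for (int i = 0; i < cnt - 1; i++) {
        uint64_t max_pfn  = (mem[i].base + mem[i].size) >> PAGE_SHIFT;
        uint64_t next_pfn = mem[i + 1].base >> PAGE_SHIFT;
        if (max_pfn < next_pfn) /* pfns in between belong to no region */
            printf("nosave: pfn %llu..%llu\n",
                   (unsigned long long)max_pfn,
                   (unsigned long long)(next_pfn - 1));
    }
    return 0;
}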
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
275 */ 275 */
276void __init paging_init(void) 276void __init paging_init(void)
277{ 277{
278 unsigned long total_ram = lmb_phys_mem_size(); 278 unsigned long total_ram = memblock_phys_mem_size();
279 phys_addr_t top_of_ram = lmb_end_of_DRAM(); 279 phys_addr_t top_of_ram = memblock_end_of_DRAM();
280 unsigned long max_zone_pfns[MAX_NR_ZONES]; 280 unsigned long max_zone_pfns[MAX_NR_ZONES];
281 281
282#ifdef CONFIG_PPC32 282#ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
327 swiotlb_init(1); 327 swiotlb_init(1);
328#endif 328#endif
329 329
330 num_physpages = lmb.memory.size >> PAGE_SHIFT; 330 num_physpages = memblock.memory.size >> PAGE_SHIFT;
331 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 331 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
332 332
333#ifdef CONFIG_NEED_MULTIPLE_NODES 333#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
364 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; 364 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
365 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { 365 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
366 struct page *page = pfn_to_page(pfn); 366 struct page *page = pfn_to_page(pfn);
367 if (lmb_is_reserved(pfn << PAGE_SHIFT)) 367 if (memblock_is_reserved(pfn << PAGE_SHIFT))
368 continue; 368 continue;
369 ClearPageReserved(page); 369 ClearPageReserved(page);
370 init_page_count(page); 370 init_page_count(page);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 80d110635d24..f47364585ecd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -17,7 +17,7 @@
17#include <linux/nodemask.h> 17#include <linux/nodemask.h>
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20#include <linux/lmb.h> 20#include <linux/memblock.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/pfn.h> 22#include <linux/pfn.h>
23#include <asm/sparsemem.h> 23#include <asm/sparsemem.h>
@@ -351,7 +351,7 @@ struct of_drconf_cell {
351#define DRCONF_MEM_RESERVED 0x00000080 351#define DRCONF_MEM_RESERVED 0x00000080
352 352
353/* 353/*
354 * Read the next lmb list entry from the ibm,dynamic-memory property 354 * Read the next memblock list entry from the ibm,dynamic-memory property
355 * and return the information in the provided of_drconf_cell structure. 355 * and return the information in the provided of_drconf_cell structure.
356 */ 356 */
357static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) 357static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -372,8 +372,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
372/* 372/*
373 * Retrieve and validate the ibm,dynamic-memory property of the device tree. 373 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
374 * 374 *
375 * The layout of the ibm,dynamic-memory property is a count N followed by 375 * The layout of the ibm,dynamic-memory property is a count N followed by
376 * N lmb list entries. Each lmb list entry 376 * N memblock list entries. Each memblock list entry
377 * contains information as laid out in the of_drconf_cell struct above. 377 * contains information as laid out in the of_drconf_cell struct above.
378 */ 378 */
379static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) 379static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
398} 398}
399 399
400/* 400/*
401 * Retrieve and validate the ibm,lmb-size property for drconf memory 401 * Retrieve and validate the ibm,memblock-size property for drconf memory
402 * from the device tree. 402 * from the device tree.
403 */ 403 */
404static u64 of_get_lmb_size(struct device_node *memory) 404static u64 of_get_memblock_size(struct device_node *memory)
405{ 405{
406 const u32 *prop; 406 const u32 *prop;
407 u32 len; 407 u32 len;
408 408
409 prop = of_get_property(memory, "ibm,lmb-size", &len); 409 prop = of_get_property(memory, "ibm,memblock-size", &len);
410 if (!prop || len < sizeof(unsigned int)) 410 if (!prop || len < sizeof(unsigned int))
411 return 0; 411 return 0;
412 412
@@ -540,19 +540,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
540 unsigned long size) 540 unsigned long size)
541{ 541{
542 /* 542 /*
543 * We use lmb_end_of_DRAM() in here instead of memory_limit because 543 * We use memblock_end_of_DRAM() in here instead of memory_limit because
544 * we've already adjusted it for the limit and it takes care of 544 * we've already adjusted it for the limit and it takes care of
545 * having memory holes below the limit. Also, in the case of 545 * having memory holes below the limit. Also, in the case of
546 * iommu_is_off, memory_limit is not set but is implicitly enforced. 546 * iommu_is_off, memory_limit is not set but is implicitly enforced.
547 */ 547 */
548 548
549 if (start + size <= lmb_end_of_DRAM()) 549 if (start + size <= memblock_end_of_DRAM())
550 return size; 550 return size;
551 551
552 if (start >= lmb_end_of_DRAM()) 552 if (start >= memblock_end_of_DRAM())
553 return 0; 553 return 0;
554 554
555 return lmb_end_of_DRAM() - start; 555 return memblock_end_of_DRAM() - start;
556} 556}
557 557
558/* 558/*
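numa_enforce_memory_limit(), shown in full in the hunk above, is a three-way clamp of [start, start + size) against the end of usable DRAM. Stated as a standalone function; the name clamp_to_dram is ours, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Return how much of [start, start + size) survives a cut at end_of_dram. */
static uint64_t clamp_to_dram(uint64_t start, uint64_t size, uint64_t end_of_dram)
{
    if (start + size <= end_of_dram)
        return size;                /* entirely below the limit */
    if (start >= end_of_dram)
        return 0;                   /* entirely above the limit */
    return end_of_dram - start;     /* straddles it: keep the low part */
}

int main(void)
{
    /* Range 0x1000..0x4000 cut at 0x2000 keeps 0x1000 bytes. */
    printf("%llu\n", (unsigned long long)clamp_to_dram(0x1000, 0x3000, 0x2000));
    return 0;
}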
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
562static inline int __init read_usm_ranges(const u32 **usm) 562static inline int __init read_usm_ranges(const u32 **usm)
563{ 563{
564 /* 564 /*
565 * For each lmb in ibm,dynamic-memory a corresponding 565 * For each memblock in ibm,dynamic-memory a corresponding
566 * entry in linux,drconf-usable-memory property contains 566 * entry in linux,drconf-usable-memory property contains
567 * a counter followed by that many (base, size) pairs. 567 * a counter followed by that many (base, size) pairs.
568 * read the counter from linux,drconf-usable-memory 568 * read the counter from linux,drconf-usable-memory
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
578{ 578{
579 const u32 *dm, *usm; 579 const u32 *dm, *usm;
580 unsigned int n, rc, ranges, is_kexec_kdump = 0; 580 unsigned int n, rc, ranges, is_kexec_kdump = 0;
581 unsigned long lmb_size, base, size, sz; 581 unsigned long memblock_size, base, size, sz;
582 int nid; 582 int nid;
583 struct assoc_arrays aa; 583 struct assoc_arrays aa;
584 584
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
586 if (!n) 586 if (!n)
587 return; 587 return;
588 588
589 lmb_size = of_get_lmb_size(memory); 589 memblock_size = of_get_memblock_size(memory);
590 if (!lmb_size) 590 if (!memblock_size)
591 return; 591 return;
592 592
593 rc = of_get_assoc_arrays(memory, &aa); 593 rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
611 continue; 611 continue;
612 612
613 base = drmem.base_addr; 613 base = drmem.base_addr;
614 size = lmb_size; 614 size = memblock_size;
615 ranges = 1; 615 ranges = 1;
616 616
617 if (is_kexec_kdump) { 617 if (is_kexec_kdump) {
@@ -731,7 +731,7 @@ new_range:
731 } 731 }
732 732
733 /* 733 /*
734 * Now do the same thing for each LMB listed in the ibm,dynamic-memory 734 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
735 * property in the ibm,dynamic-reconfiguration-memory node. 735 * property in the ibm,dynamic-reconfiguration-memory node.
736 */ 736 */
737 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 737 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -743,8 +743,8 @@ new_range:
743 743
744static void __init setup_nonnuma(void) 744static void __init setup_nonnuma(void)
745{ 745{
746 unsigned long top_of_ram = lmb_end_of_DRAM(); 746 unsigned long top_of_ram = memblock_end_of_DRAM();
747 unsigned long total_ram = lmb_phys_mem_size(); 747 unsigned long total_ram = memblock_phys_mem_size();
748 unsigned long start_pfn, end_pfn; 748 unsigned long start_pfn, end_pfn;
749 unsigned int i, nid = 0; 749 unsigned int i, nid = 0;
750 750
@@ -753,9 +753,9 @@ static void __init setup_nonnuma(void)
753 printk(KERN_DEBUG "Memory hole size: %ldMB\n", 753 printk(KERN_DEBUG "Memory hole size: %ldMB\n",
754 (top_of_ram - total_ram) >> 20); 754 (top_of_ram - total_ram) >> 20);
755 755
756 for (i = 0; i < lmb.memory.cnt; ++i) { 756 for (i = 0; i < memblock.memory.cnt; ++i) {
757 start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; 757 start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
758 end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); 758 end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
759 759
760 fake_numa_create_new_node(end_pfn, &nid); 760 fake_numa_create_new_node(end_pfn, &nid);
761 add_active_range(nid, start_pfn, end_pfn); 761 add_active_range(nid, start_pfn, end_pfn);
@@ -813,7 +813,7 @@ static void __init dump_numa_memory_topology(void)
813 813
814 count = 0; 814 count = 0;
815 815
816 for (i = 0; i < lmb_end_of_DRAM(); 816 for (i = 0; i < memblock_end_of_DRAM();
817 i += (1 << SECTION_SIZE_BITS)) { 817 i += (1 << SECTION_SIZE_BITS)) {
818 if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) { 818 if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
819 if (count == 0) 819 if (count == 0)
@@ -833,7 +833,7 @@ static void __init dump_numa_memory_topology(void)
833} 833}
834 834
835/* 835/*
836 * Allocate some memory, satisfying the lmb or bootmem allocator where 836 * Allocate some memory, satisfying the memblock or bootmem allocator where
837 * required. nid is the preferred node and end is the physical address of 837 * required. nid is the preferred node and end is the physical address of
838 * the highest address in the node. 838 * the highest address in the node.
839 * 839 *
@@ -847,11 +847,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
847 int new_nid; 847 int new_nid;
848 unsigned long ret_paddr; 848 unsigned long ret_paddr;
849 849
850 ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT); 850 ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
851 851
852 /* retry over all memory */ 852 /* retry over all memory */
853 if (!ret_paddr) 853 if (!ret_paddr)
854 ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM()); 854 ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
855 855
856 if (!ret_paddr) 856 if (!ret_paddr)
857 panic("numa.c: cannot allocate %lu bytes for node %d", 857 panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -861,14 +861,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
861 861
862 /* 862 /*
863 * We initialize the nodes in numeric order: 0, 1, 2... 863 * We initialize the nodes in numeric order: 0, 1, 2...
864 * and hand over control from the LMB allocator to the 864 * and hand over control from the MEMBLOCK allocator to the
865 * bootmem allocator. If this function is called for 865 * bootmem allocator. If this function is called for
866 * node 5, then we know that all nodes <5 are using the 866 * node 5, then we know that all nodes <5 are using the
867 * bootmem allocator instead of the LMB allocator. 867 * bootmem allocator instead of the MEMBLOCK allocator.
868 * 868 *
869 * So, check the nid from which this allocation came 869 * So, check the nid from which this allocation came
870 * and double check to see if we need to use bootmem 870 * and double check to see if we need to use bootmem
871 * instead of the LMB. We don't free the LMB memory 871 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
872 * since it would be useless. 872 * since it would be useless.
873 */ 873 */
874 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); 874 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
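careful_zallocation() tries a node-local allocation first and only then retries over all of DRAM. A toy top-down allocator showing just that retry shape; the addresses and the alloc_base() helper are invented stand-ins for __memblock_alloc_base():

#include <stdint.h>
#include <stdio.h>

/* One free range to carve from, top down. */
static uint64_t free_base = 0x40000000, free_top = 0x80000000;

static uint64_t alloc_base(uint64_t size, uint64_t align, uint64_t limit)
{
    uint64_t addr = (free_top - size) & ~(align - 1); /* aligned, top down */
    if (addr < free_base || addr + size > limit)
        return 0;                   /* nothing available below 'limit' */
    free_top = addr;
    return addr;
}

int main(void)
{
    /* Mirror the retry: below the node's end first, then all of DRAM. */
    uint64_t node_end = 0x20000000, dram_end = 0x80000000;
    uint64_t paddr = alloc_base(4096, 4096, node_end);
    if (!paddr)
        paddr = alloc_base(4096, 4096, dram_end);
    if (!paddr) {
        fprintf(stderr, "cannot allocate\n");
        return 1;
    }
    printf("allocated at 0x%llx\n", (unsigned long long)paddr);
    return 0;
}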
@@ -893,9 +893,9 @@ static void mark_reserved_regions_for_nid(int nid)
893 struct pglist_data *node = NODE_DATA(nid); 893 struct pglist_data *node = NODE_DATA(nid);
894 int i; 894 int i;
895 895
896 for (i = 0; i < lmb.reserved.cnt; i++) { 896 for (i = 0; i < memblock.reserved.cnt; i++) {
897 unsigned long physbase = lmb.reserved.region[i].base; 897 unsigned long physbase = memblock.reserved.region[i].base;
898 unsigned long size = lmb.reserved.region[i].size; 898 unsigned long size = memblock.reserved.region[i].size;
899 unsigned long start_pfn = physbase >> PAGE_SHIFT; 899 unsigned long start_pfn = physbase >> PAGE_SHIFT;
900 unsigned long end_pfn = PFN_UP(physbase + size); 900 unsigned long end_pfn = PFN_UP(physbase + size);
901 struct node_active_region node_ar; 901 struct node_active_region node_ar;
@@ -903,7 +903,7 @@ static void mark_reserved_regions_for_nid(int nid)
903 node->node_spanned_pages; 903 node->node_spanned_pages;
904 904
905 /* 905 /*
906 * Check to make sure that this lmb.reserved area is 906 * Check to make sure that this memblock.reserved area is
907 * within the bounds of the node that we care about. 907 * within the bounds of the node that we care about.
908 * Checking the nid of the start and end points is not 908 * Checking the nid of the start and end points is not
909 * sufficient because the reserved area could span the 909 * sufficient because the reserved area could span the
@@ -961,7 +961,7 @@ void __init do_init_bootmem(void)
961 int nid; 961 int nid;
962 962
963 min_low_pfn = 0; 963 min_low_pfn = 0;
964 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 964 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
965 max_pfn = max_low_pfn; 965 max_pfn = max_low_pfn;
966 966
967 if (parse_numa_properties()) 967 if (parse_numa_properties())
@@ -1038,7 +1038,7 @@ void __init paging_init(void)
1038{ 1038{
1039 unsigned long max_zone_pfns[MAX_NR_ZONES]; 1039 unsigned long max_zone_pfns[MAX_NR_ZONES];
1040 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 1040 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1041 max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT; 1041 max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
1042 free_area_init_nodes(max_zone_pfns); 1042 free_area_init_nodes(max_zone_pfns);
1043} 1043}
1044 1044
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1072{ 1072{
1073 const u32 *dm; 1073 const u32 *dm;
1074 unsigned int drconf_cell_cnt, rc; 1074 unsigned int drconf_cell_cnt, rc;
1075 unsigned long lmb_size; 1075 unsigned long memblock_size;
1076 struct assoc_arrays aa; 1076 struct assoc_arrays aa;
1077 int nid = -1; 1077 int nid = -1;
1078 1078
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1080 if (!drconf_cell_cnt) 1080 if (!drconf_cell_cnt)
1081 return -1; 1081 return -1;
1082 1082
1083 lmb_size = of_get_lmb_size(memory); 1083 memblock_size = of_get_memblock_size(memory);
1084 if (!lmb_size) 1084 if (!memblock_size)
1085 return -1; 1085 return -1;
1086 1086
1087 rc = of_get_assoc_arrays(memory, &aa); 1087 rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1100 continue; 1100 continue;
1101 1101
1102 if ((scn_addr < drmem.base_addr) 1102 if ((scn_addr < drmem.base_addr)
1103 || (scn_addr >= (drmem.base_addr + lmb_size))) 1103 || (scn_addr >= (drmem.base_addr + memblock_size)))
1104 continue; 1104 continue;
1105 1105
1106 nid = of_drconf_to_nid_single(&drmem, &aa); 1106 nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1113,7 +1113,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1113/* 1113/*
1114 * Find the node associated with a hot added memory section for memory 1114 * Find the node associated with a hot added memory section for memory
1115 * represented in the device tree as a node (i.e. memory@XXXX) for 1115 * represented in the device tree as a node (i.e. memory@XXXX) for
1116 * each lmb. 1116 * each memblock.
1117 */ 1117 */
1118int hot_add_node_scn_to_nid(unsigned long scn_addr) 1118int hot_add_node_scn_to_nid(unsigned long scn_addr)
1119{ 1119{
@@ -1154,8 +1154,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
1154 1154
1155/* 1155/*
1156 * Find the node associated with a hot added memory section. Section 1156 * Find the node associated with a hot added memory section. Section
1157 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that 1157 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
1158 * sections are fully contained within a single LMB. 1158 * sections are fully contained within a single MEMBLOCK.
1159 */ 1159 */
1160int hot_add_scn_to_nid(unsigned long scn_addr) 1160int hot_add_scn_to_nid(unsigned long scn_addr)
1161{ 1161{
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 34347b2e7e31..a87ead0138b4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,7 +26,7 @@
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/lmb.h> 29#include <linux/memblock.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
@@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
198 * mem_init() sets high_memory so only do the check after that. 198 * mem_init() sets high_memory so only do the check after that.
199 */ 199 */
200 if (mem_init_done && (p < virt_to_phys(high_memory)) && 200 if (mem_init_done && (p < virt_to_phys(high_memory)) &&
201 !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) { 201 !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
202 printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n", 202 printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
203 (unsigned long long)p, __builtin_return_address(0)); 203 (unsigned long long)p, __builtin_return_address(0));
204 return NULL; 204 return NULL;
@@ -331,7 +331,7 @@ void __init mapin_ram(void)
331 s = mmu_mapin_ram(top); 331 s = mmu_mapin_ram(top);
332 __mapin_ram_chunk(s, top); 332 __mapin_ram_chunk(s, top);
333 333
334 top = lmb_end_of_DRAM(); 334 top = memblock_end_of_DRAM();
335 s = wii_mmu_mapin_mem2(top); 335 s = wii_mmu_mapin_mem2(top);
336 __mapin_ram_chunk(s, top); 336 __mapin_ram_chunk(s, top);
337 } 337 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d050fc8d9714..21d6dfab7942 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -34,7 +34,7 @@
34#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/bootmem.h> 36#include <linux/bootmem.h>
37#include <linux/lmb.h> 37#include <linux/memblock.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39 39
40#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
@@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size)
67 if (init_bootmem_done) 67 if (init_bootmem_done)
68 pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS)); 68 pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
69 else 69 else
70 pt = __va(lmb_alloc_base(size, size, 70 pt = __va(memblock_alloc_base(size, size,
71 __pa(MAX_DMA_ADDRESS))); 71 __pa(MAX_DMA_ADDRESS)));
72 memset(pt, 0, size); 72 memset(pt, 0, size);
73 73
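early_alloc_pgtable() picks its allocator by boot phase: memblock before bootmem is initialized, bootmem afterwards, with the result zeroed either way. A userspace model of that switch, where calloc() stands in for both allocators:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool bootmem_up; /* models init_bootmem_done */

static void *early_alloc(size_t size)
{
    /* calloc() models both paths returning zeroed memory; the real
     * code picks __alloc_bootmem() or memblock_alloc_base(). */
    void *pt = calloc(1, size);
    printf("allocated %zu bytes from %s\n", size,
           bootmem_up ? "bootmem" : "memblock");
    return pt;
}

int main(void)
{
    early_alloc(4096);  /* early boot: memblock path */
    bootmem_up = true;
    early_alloc(4096);  /* after bootmem init: bootmem path */
    return 0;
}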
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f11c2cdcb0fe..f8a01829d64f 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -26,7 +26,7 @@
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/lmb.h> 29#include <linux/memblock.h>
30 30
31#include <asm/prom.h> 31#include <asm/prom.h>
32#include <asm/mmu.h> 32#include <asm/mmu.h>
@@ -223,7 +223,7 @@ void __init MMU_init_hw(void)
223 * Find some memory for the hash table. 223 * Find some memory for the hash table.
224 */ 224 */
225 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); 225 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
226 Hash = __va(lmb_alloc_base(Hash_size, Hash_size, 226 Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
227 __initial_memory_limit_addr)); 227 __initial_memory_limit_addr));
228 cacheable_memzero(Hash, Hash_size); 228 cacheable_memzero(Hash, Hash_size);
229 _SDR1 = __pa(Hash) | SDR1_LOW_BITS; 229 _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 687fddaa24c5..446a01842a73 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -12,7 +12,7 @@
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 */ 13 */
14 14
15#include <linux/lmb.h> 15#include <linux/memblock.h>
16 16
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/mmu.h> 18#include <asm/mmu.h>
@@ -252,7 +252,7 @@ void __init stabs_alloc(void)
252 if (cpu == 0) 252 if (cpu == 0)
253 continue; /* stab for CPU 0 is statically allocated */ 253 continue; /* stab for CPU 0 is statically allocated */
254 254
255 newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE, 255 newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
256 1<<SID_SHIFT); 256 1<<SID_SHIFT);
257 newstab = (unsigned long)__va(newstab); 257 newstab = (unsigned long)__va(newstab);
258 258
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index e81d5d67f834..d8695b02a968 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -34,7 +34,7 @@
34#include <linux/pagemap.h> 34#include <linux/pagemap.h>
35#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/lmb.h> 37#include <linux/memblock.h>
38 38
39#include <asm/tlbflush.h> 39#include <asm/tlbflush.h>
40#include <asm/tlb.h> 40#include <asm/tlb.h>
@@ -426,7 +426,7 @@ static void __early_init_mmu(int boot_cpu)
426 /* Set the global containing the top of the linear mapping 426 /* Set the global containing the top of the linear mapping
427 * for use by the TLB miss code 427 * for use by the TLB miss code
428 */ 428 */
429 linear_map_top = lmb_end_of_DRAM(); 429 linear_map_top = memblock_end_of_DRAM();
430 430
431 /* A sync won't hurt us after mucking around with 431 /* A sync won't hurt us after mucking around with
432 * the MMU configuration 432 * the MMU configuration
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 534c2ecc89d9..2ab338c9ac37 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -16,7 +16,7 @@
16#include <linux/kdev_t.h> 16#include <linux/kdev_t.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/lmb.h> 19#include <linux/memblock.h>
20 20
21#include <asm/system.h> 21#include <asm/system.h>
22#include <asm/time.h> 22#include <asm/time.h>
@@ -100,7 +100,7 @@ void __init corenet_ds_setup_arch(void)
100#endif 100#endif
101 101
102#ifdef CONFIG_SWIOTLB 102#ifdef CONFIG_SWIOTLB
103 if (lmb_end_of_DRAM() > max) { 103 if (memblock_end_of_DRAM() > max) {
104 ppc_swiotlb_enable = 1; 104 ppc_swiotlb_enable = 1;
105 set_pci_dma_ops(&swiotlb_dma_ops); 105 set_pci_dma_ops(&swiotlb_dma_ops);
106 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 106 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
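This guard repeats almost verbatim across the 85xx/86xx board files below: SWIOTLB bounce buffering is enabled only when RAM extends past the device-addressable ceiling ('max') computed earlier in each setup_arch(). The comparison in isolation, with invented values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t end_of_dram = 0x180000000ull; /* 6 GB of RAM, invented */
    uint64_t dma_ceiling = 0x100000000ull; /* 32-bit DMA limit, invented */
    bool swiotlb = end_of_dram > dma_ceiling;
    printf("swiotlb %s\n", swiotlb ? "enabled" : "not needed");
    return 0;
}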
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 004b7d36cdb7..f79f2f102141 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -17,7 +17,7 @@
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/lmb.h> 20#include <linux/memblock.h>
21 21
22#include <asm/system.h> 22#include <asm/system.h>
23#include <asm/time.h> 23#include <asm/time.h>
@@ -94,7 +94,7 @@ static void __init mpc8536_ds_setup_arch(void)
94#endif 94#endif
95 95
96#ifdef CONFIG_SWIOTLB 96#ifdef CONFIG_SWIOTLB
97 if (lmb_end_of_DRAM() > max) { 97 if (memblock_end_of_DRAM() > max) {
98 ppc_swiotlb_enable = 1; 98 ppc_swiotlb_enable = 1;
99 set_pci_dma_ops(&swiotlb_dma_ops); 99 set_pci_dma_ops(&swiotlb_dma_ops);
100 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 100 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 544011a562fb..8190bc25bf27 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -20,7 +20,7 @@
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/lmb.h> 23#include <linux/memblock.h>
24 24
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/time.h> 26#include <asm/time.h>
@@ -190,7 +190,7 @@ static void __init mpc85xx_ds_setup_arch(void)
190#endif 190#endif
191 191
192#ifdef CONFIG_SWIOTLB 192#ifdef CONFIG_SWIOTLB
193 if (lmb_end_of_DRAM() > max) { 193 if (memblock_end_of_DRAM() > max) {
194 ppc_swiotlb_enable = 1; 194 ppc_swiotlb_enable = 1;
195 set_pci_dma_ops(&swiotlb_dma_ops); 195 set_pci_dma_ops(&swiotlb_dma_ops);
196 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 196 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 8fe87fc61485..494513682d70 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -33,7 +33,7 @@
33#include <linux/of_platform.h> 33#include <linux/of_platform.h>
34#include <linux/of_device.h> 34#include <linux/of_device.h>
35#include <linux/phy.h> 35#include <linux/phy.h>
36#include <linux/lmb.h> 36#include <linux/memblock.h>
37 37
38#include <asm/system.h> 38#include <asm/system.h>
39#include <asm/atomic.h> 39#include <asm/atomic.h>
@@ -325,7 +325,7 @@ static void __init mpc85xx_mds_setup_arch(void)
325#endif /* CONFIG_QUICC_ENGINE */ 325#endif /* CONFIG_QUICC_ENGINE */
326 326
327#ifdef CONFIG_SWIOTLB 327#ifdef CONFIG_SWIOTLB
328 if (lmb_end_of_DRAM() > max) { 328 if (memblock_end_of_DRAM() > max) {
329 ppc_swiotlb_enable = 1; 329 ppc_swiotlb_enable = 1;
330 set_pci_dma_ops(&swiotlb_dma_ops); 330 set_pci_dma_ops(&swiotlb_dma_ops);
331 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 331 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 2aa69a69bcc8..b11c3535f350 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -19,7 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/of_platform.h> 21#include <linux/of_platform.h>
22#include <linux/lmb.h> 22#include <linux/memblock.h>
23 23
24#include <asm/system.h> 24#include <asm/system.h>
25#include <asm/time.h> 25#include <asm/time.h>
@@ -103,7 +103,7 @@ mpc86xx_hpcn_setup_arch(void)
103#endif 103#endif
104 104
105#ifdef CONFIG_SWIOTLB 105#ifdef CONFIG_SWIOTLB
106 if (lmb_end_of_DRAM() > max) { 106 if (memblock_end_of_DRAM() > max) {
107 ppc_swiotlb_enable = 1; 107 ppc_swiotlb_enable = 1;
108 set_pci_dma_ops(&swiotlb_dma_ops); 108 set_pci_dma_ops(&swiotlb_dma_ops);
109 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 109 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 4326b737d913..3712900471ba 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -29,7 +29,7 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/lmb.h> 32#include <linux/memblock.h>
33 33
34#include <asm/prom.h> 34#include <asm/prom.h>
35#include <asm/iommu.h> 35#include <asm/iommu.h>
@@ -845,10 +845,10 @@ static int __init cell_iommu_init_disabled(void)
845 /* If we found a DMA window, we check if it's big enough to enclose 845 /* If we found a DMA window, we check if it's big enough to enclose
846 * all of physical memory. If not, we force enable IOMMU 846 * all of physical memory. If not, we force enable IOMMU
847 */ 847 */
848 if (np && size < lmb_end_of_DRAM()) { 848 if (np && size < memblock_end_of_DRAM()) {
849 printk(KERN_WARNING "iommu: force-enabled, dma window" 849 printk(KERN_WARNING "iommu: force-enabled, dma window"
850 " (%ldMB) smaller than total memory (%lldMB)\n", 850 " (%ldMB) smaller than total memory (%lldMB)\n",
851 size >> 20, lmb_end_of_DRAM() >> 20); 851 size >> 20, memblock_end_of_DRAM() >> 20);
852 return -ENODEV; 852 return -ENODEV;
853 } 853 }
854 854
@@ -1064,7 +1064,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
1064 } 1064 }
1065 1065
1066 fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT); 1066 fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
1067 fsize = lmb_phys_mem_size(); 1067 fsize = memblock_phys_mem_size();
1068 1068
1069 if ((fbase + fsize) <= 0x800000000ul) 1069 if ((fbase + fsize) <= 0x800000000ul)
1070 hbase = 0; /* use the device tree window */ 1070 hbase = 0; /* use the device tree window */
@@ -1169,7 +1169,7 @@ static int __init cell_iommu_init(void)
1169 * Note: should we make sure we have the IOMMU actually disabled ? 1169 * Note: should we make sure we have the IOMMU actually disabled ?
1170 */ 1170 */
1171 if (iommu_is_off || 1171 if (iommu_is_off ||
1172 (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull)) 1172 (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
1173 if (cell_iommu_init_disabled() == 0) 1173 if (cell_iommu_init_disabled() == 0)
1174 goto bail; 1174 goto bail;
1175 1175
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 174a04ac4806..5cdcc7c8d973 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -20,7 +20,7 @@
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/kexec.h> 21#include <linux/kexec.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/lmb.h> 23#include <linux/memblock.h>
24#include <mm/mmu_decl.h> 24#include <mm/mmu_decl.h>
25 25
26#include <asm/io.h> 26#include <asm/io.h>
@@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x)
65 65
66void __init wii_memory_fixups(void) 66void __init wii_memory_fixups(void)
67{ 67{
68 struct lmb_property *p = lmb.memory.region; 68 struct memblock_property *p = memblock.memory.region;
69 69
70 /* 70 /*
71 * This is part of a workaround to allow the use of two 71 * This is part of a workaround to allow the use of two
@@ -77,7 +77,7 @@ void __init wii_memory_fixups(void)
77 * between both ranges. 77 * between both ranges.
78 */ 78 */
79 79
80 BUG_ON(lmb.memory.cnt != 2); 80 BUG_ON(memblock.memory.cnt != 2);
81 BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); 81 BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
82 82
83 p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE); 83 p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
@@ -92,11 +92,11 @@ void __init wii_memory_fixups(void)
92 92
93 p[0].size += wii_hole_size + p[1].size; 93 p[0].size += wii_hole_size + p[1].size;
94 94
95 lmb.memory.cnt = 1; 95 memblock.memory.cnt = 1;
96 lmb_analyze(); 96 memblock_analyze();
97 97
98 /* reserve the hole */ 98 /* reserve the hole */
99 lmb_reserve(wii_hole_start, wii_hole_size); 99 memblock_reserve(wii_hole_start, wii_hole_size);
100 100
101 /* allow ioremapping the address space in the hole */ 101 /* allow ioremapping the address space in the hole */
102 __allow_ioremap_reserved = 1; 102 __allow_ioremap_reserved = 1;
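wii_memory_fixups() collapses the console's two RAM ranges into one region spanning both, then reserves the gap so the allocator never hands it out, while __allow_ioremap_reserved keeps ioremap of the hole possible. The arithmetic as a standalone sketch; the sizes are invented, not the real MEM1/MEM2 layout:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

int main(void)
{
    struct region mem[2] = {
        { 0x00000000, 0x01800000 }, /* first range, 24 MB */
        { 0x10000000, 0x04000000 }, /* second range, 64 MB */
    };

    /* The hole between the two ranges. */
    uint64_t hole_start = mem[0].base + mem[0].size;
    uint64_t hole_size  = mem[1].base - hole_start;

    /* Fold both ranges plus the hole into a single covering region,
     * as the patch does before calling memblock_analyze(). */
    mem[0].size += hole_size + mem[1].size;

    printf("one region: 0x%llx + 0x%llx, reserve hole 0x%llx + 0x%llx\n",
           (unsigned long long)mem[0].base, (unsigned long long)mem[0].size,
           (unsigned long long)hole_start, (unsigned long long)hole_size);
    return 0;
}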
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 39df70529d29..3fff8d979b41 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -41,7 +41,7 @@
41#include <linux/smp.h> 41#include <linux/smp.h>
42#include <linux/bitops.h> 42#include <linux/bitops.h>
43#include <linux/of_device.h> 43#include <linux/of_device.h>
44#include <linux/lmb.h> 44#include <linux/memblock.h>
45 45
46#include <asm/processor.h> 46#include <asm/processor.h>
47#include <asm/sections.h> 47#include <asm/sections.h>
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7b1d608ea3c8..1f9fb2c57761 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -204,7 +204,7 @@ int __init iob_init(struct device_node *dn)
204 pr_debug(" -> %s\n", __func__); 204 pr_debug(" -> %s\n", __func__);
205 205
206 /* Allocate a spare page to map all invalid IOTLB pages. */ 206 /* Allocate a spare page to map all invalid IOTLB pages. */
207 tmp = lmb_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE); 207 tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
208 if (!tmp) 208 if (!tmp)
209 panic("IOBMAP: Cannot allocate spare page!"); 209 panic("IOBMAP: Cannot allocate spare page!");
210 /* Empty l1 is marked invalid */ 210 /* Empty l1 is marked invalid */
@@ -275,7 +275,7 @@ void __init alloc_iobmap_l2(void)
275 return; 275 return;
276#endif 276#endif
277 /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ 277 /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
278 iob_l2_base = (u32 *)abs_to_virt(lmb_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); 278 iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
279 279
280 printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); 280 printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
281} 281}
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index f1d0132ebcc7..9deb274841f1 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -51,7 +51,7 @@
51#include <linux/suspend.h> 51#include <linux/suspend.h>
52#include <linux/of_device.h> 52#include <linux/of_device.h>
53#include <linux/of_platform.h> 53#include <linux/of_platform.h>
54#include <linux/lmb.h> 54#include <linux/memblock.h>
55 55
56#include <asm/reg.h> 56#include <asm/reg.h>
57#include <asm/sections.h> 57#include <asm/sections.h>
@@ -619,7 +619,7 @@ static int __init pmac_probe(void)
619 * driver needs that. We have to allocate it now. We allocate 4k 619 * driver needs that. We have to allocate it now. We allocate 4k
620 * (1 small page) for now. 620 * (1 small page) for now.
621 */ 621 */
622 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL); 622 smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
623#endif /* CONFIG_PMAC_SMU */ 623#endif /* CONFIG_PMAC_SMU */
624 624
625 return 1; 625 return 1;
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 1e8a1e39dfe8..2c0ed87f2024 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -19,7 +19,7 @@
19 */ 19 */
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/lmb.h> 22#include <linux/memblock.h>
23 23
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/prom.h> 25#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 7925751e464a..c2045880e674 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -21,7 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/memory_hotplug.h> 23#include <linux/memory_hotplug.h>
24#include <linux/lmb.h> 24#include <linux/memblock.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27#include <asm/cell-regs.h> 27#include <asm/cell-regs.h>
@@ -318,8 +318,8 @@ static int __init ps3_mm_add_memory(void)
318 return result; 318 return result;
319 } 319 }
320 320
321 lmb_add(start_addr, map.r1.size); 321 memblock_add(start_addr, map.r1.size);
322 lmb_analyze(); 322 memblock_analyze();
323 323
324 result = online_pages(start_pfn, nr_pages); 324 result = online_pages(start_pfn, nr_pages);
325 325
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index dd521a181f23..5b759b669598 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -24,7 +24,7 @@
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/syscalls.h> 25#include <linux/syscalls.h>
26#include <linux/ctype.h> 26#include <linux/ctype.h>
27#include <linux/lmb.h> 27#include <linux/memblock.h>
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
@@ -723,7 +723,7 @@ static void os_area_queue_work(void)
723 * flash to a high address in the boot memory region and then puts that RAM 723 * flash to a high address in the boot memory region and then puts that RAM
724 * address and the byte count into the repository for retrieval by the guest. 724 * address and the byte count into the repository for retrieval by the guest.
725 * We copy the data we want into a static variable and allow the memory setup 725 * We copy the data we want into a static variable and allow the memory setup
726 * by the HV to be claimed by the lmb manager. 726 * by the HV to be claimed by the memblock manager.
727 * 727 *
728 * The os area mirror will not be available to a second stage kernel, and 728 * The os area mirror will not be available to a second stage kernel, and
729 * the header verify will fail. In this case, the saved_params values will 729 * the header verify will fail. In this case, the saved_params values will
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 01e7b5bb3c1d..deab5f946090 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -10,14 +10,14 @@
10 */ 10 */
11 11
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/lmb.h> 13#include <linux/memblock.h>
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <asm/firmware.h> 15#include <asm/firmware.h>
16#include <asm/machdep.h> 16#include <asm/machdep.h>
17#include <asm/pSeries_reconfig.h> 17#include <asm/pSeries_reconfig.h>
18#include <asm/sparsemem.h> 18#include <asm/sparsemem.h>
19 19
20static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size) 20static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
21{ 21{
22 unsigned long start, start_pfn; 22 unsigned long start, start_pfn;
23 struct zone *zone; 23 struct zone *zone;
@@ -26,7 +26,7 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
26 start_pfn = base >> PAGE_SHIFT; 26 start_pfn = base >> PAGE_SHIFT;
27 27
28 if (!pfn_valid(start_pfn)) { 28 if (!pfn_valid(start_pfn)) {
29 lmb_remove(base, lmb_size); 29 memblock_remove(base, memblock_size);
30 return 0; 30 return 0;
31 } 31 }
32 32
@@ -41,20 +41,20 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
41 * to sysfs "state" file and we can't remove sysfs entries 41 * to sysfs "state" file and we can't remove sysfs entries
42 * while writing to it. So we have to defer it to here. 42 * while writing to it. So we have to defer it to here.
43 */ 43 */
44 ret = __remove_pages(zone, start_pfn, lmb_size >> PAGE_SHIFT); 44 ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
45 if (ret) 45 if (ret)
46 return ret; 46 return ret;
47 47
48 /* 48 /*
49 * Update memory regions for memory remove 49 * Update memory regions for memory remove
50 */ 50 */
51 lmb_remove(base, lmb_size); 51 memblock_remove(base, memblock_size);
52 52
53 /* 53 /*
54 * Remove htab bolted mappings for this section of memory 54 * Remove htab bolted mappings for this section of memory
55 */ 55 */
56 start = (unsigned long)__va(base); 56 start = (unsigned long)__va(base);
57 ret = remove_section_mapping(start, start + lmb_size); 57 ret = remove_section_mapping(start, start + memblock_size);
58 58
59 /* Ensure all vmalloc mappings are flushed in case they also 59 /* Ensure all vmalloc mappings are flushed in case they also
60 * hit that section of memory 60 * hit that section of memory
@@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np)
69 const char *type; 69 const char *type;
70 const unsigned int *regs; 70 const unsigned int *regs;
71 unsigned long base; 71 unsigned long base;
72 unsigned int lmb_size; 72 unsigned int memblock_size;
73 int ret = -EINVAL; 73 int ret = -EINVAL;
74 74
75 /* 75 /*
@@ -80,16 +80,16 @@ static int pseries_remove_memory(struct device_node *np)
80 return 0; 80 return 0;
81 81
82 /* 82 /*
83 * Find the base address and size of the lmb 83 * Find the base address and size of the memblock
84 */ 84 */
85 regs = of_get_property(np, "reg", NULL); 85 regs = of_get_property(np, "reg", NULL);
86 if (!regs) 86 if (!regs)
87 return ret; 87 return ret;
88 88
89 base = *(unsigned long *)regs; 89 base = *(unsigned long *)regs;
90 lmb_size = regs[3]; 90 memblock_size = regs[3];
91 91
92 ret = pseries_remove_lmb(base, lmb_size); 92 ret = pseries_remove_memblock(base, memblock_size);
93 return ret; 93 return ret;
94} 94}
95 95
@@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np)
98 const char *type; 98 const char *type;
99 const unsigned int *regs; 99 const unsigned int *regs;
100 unsigned long base; 100 unsigned long base;
101 unsigned int lmb_size; 101 unsigned int memblock_size;
102 int ret = -EINVAL; 102 int ret = -EINVAL;
103 103
104 /* 104 /*
@@ -109,43 +109,43 @@ static int pseries_add_memory(struct device_node *np)
109 return 0; 109 return 0;
110 110
111 /* 111 /*
112 * Find the base and size of the lmb 112 * Find the base and size of the memblock
113 */ 113 */
114 regs = of_get_property(np, "reg", NULL); 114 regs = of_get_property(np, "reg", NULL);
115 if (!regs) 115 if (!regs)
116 return ret; 116 return ret;
117 117
118 base = *(unsigned long *)regs; 118 base = *(unsigned long *)regs;
119 lmb_size = regs[3]; 119 memblock_size = regs[3];
120 120
121 /* 121 /*
122 * Update memory region to represent the memory add 122 * Update memory region to represent the memory add
123 */ 123 */
124 ret = lmb_add(base, lmb_size); 124 ret = memblock_add(base, memblock_size);
125 return (ret < 0) ? -EINVAL : 0; 125 return (ret < 0) ? -EINVAL : 0;
126} 126}
127 127
128static int pseries_drconf_memory(unsigned long *base, unsigned int action) 128static int pseries_drconf_memory(unsigned long *base, unsigned int action)
129{ 129{
130 struct device_node *np; 130 struct device_node *np;
131 const unsigned long *lmb_size; 131 const unsigned long *memblock_size;
132 int rc; 132 int rc;
133 133
134 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 134 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
135 if (!np) 135 if (!np)
136 return -EINVAL; 136 return -EINVAL;
137 137
138 lmb_size = of_get_property(np, "ibm,lmb-size", NULL); 138 memblock_size = of_get_property(np, "ibm,memblock-size", NULL);
139 if (!lmb_size) { 139 if (!memblock_size) {
140 of_node_put(np); 140 of_node_put(np);
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 143
144 if (action == PSERIES_DRCONF_MEM_ADD) { 144 if (action == PSERIES_DRCONF_MEM_ADD) {
145 rc = lmb_add(*base, *lmb_size); 145 rc = memblock_add(*base, *memblock_size);
146 rc = (rc < 0) ? -EINVAL : 0; 146 rc = (rc < 0) ? -EINVAL : 0;
147 } else if (action == PSERIES_DRCONF_MEM_REMOVE) { 147 } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
148 rc = pseries_remove_lmb(*base, *lmb_size); 148 rc = pseries_remove_memblock(*base, *memblock_size);
149 } else { 149 } else {
150 rc = -EINVAL; 150 rc = -EINVAL;
151 } 151 }
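pseries_add_memory() and pseries_remove_memory() above both read the node's "reg" property as four 32-bit cells: a 64-bit base in cells 0-1 and the block size in cell 3 (the kernel's *(unsigned long *)regs is the same read on 64-bit big-endian). An explicit, portable version of that decode, with invented cell values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Four "reg" cells, already in native byte order for this model. */
    uint32_t regs[4] = { 0x00000001, 0x00000000, 0x00000000, 0x10000000 };
    uint64_t base = ((uint64_t)regs[0] << 32) | regs[1]; /* cells 0-1 */
    uint32_t size = regs[3];                             /* cell 3 */
    printf("base 0x%llx size 0x%x\n", (unsigned long long)base, size);
    return 0;
}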
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index d26182d42cbf..395848e30c52 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -66,7 +66,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
66 tcep = ((u64 *)tbl->it_base) + index; 66 tcep = ((u64 *)tbl->it_base) + index;
67 67
68 while (npages--) { 68 while (npages--) {
69 /* can't move this out since we might cross an LMB boundary */ 69 /* can't move this out since we might cross a MEMBLOCK boundary */
70 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 70 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
71 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 71 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
72 72
diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c
index 7ebd9e88d369..6e7742da0072 100644
--- a/arch/powerpc/platforms/pseries/phyp_dump.c
+++ b/arch/powerpc/platforms/pseries/phyp_dump.c
@@ -255,12 +255,12 @@ void invalidate_last_dump(struct phyp_dump_header *ph, unsigned long addr)
255 255
256/* ------------------------------------------------- */ 256/* ------------------------------------------------- */
257/** 257/**
258 * release_memory_range -- release memory previously lmb_reserved 258 * release_memory_range -- release memory previously memblock_reserved
259 * @start_pfn: starting physical frame number 259 * @start_pfn: starting physical frame number
260 * @nr_pages: number of pages to free. 260 * @nr_pages: number of pages to free.
261 * 261 *
262 * This routine will release memory that had been previously 262 * This routine will release memory that had been previously
263 * lmb_reserved in early boot. The released memory becomes 263 * memblock_reserved in early boot. The released memory becomes
264 * available for general use. 264 * available for general use.
265 */ 265 */
266static void release_memory_range(unsigned long start_pfn, 266static void release_memory_range(unsigned long start_pfn,
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index c8b96ed7c015..559db2b846a9 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -36,7 +36,7 @@
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/suspend.h> 38#include <linux/suspend.h>
39#include <linux/lmb.h> 39#include <linux/memblock.h>
40#include <linux/gfp.h> 40#include <linux/gfp.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/prom.h> 42#include <asm/prom.h>
@@ -232,7 +232,7 @@ static int __init dart_init(struct device_node *dart_node)
232 * that to work around what looks like a problem with the HT bridge 232 * that to work around what looks like a problem with the HT bridge
233 * prefetching into invalid pages and corrupting data 233 * prefetching into invalid pages and corrupting data
234 */ 234 */
235 tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); 235 tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
236 dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & 236 dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
237 DARTMAP_RPNMASK); 237 DARTMAP_RPNMASK);
238 238
@@ -407,7 +407,7 @@ void __init alloc_dart_table(void)
407 if (iommu_is_off) 407 if (iommu_is_off)
408 return; 408 return;
409 409
410 if (!iommu_force_on && lmb_end_of_DRAM() <= 0x40000000ull) 410 if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
411 return; 411 return;
412 412
413 /* 512 pages (2MB) is max DART tablesize. */ 413 /* 512 pages (2MB) is max DART tablesize. */
@@ -416,7 +416,7 @@ void __init alloc_dart_table(void)
416 * will blow up an entire large page anyway in the kernel mapping 416 * will blow up an entire large page anyway in the kernel mapping
417 */ 417 */
418 dart_tablebase = (unsigned long) 418 dart_tablebase = (unsigned long)
419 abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); 419 abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
420 420
421 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); 421 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
422} 422}
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a14760fe513a..356c6a0e1b23 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -23,7 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26#include <linux/lmb.h> 26#include <linux/memblock.h>
27#include <linux/log2.h> 27#include <linux/log2.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29 29
@@ -190,7 +190,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
190 pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar); 190 pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar);
191 191
192 /* Setup inbound mem window */ 192 /* Setup inbound mem window */
193 mem = lmb_end_of_DRAM(); 193 mem = memblock_end_of_DRAM();
194 sz = min(mem, paddr_lo); 194 sz = min(mem, paddr_lo);
195 mem_log = __ilog2_u64(sz); 195 mem_log = __ilog2_u64(sz);
196 196
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 573fca1fbd9b..82868fee21fd 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -10,7 +10,7 @@ config SUPERH
10 select EMBEDDED 10 select EMBEDDED
11 select HAVE_CLK 11 select HAVE_CLK
12 select HAVE_IDE if HAS_IOPORT 12 select HAVE_IDE if HAS_IOPORT
13 select HAVE_LMB 13 select HAVE_MEMBLOCK
14 select HAVE_OPROFILE 14 select HAVE_OPROFILE
15 select HAVE_GENERIC_DMA_COHERENT 15 select HAVE_GENERIC_DMA_COHERENT
16 select HAVE_ARCH_TRACEHOOK 16 select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h
deleted file mode 100644
index 9b437f657ffa..000000000000
--- a/arch/sh/include/asm/lmb.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_LMB_H
2#define __ASM_SH_LMB_H
3
4#define LMB_REAL_LIMIT 0
5
6#endif /* __ASM_SH_LMB_H */
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
new file mode 100644
index 000000000000..dfe683b88075
--- /dev/null
+++ b/arch/sh/include/asm/memblock.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH_MEMBLOCK_H
2#define __ASM_SH_MEMBLOCK_H
3
4#define MEMBLOCK_REAL_LIMIT 0
5
6#endif /* __ASM_SH_MEMBLOCK_H */
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 5a559e666eb3..e2a3af31ff99 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -15,7 +15,7 @@
15#include <linux/numa.h> 15#include <linux/numa.h>
16#include <linux/ftrace.h> 16#include <linux/ftrace.h>
17#include <linux/suspend.h> 17#include <linux/suspend.h>
18#include <linux/lmb.h> 18#include <linux/memblock.h>
19#include <asm/pgtable.h> 19#include <asm/pgtable.h>
20#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
21#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
@@ -157,10 +157,10 @@ void __init reserve_crashkernel(void)
157 unsigned long long crash_size, crash_base; 157 unsigned long long crash_size, crash_base;
158 int ret; 158 int ret;
159 159
160 /* this is necessary because of lmb_phys_mem_size() */ 160 /* this is necessary because of memblock_phys_mem_size() */
161 lmb_analyze(); 161 memblock_analyze();
162 162
163 ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(), 163 ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
164 &crash_size, &crash_base); 164 &crash_size, &crash_base);
165 if (ret == 0 && crash_size > 0) { 165 if (ret == 0 && crash_size > 0) {
166 crashk_res.start = crash_base; 166 crashk_res.start = crash_base;
@@ -172,14 +172,14 @@ void __init reserve_crashkernel(void)
172 172
173 crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1); 173 crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
174 if (!crashk_res.start) { 174 if (!crashk_res.start) {
175 unsigned long max = lmb_end_of_DRAM() - memory_limit; 175 unsigned long max = memblock_end_of_DRAM() - memory_limit;
176 crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max); 176 crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
177 if (!crashk_res.start) { 177 if (!crashk_res.start) {
178 pr_err("crashkernel allocation failed\n"); 178 pr_err("crashkernel allocation failed\n");
179 goto disable; 179 goto disable;
180 } 180 }
181 } else { 181 } else {
182 ret = lmb_reserve(crashk_res.start, crash_size); 182 ret = memblock_reserve(crashk_res.start, crash_size);
183 if (unlikely(ret < 0)) { 183 if (unlikely(ret < 0)) {
184 pr_err("crashkernel reservation failed - " 184 pr_err("crashkernel reservation failed - "
185 "memory is in use\n"); 185 "memory is in use\n");
@@ -192,7 +192,7 @@ void __init reserve_crashkernel(void)
192 /* 192 /*
193 * Crash kernel trumps memory limit 193 * Crash kernel trumps memory limit
194 */ 194 */
195 if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) { 195 if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
196 memory_limit = 0; 196 memory_limit = 0;
197 pr_info("Disabled memory limit for crashkernel\n"); 197 pr_info("Disabled memory limit for crashkernel\n");
198 } 198 }
@@ -201,7 +201,7 @@ void __init reserve_crashkernel(void)
201 "for crashkernel (System RAM: %ldMB)\n", 201 "for crashkernel (System RAM: %ldMB)\n",
202 (unsigned long)(crash_size >> 20), 202 (unsigned long)(crash_size >> 20),
203 (unsigned long)(crashk_res.start), 203 (unsigned long)(crashk_res.start),
204 (unsigned long)(lmb_phys_mem_size() >> 20)); 204 (unsigned long)(memblock_phys_mem_size() >> 20));
205 205
206 return; 206 return;
207 207
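
The comment kept above ("this is necessary because of memblock_phys_mem_size()") deserves a word: memblock_phys_mem_size() does no walking of its own, it simply returns the memblock.memory.size total that memblock_analyze() computes, so the analyze call has to come first. A minimal sketch of that ordering dependency (illustrative helper name):

    #include <linux/memblock.h>

    /* memblock_phys_mem_size() returns the cached memory.size field;
     * only memblock_analyze() fills it in, so analyze must run first.
     */
    static u64 __init example_total_ram(void)
    {
            memblock_analyze();              /* sums region[i].size */
            return memblock_phys_mem_size(); /* now meaningful */
    }
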
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 272734681d29..e769401a78ba 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -30,7 +30,7 @@
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/lmb.h> 33#include <linux/memblock.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <asm/io.h> 35#include <asm/io.h>
36#include <asm/page.h> 36#include <asm/page.h>
@@ -141,10 +141,10 @@ void __init check_for_initrd(void)
141 goto disable; 141 goto disable;
142 } 142 }
143 143
144 if (unlikely(end > lmb_end_of_DRAM())) { 144 if (unlikely(end > memblock_end_of_DRAM())) {
145 pr_err("initrd extends beyond end of memory " 145 pr_err("initrd extends beyond end of memory "
146 "(0x%08lx > 0x%08lx)\ndisabling initrd\n", 146 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
147 end, (unsigned long)lmb_end_of_DRAM()); 147 end, (unsigned long)memblock_end_of_DRAM());
148 goto disable; 148 goto disable;
149 } 149 }
150 150
@@ -161,7 +161,7 @@ void __init check_for_initrd(void)
161 initrd_start = (unsigned long)__va(__pa(start)); 161 initrd_start = (unsigned long)__va(__pa(start));
162 initrd_end = initrd_start + INITRD_SIZE; 162 initrd_end = initrd_start + INITRD_SIZE;
163 163
164 lmb_reserve(__pa(initrd_start), INITRD_SIZE); 164 memblock_reserve(__pa(initrd_start), INITRD_SIZE);
165 165
166 return; 166 return;
167 167
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 46f84de62469..d0e249100e98 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -16,7 +16,7 @@
16#include <linux/pagemap.h> 16#include <linux/pagemap.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/lmb.h> 19#include <linux/memblock.h>
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
22#include <asm/mmzone.h> 22#include <asm/mmzone.h>
@@ -33,7 +33,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD];
33 33
34void __init generic_mem_init(void) 34void __init generic_mem_init(void)
35{ 35{
36 lmb_add(__MEMORY_START, __MEMORY_SIZE); 36 memblock_add(__MEMORY_START, __MEMORY_SIZE);
37} 37}
38 38
39void __init __weak plat_mem_setup(void) 39void __init __weak plat_mem_setup(void)
@@ -176,12 +176,12 @@ void __init allocate_pgdat(unsigned int nid)
176 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 176 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
177 177
178#ifdef CONFIG_NEED_MULTIPLE_NODES 178#ifdef CONFIG_NEED_MULTIPLE_NODES
179 phys = __lmb_alloc_base(sizeof(struct pglist_data), 179 phys = __memblock_alloc_base(sizeof(struct pglist_data),
180 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); 180 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
181 /* Retry with all of system memory */ 181 /* Retry with all of system memory */
182 if (!phys) 182 if (!phys)
183 phys = __lmb_alloc_base(sizeof(struct pglist_data), 183 phys = __memblock_alloc_base(sizeof(struct pglist_data),
184 SMP_CACHE_BYTES, lmb_end_of_DRAM()); 184 SMP_CACHE_BYTES, memblock_end_of_DRAM());
185 if (!phys) 185 if (!phys)
186 panic("Can't allocate pgdat for node %d\n", nid); 186 panic("Can't allocate pgdat for node %d\n", nid);
187 187
@@ -212,7 +212,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
212 212
213 total_pages = bootmem_bootmap_pages(p->node_spanned_pages); 213 total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
214 214
215 paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); 215 paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
216 if (!paddr) 216 if (!paddr)
217 panic("Can't allocate bootmap for nid[%d]\n", nid); 217 panic("Can't allocate bootmap for nid[%d]\n", nid);
218 218
@@ -227,9 +227,9 @@ static void __init bootmem_init_one_node(unsigned int nid)
227 */ 227 */
228 if (nid == 0) { 228 if (nid == 0) {
229 /* Reserve the sections we're already using. */ 229 /* Reserve the sections we're already using. */
230 for (i = 0; i < lmb.reserved.cnt; i++) 230 for (i = 0; i < memblock.reserved.cnt; i++)
231 reserve_bootmem(lmb.reserved.region[i].base, 231 reserve_bootmem(memblock.reserved.region[i].base,
232 lmb_size_bytes(&lmb.reserved, i), 232 memblock_size_bytes(&memblock.reserved, i),
233 BOOTMEM_DEFAULT); 233 BOOTMEM_DEFAULT);
234 } 234 }
235 235
@@ -241,10 +241,10 @@ static void __init do_init_bootmem(void)
241 int i; 241 int i;
242 242
243 /* Add active regions with valid PFNs. */ 243 /* Add active regions with valid PFNs. */
244 for (i = 0; i < lmb.memory.cnt; i++) { 244 for (i = 0; i < memblock.memory.cnt; i++) {
245 unsigned long start_pfn, end_pfn; 245 unsigned long start_pfn, end_pfn;
246 start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; 246 start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
247 end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); 247 end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
248 __add_active_range(0, start_pfn, end_pfn); 248 __add_active_range(0, start_pfn, end_pfn);
249 } 249 }
250 250
@@ -276,7 +276,7 @@ static void __init early_reserve_mem(void)
276 * this catches the (definitely buggy) case of us accidentally 276 * this catches the (definitely buggy) case of us accidentally
277 * initializing the bootmem allocator with an invalid RAM area. 277 * initializing the bootmem allocator with an invalid RAM area.
278 */ 278 */
279 lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, 279 memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
280 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - 280 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
281 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); 281 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
282 282
@@ -284,7 +284,7 @@ static void __init early_reserve_mem(void)
284 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. 284 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
285 */ 285 */
286 if (CONFIG_ZERO_PAGE_OFFSET != 0) 286 if (CONFIG_ZERO_PAGE_OFFSET != 0)
287 lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); 287 memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
288 288
289 /* 289 /*
290 * Handle additional early reservations 290 * Handle additional early reservations
@@ -299,27 +299,27 @@ void __init paging_init(void)
299 unsigned long vaddr, end; 299 unsigned long vaddr, end;
300 int nid; 300 int nid;
301 301
302 lmb_init(); 302 memblock_init();
303 303
304 sh_mv.mv_mem_init(); 304 sh_mv.mv_mem_init();
305 305
306 early_reserve_mem(); 306 early_reserve_mem();
307 307
308 lmb_enforce_memory_limit(memory_limit); 308 memblock_enforce_memory_limit(memory_limit);
309 lmb_analyze(); 309 memblock_analyze();
310 310
311 lmb_dump_all(); 311 memblock_dump_all();
312 312
313 /* 313 /*
314 * Determine low and high memory ranges: 314 * Determine low and high memory ranges:
315 */ 315 */
316 max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 316 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
317 min_low_pfn = __MEMORY_START >> PAGE_SHIFT; 317 min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
318 318
319 nodes_clear(node_online_map); 319 nodes_clear(node_online_map);
320 320
321 memory_start = (unsigned long)__va(__MEMORY_START); 321 memory_start = (unsigned long)__va(__MEMORY_START);
322 memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size()); 322 memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
323 323
324 uncached_init(); 324 uncached_init();
325 pmb_init(); 325 pmb_init();
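
The loops in do_init_bootmem() and bootmem_init_one_node() above show the traversal idiom for the renamed structures: both region tables are fixed arrays walked by index up to .cnt, with the memblock_size_bytes()/memblock_size_pages() helpers from the new header doing the unit conversion. A condensed sketch of the pattern (hypothetical function, placeholder output):

    #include <linux/kernel.h>
    #include <linux/memblock.h>

    /* Walk every registered RAM region and report its PFN span,
     * the same pattern do_init_bootmem() uses above.
     */
    static void __init example_walk_memory(void)
    {
            int i;

            for (i = 0; i < memblock.memory.cnt; i++) {
                    unsigned long start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
                    unsigned long end_pfn = start_pfn +
                            memblock_size_pages(&memblock.memory, i);

                    pr_info("region %d: PFNs %lu - %lu\n", i, start_pfn, end_pfn);
            }
    }
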
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index a2e645f64a37..3d85225b9e95 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -9,7 +9,7 @@
9 */ 9 */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12#include <linux/lmb.h> 12#include <linux/memblock.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/numa.h> 14#include <linux/numa.h>
15#include <linux/pfn.h> 15#include <linux/pfn.h>
@@ -39,12 +39,12 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
39 pmb_bolt_mapping((unsigned long)__va(start), start, end - start, 39 pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
40 PAGE_KERNEL); 40 PAGE_KERNEL);
41 41
42 lmb_add(start, end - start); 42 memblock_add(start, end - start);
43 43
44 __add_active_range(nid, start_pfn, end_pfn); 44 __add_active_range(nid, start_pfn, end_pfn);
45 45
46 /* Node-local pgdat */ 46 /* Node-local pgdat */
47 NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), 47 NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data),
48 SMP_CACHE_BYTES, end)); 48 SMP_CACHE_BYTES, end));
49 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 49 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
50 50
@@ -54,7 +54,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
54 54
55 /* Node-local bootmap */ 55 /* Node-local bootmap */
56 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 56 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
57 bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, 57 bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
58 PAGE_SIZE, end); 58 PAGE_SIZE, end);
59 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, 59 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
60 start_pfn, end_pfn); 60 start_pfn, end_pfn);
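
Worth noting across these sh changes: setup_bootmem_node() above uses memblock_alloc_base(), which panics on failure, whereas allocate_pgdat() in sh/mm/init.c uses __memblock_alloc_base(), which returns 0 so the caller can retry with a wider limit. A sketch of the retry pattern (illustrative helper, assumed sizes):

    #include <linux/cache.h>
    #include <linux/memblock.h>

    /* __memblock_alloc_base() returns 0 on failure rather than
     * panicking, which permits the fallback allocate_pgdat() uses.
     */
    static u64 __init example_alloc_below(u64 size, u64 node_limit)
    {
            u64 phys = __memblock_alloc_base(size, SMP_CACHE_BYTES, node_limit);

            if (!phys)      /* retry anywhere below end of DRAM */
                    phys = __memblock_alloc_base(size, SMP_CACHE_BYTES,
                                                 memblock_end_of_DRAM());
            return phys;
    }
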
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6f1470baa314..c0015db247ba 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -42,7 +42,7 @@ config SPARC64
42 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 42 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
43 select HAVE_KRETPROBES 43 select HAVE_KRETPROBES
44 select HAVE_KPROBES 44 select HAVE_KPROBES
45 select HAVE_LMB 45 select HAVE_MEMBLOCK
46 select HAVE_SYSCALL_WRAPPERS 46 select HAVE_SYSCALL_WRAPPERS
47 select HAVE_DYNAMIC_FTRACE 47 select HAVE_DYNAMIC_FTRACE
48 select HAVE_FTRACE_MCOUNT_RECORD 48 select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/sparc/include/asm/lmb.h b/arch/sparc/include/asm/lmb.h
deleted file mode 100644
index 6a352cbcf520..000000000000
--- a/arch/sparc/include/asm/lmb.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _SPARC64_LMB_H
2#define _SPARC64_LMB_H
3
4#include <asm/oplib.h>
5
6#define LMB_DBG(fmt...) prom_printf(fmt)
7
8#define LMB_REAL_LIMIT 0
9
10#endif /* !(_SPARC64_LMB_H) */
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
new file mode 100644
index 000000000000..f12af880649b
--- /dev/null
+++ b/arch/sparc/include/asm/memblock.h
@@ -0,0 +1,10 @@
1#ifndef _SPARC64_MEMBLOCK_H
2#define _SPARC64_MEMBLOCK_H
3
4#include <asm/oplib.h>
5
6#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
7
8#define MEMBLOCK_REAL_LIMIT 0
9
10#endif /* !(_SPARC64_MEMBLOCK_H) */
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index cdc91d919e93..83e85c2e802a 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -4,7 +4,7 @@
4 */ 4 */
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/lmb.h> 7#include <linux/memblock.h>
8#include <linux/log2.h> 8#include <linux/log2.h>
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
@@ -86,7 +86,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
86 hp->handle_size = handle_size; 86 hp->handle_size = handle_size;
87} 87}
88 88
89static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) 89static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
90{ 90{
91 unsigned int handle_size, alloc_size; 91 unsigned int handle_size, alloc_size;
92 struct mdesc_handle *hp; 92 struct mdesc_handle *hp;
@@ -97,7 +97,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
97 mdesc_size); 97 mdesc_size);
98 alloc_size = PAGE_ALIGN(handle_size); 98 alloc_size = PAGE_ALIGN(handle_size);
99 99
100 paddr = lmb_alloc(alloc_size, PAGE_SIZE); 100 paddr = memblock_alloc(alloc_size, PAGE_SIZE);
101 101
102 hp = NULL; 102 hp = NULL;
103 if (paddr) { 103 if (paddr) {
@@ -107,7 +107,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
107 return hp; 107 return hp;
108} 108}
109 109
110static void mdesc_lmb_free(struct mdesc_handle *hp) 110static void mdesc_memblock_free(struct mdesc_handle *hp)
111{ 111{
112 unsigned int alloc_size; 112 unsigned int alloc_size;
113 unsigned long start; 113 unsigned long start;
@@ -120,9 +120,9 @@ static void mdesc_lmb_free(struct mdesc_handle *hp)
120 free_bootmem_late(start, alloc_size); 120 free_bootmem_late(start, alloc_size);
121} 121}
122 122
123static struct mdesc_mem_ops lmb_mdesc_ops = { 123static struct mdesc_mem_ops memblock_mdesc_ops = {
124 .alloc = mdesc_lmb_alloc, 124 .alloc = mdesc_memblock_alloc,
125 .free = mdesc_lmb_free, 125 .free = mdesc_memblock_free,
126}; 126};
127 127
128static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) 128static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
@@ -914,7 +914,7 @@ void __init sun4v_mdesc_init(void)
914 914
915 printk("MDESC: Size is %lu bytes.\n", len); 915 printk("MDESC: Size is %lu bytes.\n", len);
916 916
917 hp = mdesc_alloc(len, &lmb_mdesc_ops); 917 hp = mdesc_alloc(len, &memblock_mdesc_ops);
918 if (hp == NULL) { 918 if (hp == NULL) {
919 prom_printf("MDESC: alloc of %lu bytes failed.\n", len); 919 prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
920 prom_halt(); 920 prom_halt();
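
The renamed memblock_mdesc_ops pair above is a compact illustration of the lifecycle these early allocations follow: memory is grabbed straight from memblock before the page allocator exists, then handed back through free_bootmem_late() once bootmem can take over. A sketch of that pairing (hypothetical buffer and size handling):

    #include <linux/bootmem.h>
    #include <linux/memblock.h>

    /* Early memblock allocation paired with a late bootmem free,
     * as mdesc_memblock_alloc()/mdesc_memblock_free() do above.
     */
    static u64 example_buf;

    static void __init example_grab(unsigned int size)
    {
            example_buf = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
    }

    static void example_release(unsigned int size)
    {
            if (example_buf)
                    free_bootmem_late(example_buf, PAGE_ALIGN(size));
    }
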
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index fb06ac2bd38f..466a32763ea8 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -20,7 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/lmb.h> 23#include <linux/memblock.h>
24#include <linux/of_device.h> 24#include <linux/of_device.h>
25 25
26#include <asm/prom.h> 26#include <asm/prom.h>
@@ -34,7 +34,7 @@
34 34
35void * __init prom_early_alloc(unsigned long size) 35void * __init prom_early_alloc(unsigned long size)
36{ 36{
37 unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES); 37 unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
38 void *ret; 38 void *ret;
39 39
40 if (!paddr) { 40 if (!paddr) {
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index b2831dc3c121..f0434513df15 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -23,7 +23,7 @@
23#include <linux/cache.h> 23#include <linux/cache.h>
24#include <linux/sort.h> 24#include <linux/sort.h>
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/lmb.h> 26#include <linux/memblock.h>
27#include <linux/mmzone.h> 27#include <linux/mmzone.h>
28#include <linux/gfp.h> 28#include <linux/gfp.h>
29 29
@@ -726,7 +726,7 @@ static void __init find_ramdisk(unsigned long phys_base)
726 initrd_start = ramdisk_image; 726 initrd_start = ramdisk_image;
727 initrd_end = ramdisk_image + sparc_ramdisk_size; 727 initrd_end = ramdisk_image + sparc_ramdisk_size;
728 728
729 lmb_reserve(initrd_start, sparc_ramdisk_size); 729 memblock_reserve(initrd_start, sparc_ramdisk_size);
730 730
731 initrd_start += PAGE_OFFSET; 731 initrd_start += PAGE_OFFSET;
732 initrd_end += PAGE_OFFSET; 732 initrd_end += PAGE_OFFSET;
@@ -822,7 +822,7 @@ static void __init allocate_node_data(int nid)
822 struct pglist_data *p; 822 struct pglist_data *p;
823 823
824#ifdef CONFIG_NEED_MULTIPLE_NODES 824#ifdef CONFIG_NEED_MULTIPLE_NODES
825 paddr = lmb_alloc_nid(sizeof(struct pglist_data), 825 paddr = memblock_alloc_nid(sizeof(struct pglist_data),
826 SMP_CACHE_BYTES, nid, nid_range); 826 SMP_CACHE_BYTES, nid, nid_range);
827 if (!paddr) { 827 if (!paddr) {
828 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); 828 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
@@ -843,7 +843,7 @@ static void __init allocate_node_data(int nid)
843 if (p->node_spanned_pages) { 843 if (p->node_spanned_pages) {
844 num_pages = bootmem_bootmap_pages(p->node_spanned_pages); 844 num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
845 845
846 paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, 846 paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
847 nid_range); 847 nid_range);
848 if (!paddr) { 848 if (!paddr) {
849 prom_printf("Cannot allocate bootmap for nid[%d]\n", 849 prom_printf("Cannot allocate bootmap for nid[%d]\n",
@@ -974,11 +974,11 @@ static void __init add_node_ranges(void)
974{ 974{
975 int i; 975 int i;
976 976
977 for (i = 0; i < lmb.memory.cnt; i++) { 977 for (i = 0; i < memblock.memory.cnt; i++) {
978 unsigned long size = lmb_size_bytes(&lmb.memory, i); 978 unsigned long size = memblock_size_bytes(&memblock.memory, i);
979 unsigned long start, end; 979 unsigned long start, end;
980 980
981 start = lmb.memory.region[i].base; 981 start = memblock.memory.region[i].base;
982 end = start + size; 982 end = start + size;
983 while (start < end) { 983 while (start < end) {
984 unsigned long this_end; 984 unsigned long this_end;
@@ -1010,7 +1010,7 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
1010 if (!count) 1010 if (!count)
1011 return -ENOENT; 1011 return -ENOENT;
1012 1012
1013 paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup), 1013 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1014 SMP_CACHE_BYTES); 1014 SMP_CACHE_BYTES);
1015 if (!paddr) 1015 if (!paddr)
1016 return -ENOMEM; 1016 return -ENOMEM;
@@ -1051,7 +1051,7 @@ static int __init grab_mblocks(struct mdesc_handle *md)
1051 if (!count) 1051 if (!count)
1052 return -ENOENT; 1052 return -ENOENT;
1053 1053
1054 paddr = lmb_alloc(count * sizeof(struct mdesc_mblock), 1054 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1055 SMP_CACHE_BYTES); 1055 SMP_CACHE_BYTES);
1056 if (!paddr) 1056 if (!paddr)
1057 return -ENOMEM; 1057 return -ENOMEM;
@@ -1279,8 +1279,8 @@ static int bootmem_init_numa(void)
1279 1279
1280static void __init bootmem_init_nonnuma(void) 1280static void __init bootmem_init_nonnuma(void)
1281{ 1281{
1282 unsigned long top_of_ram = lmb_end_of_DRAM(); 1282 unsigned long top_of_ram = memblock_end_of_DRAM();
1283 unsigned long total_ram = lmb_phys_mem_size(); 1283 unsigned long total_ram = memblock_phys_mem_size();
1284 unsigned int i; 1284 unsigned int i;
1285 1285
1286 numadbg("bootmem_init_nonnuma()\n"); 1286 numadbg("bootmem_init_nonnuma()\n");
@@ -1292,15 +1292,15 @@ static void __init bootmem_init_nonnuma(void)
1292 1292
1293 init_node_masks_nonnuma(); 1293 init_node_masks_nonnuma();
1294 1294
1295 for (i = 0; i < lmb.memory.cnt; i++) { 1295 for (i = 0; i < memblock.memory.cnt; i++) {
1296 unsigned long size = lmb_size_bytes(&lmb.memory, i); 1296 unsigned long size = memblock_size_bytes(&memblock.memory, i);
1297 unsigned long start_pfn, end_pfn; 1297 unsigned long start_pfn, end_pfn;
1298 1298
1299 if (!size) 1299 if (!size)
1300 continue; 1300 continue;
1301 1301
1302 start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; 1302 start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
1303 end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); 1303 end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
1304 add_active_range(0, start_pfn, end_pfn); 1304 add_active_range(0, start_pfn, end_pfn);
1305 } 1305 }
1306 1306
@@ -1338,9 +1338,9 @@ static void __init trim_reserved_in_node(int nid)
1338 1338
1339 numadbg(" trim_reserved_in_node(%d)\n", nid); 1339 numadbg(" trim_reserved_in_node(%d)\n", nid);
1340 1340
1341 for (i = 0; i < lmb.reserved.cnt; i++) { 1341 for (i = 0; i < memblock.reserved.cnt; i++) {
1342 unsigned long start = lmb.reserved.region[i].base; 1342 unsigned long start = memblock.reserved.region[i].base;
1343 unsigned long size = lmb_size_bytes(&lmb.reserved, i); 1343 unsigned long size = memblock_size_bytes(&memblock.reserved, i);
1344 unsigned long end = start + size; 1344 unsigned long end = start + size;
1345 1345
1346 reserve_range_in_node(nid, start, end); 1346 reserve_range_in_node(nid, start, end);
@@ -1384,7 +1384,7 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
1384 unsigned long end_pfn; 1384 unsigned long end_pfn;
1385 int nid; 1385 int nid;
1386 1386
1387 end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 1387 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1388 max_pfn = max_low_pfn = end_pfn; 1388 max_pfn = max_low_pfn = end_pfn;
1389 min_low_pfn = (phys_base >> PAGE_SHIFT); 1389 min_low_pfn = (phys_base >> PAGE_SHIFT);
1390 1390
@@ -1734,7 +1734,7 @@ void __init paging_init(void)
1734 sun4v_ktsb_init(); 1734 sun4v_ktsb_init();
1735 } 1735 }
1736 1736
1737 lmb_init(); 1737 memblock_init();
1738 1738
1739 /* Find available physical memory... 1739 /* Find available physical memory...
1740 * 1740 *
@@ -1752,17 +1752,17 @@ void __init paging_init(void)
1752 phys_base = 0xffffffffffffffffUL; 1752 phys_base = 0xffffffffffffffffUL;
1753 for (i = 0; i < pavail_ents; i++) { 1753 for (i = 0; i < pavail_ents; i++) {
1754 phys_base = min(phys_base, pavail[i].phys_addr); 1754 phys_base = min(phys_base, pavail[i].phys_addr);
1755 lmb_add(pavail[i].phys_addr, pavail[i].reg_size); 1755 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
1756 } 1756 }
1757 1757
1758 lmb_reserve(kern_base, kern_size); 1758 memblock_reserve(kern_base, kern_size);
1759 1759
1760 find_ramdisk(phys_base); 1760 find_ramdisk(phys_base);
1761 1761
1762 lmb_enforce_memory_limit(cmdline_memory_size); 1762 memblock_enforce_memory_limit(cmdline_memory_size);
1763 1763
1764 lmb_analyze(); 1764 memblock_analyze();
1765 lmb_dump_all(); 1765 memblock_dump_all();
1766 1766
1767 set_bit(0, mmu_context_bmap); 1767 set_bit(0, mmu_context_bmap);
1768 1768
@@ -1816,8 +1816,8 @@ void __init paging_init(void)
1816 */ 1816 */
1817 for_each_possible_cpu(i) { 1817 for_each_possible_cpu(i) {
1818 /* XXX Use node local allocations... XXX */ 1818 /* XXX Use node local allocations... XXX */
1819 softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); 1819 softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
1820 hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); 1820 hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
1821 } 1821 }
1822 1822
1823 /* Setup bootmem... */ 1823 /* Setup bootmem... */
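
allocate_node_data() above relies on memblock_alloc_nid() and its nid_range callback, whose contract is visible in the implementation later in this patch: the callback returns the end of the run starting at start that lives on a single node, and writes that node's id through the pointer. A minimal sketch of a conforming callback (hypothetical, single-node):

    #include <linux/memblock.h>

    /* Trivial nid_range callback: report the whole [start, end) span
     * as node 0, so every region is considered local to node 0.
     */
    static u64 __init example_nid_range(u64 start, u64 end, int *nid)
    {
            *nid = 0;
            return end;
    }

    /* usage: memblock_alloc_nid(size, SMP_CACHE_BYTES, 0, example_nid_range); */
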
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
deleted file mode 100644
index f3d14333ebed..000000000000
--- a/include/linux/lmb.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef _LINUX_LMB_H
2#define _LINUX_LMB_H
3#ifdef __KERNEL__
4
5/*
6 * Logical memory blocks.
7 *
8 * Copyright (C) 2001 Peter Bergner, IBM Corp.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/mm.h>
18
19#define MAX_LMB_REGIONS 128
20
21struct lmb_property {
22 u64 base;
23 u64 size;
24};
25
26struct lmb_region {
27 unsigned long cnt;
28 u64 size;
29 struct lmb_property region[MAX_LMB_REGIONS+1];
30};
31
32struct lmb {
33 unsigned long debug;
34 u64 rmo_size;
35 struct lmb_region memory;
36 struct lmb_region reserved;
37};
38
39extern struct lmb lmb;
40
41extern void __init lmb_init(void);
42extern void __init lmb_analyze(void);
43extern long lmb_add(u64 base, u64 size);
44extern long lmb_remove(u64 base, u64 size);
45extern long __init lmb_free(u64 base, u64 size);
46extern long __init lmb_reserve(u64 base, u64 size);
47extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
48 u64 (*nid_range)(u64, u64, int *));
49extern u64 __init lmb_alloc(u64 size, u64 align);
50extern u64 __init lmb_alloc_base(u64 size,
51 u64, u64 max_addr);
52extern u64 __init __lmb_alloc_base(u64 size,
53 u64 align, u64 max_addr);
54extern u64 __init lmb_phys_mem_size(void);
55extern u64 lmb_end_of_DRAM(void);
56extern void __init lmb_enforce_memory_limit(u64 memory_limit);
57extern int __init lmb_is_reserved(u64 addr);
58extern int lmb_is_region_reserved(u64 base, u64 size);
59extern int lmb_find(struct lmb_property *res);
60
61extern void lmb_dump_all(void);
62
63static inline u64
64lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
65{
66 return type->region[region_nr].size;
67}
68static inline u64
69lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
70{
71 return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
72}
73static inline u64
74lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
75{
76 return type->region[region_nr].base >> PAGE_SHIFT;
77}
78static inline u64
79lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
80{
81 return lmb_start_pfn(type, region_nr) +
82 lmb_size_pages(type, region_nr);
83}
84
85#include <asm/lmb.h>
86
87#endif /* __KERNEL__ */
88
89#endif /* _LINUX_LMB_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
new file mode 100644
index 000000000000..a59faf2b5edd
--- /dev/null
+++ b/include/linux/memblock.h
@@ -0,0 +1,89 @@
1#ifndef _LINUX_MEMBLOCK_H
2#define _LINUX_MEMBLOCK_H
3#ifdef __KERNEL__
4
5/*
6 * Logical memory blocks.
7 *
8 * Copyright (C) 2001 Peter Bergner, IBM Corp.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/mm.h>
18
19#define MAX_MEMBLOCK_REGIONS 128
20
21struct memblock_property {
22 u64 base;
23 u64 size;
24};
25
26struct memblock_region {
27 unsigned long cnt;
28 u64 size;
29 struct memblock_property region[MAX_MEMBLOCK_REGIONS+1];
30};
31
32struct memblock {
33 unsigned long debug;
34 u64 rmo_size;
35 struct memblock_region memory;
36 struct memblock_region reserved;
37};
38
39extern struct memblock memblock;
40
41extern void __init memblock_init(void);
42extern void __init memblock_analyze(void);
43extern long memblock_add(u64 base, u64 size);
44extern long memblock_remove(u64 base, u64 size);
45extern long __init memblock_free(u64 base, u64 size);
46extern long __init memblock_reserve(u64 base, u64 size);
47extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
48 u64 (*nid_range)(u64, u64, int *));
49extern u64 __init memblock_alloc(u64 size, u64 align);
50extern u64 __init memblock_alloc_base(u64 size,
51 u64, u64 max_addr);
52extern u64 __init __memblock_alloc_base(u64 size,
53 u64 align, u64 max_addr);
54extern u64 __init memblock_phys_mem_size(void);
55extern u64 memblock_end_of_DRAM(void);
56extern void __init memblock_enforce_memory_limit(u64 memory_limit);
57extern int __init memblock_is_reserved(u64 addr);
58extern int memblock_is_region_reserved(u64 base, u64 size);
59extern int memblock_find(struct memblock_property *res);
60
61extern void memblock_dump_all(void);
62
63static inline u64
64memblock_size_bytes(struct memblock_region *type, unsigned long region_nr)
65{
66 return type->region[region_nr].size;
67}
68static inline u64
69memblock_size_pages(struct memblock_region *type, unsigned long region_nr)
70{
71 return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
72}
73static inline u64
74memblock_start_pfn(struct memblock_region *type, unsigned long region_nr)
75{
76 return type->region[region_nr].base >> PAGE_SHIFT;
77}
78static inline u64
79memblock_end_pfn(struct memblock_region *type, unsigned long region_nr)
80{
81 return memblock_start_pfn(type, region_nr) +
82 memblock_size_pages(type, region_nr);
83}
84
85#include <asm/memblock.h>
86
87#endif /* __KERNEL__ */
88
89#endif /* _LINUX_MEMBLOCK_H */
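
Taken together, the declarations above define the boot-time protocol the arch changes earlier in this patch follow: memblock_init() first, then memblock_add()/memblock_reserve() calls describing RAM, then memblock_analyze() before any size queries. A minimal sketch of that order (addresses and sizes are placeholders, not from the patch):

    #include <linux/memblock.h>

    /* Expected call order; base/size values are illustrative only. */
    static void __init example_mem_setup(void)
    {
            memblock_init();                       /* install dummy regions */
            memblock_add(0x00000000, 0x10000000);  /* 256MB of RAM */
            memblock_reserve(0x00000000, 0x10000); /* protect low 64KB */
            memblock_analyze();                    /* compute memory.size */
            memblock_dump_all();                   /* no-op unless memblock=debug */
    }
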
diff --git a/lib/Kconfig b/lib/Kconfig
index 170d8ca901d8..5b916bc0fbae 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -181,9 +181,6 @@ config HAS_DMA
181config CHECK_SIGNATURE 181config CHECK_SIGNATURE
182 bool 182 bool
183 183
184config HAVE_LMB
185 boolean
186
187config CPUMASK_OFFSTACK 184config CPUMASK_OFFSTACK
188 bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS 185 bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
189 help 186 help
diff --git a/lib/Makefile b/lib/Makefile
index 3f1062cbbff4..0bfabba1bb32 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -89,8 +89,6 @@ obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
89 89
90lib-$(CONFIG_GENERIC_BUG) += bug.o 90lib-$(CONFIG_GENERIC_BUG) += bug.o
91 91
92obj-$(CONFIG_HAVE_LMB) += lmb.o
93
94obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o 92obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
95 93
96obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o 94obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index b1fc52606524..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,541 +0,0 @@
1/*
2 * Procedures for maintaining information about logical memory blocks.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/bitops.h>
16#include <linux/lmb.h>
17
18#define LMB_ALLOC_ANYWHERE 0
19
20struct lmb lmb;
21
22static int lmb_debug;
23
24static int __init early_lmb(char *p)
25{
26 if (p && strstr(p, "debug"))
27 lmb_debug = 1;
28 return 0;
29}
30early_param("lmb", early_lmb);
31
32static void lmb_dump(struct lmb_region *region, char *name)
33{
34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
47
48void lmb_dump_all(void)
49{
50 if (!lmb_debug)
51 return;
52
53 pr_info("LMB configuration:\n");
54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
56
57 lmb_dump(&lmb.memory, "memory");
58 lmb_dump(&lmb.reserved, "reserved");
59}
60
61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
62 u64 size2)
63{
64 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
65}
66
67static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
68{
69 if (base2 == base1 + size1)
70 return 1;
71 else if (base1 == base2 + size2)
72 return -1;
73
74 return 0;
75}
76
77static long lmb_regions_adjacent(struct lmb_region *rgn,
78 unsigned long r1, unsigned long r2)
79{
80 u64 base1 = rgn->region[r1].base;
81 u64 size1 = rgn->region[r1].size;
82 u64 base2 = rgn->region[r2].base;
83 u64 size2 = rgn->region[r2].size;
84
85 return lmb_addrs_adjacent(base1, size1, base2, size2);
86}
87
88static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
89{
90 unsigned long i;
91
92 for (i = r; i < rgn->cnt - 1; i++) {
93 rgn->region[i].base = rgn->region[i + 1].base;
94 rgn->region[i].size = rgn->region[i + 1].size;
95 }
96 rgn->cnt--;
97}
98
99/* Assumption: base addr of region 1 < base addr of region 2 */
100static void lmb_coalesce_regions(struct lmb_region *rgn,
101 unsigned long r1, unsigned long r2)
102{
103 rgn->region[r1].size += rgn->region[r2].size;
104 lmb_remove_region(rgn, r2);
105}
106
107void __init lmb_init(void)
108{
109 /* Create a dummy zero size LMB which will get coalesced away later.
110 * This simplifies the lmb_add() code below...
111 */
112 lmb.memory.region[0].base = 0;
113 lmb.memory.region[0].size = 0;
114 lmb.memory.cnt = 1;
115
116 /* Ditto. */
117 lmb.reserved.region[0].base = 0;
118 lmb.reserved.region[0].size = 0;
119 lmb.reserved.cnt = 1;
120}
121
122void __init lmb_analyze(void)
123{
124 int i;
125
126 lmb.memory.size = 0;
127
128 for (i = 0; i < lmb.memory.cnt; i++)
129 lmb.memory.size += lmb.memory.region[i].size;
130}
131
132static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
133{
134 unsigned long coalesced = 0;
135 long adjacent, i;
136
137 if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
138 rgn->region[0].base = base;
139 rgn->region[0].size = size;
140 return 0;
141 }
142
143 /* First try and coalesce this LMB with another. */
144 for (i = 0; i < rgn->cnt; i++) {
145 u64 rgnbase = rgn->region[i].base;
146 u64 rgnsize = rgn->region[i].size;
147
148 if ((rgnbase == base) && (rgnsize == size))
149 /* Already have this region, so we're done */
150 return 0;
151
152 adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
153 if (adjacent > 0) {
154 rgn->region[i].base -= size;
155 rgn->region[i].size += size;
156 coalesced++;
157 break;
158 } else if (adjacent < 0) {
159 rgn->region[i].size += size;
160 coalesced++;
161 break;
162 }
163 }
164
165 if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
166 lmb_coalesce_regions(rgn, i, i+1);
167 coalesced++;
168 }
169
170 if (coalesced)
171 return coalesced;
172 if (rgn->cnt >= MAX_LMB_REGIONS)
173 return -1;
174
175 /* Couldn't coalesce the LMB, so add it to the sorted table. */
176 for (i = rgn->cnt - 1; i >= 0; i--) {
177 if (base < rgn->region[i].base) {
178 rgn->region[i+1].base = rgn->region[i].base;
179 rgn->region[i+1].size = rgn->region[i].size;
180 } else {
181 rgn->region[i+1].base = base;
182 rgn->region[i+1].size = size;
183 break;
184 }
185 }
186
187 if (base < rgn->region[0].base) {
188 rgn->region[0].base = base;
189 rgn->region[0].size = size;
190 }
191 rgn->cnt++;
192
193 return 0;
194}
195
196long lmb_add(u64 base, u64 size)
197{
198 struct lmb_region *_rgn = &lmb.memory;
199
200 /* On pSeries LPAR systems, the first LMB is our RMO region. */
201 if (base == 0)
202 lmb.rmo_size = size;
203
204 return lmb_add_region(_rgn, base, size);
205
206}
207
208static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
209{
210 u64 rgnbegin, rgnend;
211 u64 end = base + size;
212 int i;
213
214 rgnbegin = rgnend = 0; /* suppress gcc warnings */
215
216 /* Find the region where (base, size) belongs to */
217 for (i=0; i < rgn->cnt; i++) {
218 rgnbegin = rgn->region[i].base;
219 rgnend = rgnbegin + rgn->region[i].size;
220
221 if ((rgnbegin <= base) && (end <= rgnend))
222 break;
223 }
224
225 /* Didn't find the region */
226 if (i == rgn->cnt)
227 return -1;
228
229 /* Check to see if we are removing entire region */
230 if ((rgnbegin == base) && (rgnend == end)) {
231 lmb_remove_region(rgn, i);
232 return 0;
233 }
234
235 /* Check to see if region is matching at the front */
236 if (rgnbegin == base) {
237 rgn->region[i].base = end;
238 rgn->region[i].size -= size;
239 return 0;
240 }
241
242 /* Check to see if the region is matching at the end */
243 if (rgnend == end) {
244 rgn->region[i].size -= size;
245 return 0;
246 }
247
248 /*
249 * We need to split the entry - adjust the current one to the
250 * beginning of the hole and add the region after the hole.
251 */
252 rgn->region[i].size = base - rgn->region[i].base;
253 return lmb_add_region(rgn, end, rgnend - end);
254}
255
256long lmb_remove(u64 base, u64 size)
257{
258 return __lmb_remove(&lmb.memory, base, size);
259}
260
261long __init lmb_free(u64 base, u64 size)
262{
263 return __lmb_remove(&lmb.reserved, base, size);
264}
265
266long __init lmb_reserve(u64 base, u64 size)
267{
268 struct lmb_region *_rgn = &lmb.reserved;
269
270 BUG_ON(0 == size);
271
272 return lmb_add_region(_rgn, base, size);
273}
274
275long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
276{
277 unsigned long i;
278
279 for (i = 0; i < rgn->cnt; i++) {
280 u64 rgnbase = rgn->region[i].base;
281 u64 rgnsize = rgn->region[i].size;
282 if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
283 break;
284 }
285
286 return (i < rgn->cnt) ? i : -1;
287}
288
289static u64 lmb_align_down(u64 addr, u64 size)
290{
291 return addr & ~(size - 1);
292}
293
294static u64 lmb_align_up(u64 addr, u64 size)
295{
296 return (addr + (size - 1)) & ~(size - 1);
297}
298
299static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
300 u64 size, u64 align)
301{
302 u64 base, res_base;
303 long j;
304
305 base = lmb_align_down((end - size), align);
306 while (start <= base) {
307 j = lmb_overlaps_region(&lmb.reserved, base, size);
308 if (j < 0) {
309 /* this area isn't reserved, take it */
310 if (lmb_add_region(&lmb.reserved, base, size) < 0)
311 base = ~(u64)0;
312 return base;
313 }
314 res_base = lmb.reserved.region[j].base;
315 if (res_base < size)
316 break;
317 base = lmb_align_down(res_base - size, align);
318 }
319
320 return ~(u64)0;
321}
322
323static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
324 u64 (*nid_range)(u64, u64, int *),
325 u64 size, u64 align, int nid)
326{
327 u64 start, end;
328
329 start = mp->base;
330 end = start + mp->size;
331
332 start = lmb_align_up(start, align);
333 while (start < end) {
334 u64 this_end;
335 int this_nid;
336
337 this_end = nid_range(start, end, &this_nid);
338 if (this_nid == nid) {
339 u64 ret = lmb_alloc_nid_unreserved(start, this_end,
340 size, align);
341 if (ret != ~(u64)0)
342 return ret;
343 }
344 start = this_end;
345 }
346
347 return ~(u64)0;
348}
349
350u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
351 u64 (*nid_range)(u64 start, u64 end, int *nid))
352{
353 struct lmb_region *mem = &lmb.memory;
354 int i;
355
356 BUG_ON(0 == size);
357
358 size = lmb_align_up(size, align);
359
360 for (i = 0; i < mem->cnt; i++) {
361 u64 ret = lmb_alloc_nid_region(&mem->region[i],
362 nid_range,
363 size, align, nid);
364 if (ret != ~(u64)0)
365 return ret;
366 }
367
368 return lmb_alloc(size, align);
369}
370
371u64 __init lmb_alloc(u64 size, u64 align)
372{
373 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
374}
375
376u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
377{
378 u64 alloc;
379
380 alloc = __lmb_alloc_base(size, align, max_addr);
381
382 if (alloc == 0)
383 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
384 (unsigned long long) size, (unsigned long long) max_addr);
385
386 return alloc;
387}
388
389u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
390{
391 long i, j;
392 u64 base = 0;
393 u64 res_base;
394
395 BUG_ON(0 == size);
396
397 size = lmb_align_up(size, align);
398
399 /* On some platforms, make sure we allocate lowmem */
400 /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
401 if (max_addr == LMB_ALLOC_ANYWHERE)
402 max_addr = LMB_REAL_LIMIT;
403
404 for (i = lmb.memory.cnt - 1; i >= 0; i--) {
405 u64 lmbbase = lmb.memory.region[i].base;
406 u64 lmbsize = lmb.memory.region[i].size;
407
408 if (lmbsize < size)
409 continue;
410 if (max_addr == LMB_ALLOC_ANYWHERE)
411 base = lmb_align_down(lmbbase + lmbsize - size, align);
412 else if (lmbbase < max_addr) {
413 base = min(lmbbase + lmbsize, max_addr);
414 base = lmb_align_down(base - size, align);
415 } else
416 continue;
417
418 while (base && lmbbase <= base) {
419 j = lmb_overlaps_region(&lmb.reserved, base, size);
420 if (j < 0) {
421 /* this area isn't reserved, take it */
422 if (lmb_add_region(&lmb.reserved, base, size) < 0)
423 return 0;
424 return base;
425 }
426 res_base = lmb.reserved.region[j].base;
427 if (res_base < size)
428 break;
429 base = lmb_align_down(res_base - size, align);
430 }
431 }
432 return 0;
433}
434
435/* You must call lmb_analyze() before this. */
436u64 __init lmb_phys_mem_size(void)
437{
438 return lmb.memory.size;
439}
440
441u64 lmb_end_of_DRAM(void)
442{
443 int idx = lmb.memory.cnt - 1;
444
445 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
446}
447
448/* You must call lmb_analyze() after this. */
449void __init lmb_enforce_memory_limit(u64 memory_limit)
450{
451 unsigned long i;
452 u64 limit;
453 struct lmb_property *p;
454
455 if (!memory_limit)
456 return;
457
458 /* Truncate the lmb regions to satisfy the memory limit. */
459 limit = memory_limit;
460 for (i = 0; i < lmb.memory.cnt; i++) {
461 if (limit > lmb.memory.region[i].size) {
462 limit -= lmb.memory.region[i].size;
463 continue;
464 }
465
466 lmb.memory.region[i].size = limit;
467 lmb.memory.cnt = i + 1;
468 break;
469 }
470
471 if (lmb.memory.region[0].size < lmb.rmo_size)
472 lmb.rmo_size = lmb.memory.region[0].size;
473
474 memory_limit = lmb_end_of_DRAM();
475
476 /* And truncate any reserves above the limit also. */
477 for (i = 0; i < lmb.reserved.cnt; i++) {
478 p = &lmb.reserved.region[i];
479
480 if (p->base > memory_limit)
481 p->size = 0;
482 else if ((p->base + p->size) > memory_limit)
483 p->size = memory_limit - p->base;
484
485 if (p->size == 0) {
486 lmb_remove_region(&lmb.reserved, i);
487 i--;
488 }
489 }
490}
491
492int __init lmb_is_reserved(u64 addr)
493{
494 int i;
495
496 for (i = 0; i < lmb.reserved.cnt; i++) {
497 u64 upper = lmb.reserved.region[i].base +
498 lmb.reserved.region[i].size - 1;
499 if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
500 return 1;
501 }
502 return 0;
503}
504
505int lmb_is_region_reserved(u64 base, u64 size)
506{
507 return lmb_overlaps_region(&lmb.reserved, base, size);
508}
509
510/*
511 * Given a <base, len>, find which memory regions belong to this range.
512 * Adjust the request and return a contiguous chunk.
513 */
514int lmb_find(struct lmb_property *res)
515{
516 int i;
517 u64 rstart, rend;
518
519 rstart = res->base;
520 rend = rstart + res->size - 1;
521
522 for (i = 0; i < lmb.memory.cnt; i++) {
523 u64 start = lmb.memory.region[i].base;
524 u64 end = start + lmb.memory.region[i].size - 1;
525
526 if (start > rend)
527 return -1;
528
529 if ((end >= rstart) && (start < rend)) {
530 /* adjust the request */
531 if (rstart < start)
532 rstart = start;
533 if (rend > end)
534 rend = end;
535 res->base = rstart;
536 res->size = rend - rstart + 1;
537 return 0;
538 }
539 }
540 return -1;
541}
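
The deleted implementation reappears verbatim as mm/memblock.c below, identifiers renamed. One entry point whose semantics are easy to miss in the listing is lmb_find() (memblock_find() after the rename): it clamps a caller-supplied <base, size> request to the overlapping portion of a registered memory region, modifying the argument in place. A sketch of its use (placeholder values, new names):

    #include <linux/memblock.h>

    /* Clamp a requested range to what actually exists in
     * memblock.memory; returns -1 if nothing overlaps.
     */
    static int __init example_clamp_range(void)
    {
            struct memblock_property res = {
                    .base = 0x0ff00000,
                    .size = 0x00200000,  /* 2MB possibly straddling end of RAM */
            };

            if (memblock_find(&res) < 0)
                    return -1;
            /* res.base/res.size now describe the contiguous overlap */
            return 0;
    }
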
diff --git a/mm/Kconfig b/mm/Kconfig
index 527136b22384..f4e516e9c37c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,6 +128,9 @@ config SPARSEMEM_VMEMMAP
128 pfn_to_page and page_to_pfn operations. This is the most 128 pfn_to_page and page_to_pfn operations. This is the most
129 efficient option when sufficient kernel resources are available. 129 efficient option when sufficient kernel resources are available.
130 130
131config HAVE_MEMBLOCK
132 boolean
133
131# eventually, we can have this option just 'select SPARSEMEM' 134# eventually, we can have this option just 'select SPARSEMEM'
132config MEMORY_HOTPLUG 135config MEMORY_HOTPLUG
133 bool "Allow for memory hot-add" 136 bool "Allow for memory hot-add"
diff --git a/mm/Makefile b/mm/Makefile
index 8982504bd03b..34b2546a9e37 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -15,6 +15,8 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
15 $(mmu-y) 15 $(mmu-y)
16obj-y += init-mm.o 16obj-y += init-mm.o
17 17
18obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
19
18obj-$(CONFIG_BOUNCE) += bounce.o 20obj-$(CONFIG_BOUNCE) += bounce.o
19obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o 21obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
20obj-$(CONFIG_HAS_DMA) += dmapool.o 22obj-$(CONFIG_HAS_DMA) += dmapool.o
diff --git a/mm/memblock.c b/mm/memblock.c
new file mode 100644
index 000000000000..3024eb30fc27
--- /dev/null
+++ b/mm/memblock.c
@@ -0,0 +1,541 @@
1/*
2 * Procedures for maintaining information about logical memory blocks.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/bitops.h>
16#include <linux/memblock.h>
17
18#define MEMBLOCK_ALLOC_ANYWHERE 0
19
20struct memblock memblock;
21
22static int memblock_debug;
23
24static int __init early_memblock(char *p)
25{
26 if (p && strstr(p, "debug"))
27 memblock_debug = 1;
28 return 0;
29}
30early_param("memblock", early_memblock);
31
32static void memblock_dump(struct memblock_region *region, char *name)
33{
34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
47
48void memblock_dump_all(void)
49{
50 if (!memblock_debug)
51 return;
52
53 pr_info("MEMBLOCK configuration:\n");
54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size);
55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);
56
57 memblock_dump(&memblock.memory, "memory");
58 memblock_dump(&memblock.reserved, "reserved");
59}
60
61static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
62 u64 size2)
63{
64 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
65}
66
67static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
68{
69 if (base2 == base1 + size1)
70 return 1;
71 else if (base1 == base2 + size2)
72 return -1;
73
74 return 0;
75}
76
77static long memblock_regions_adjacent(struct memblock_region *rgn,
78 unsigned long r1, unsigned long r2)
79{
80 u64 base1 = rgn->region[r1].base;
81 u64 size1 = rgn->region[r1].size;
82 u64 base2 = rgn->region[r2].base;
83 u64 size2 = rgn->region[r2].size;
84
85 return memblock_addrs_adjacent(base1, size1, base2, size2);
86}
87
88static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
89{
90 unsigned long i;
91
92 for (i = r; i < rgn->cnt - 1; i++) {
93 rgn->region[i].base = rgn->region[i + 1].base;
94 rgn->region[i].size = rgn->region[i + 1].size;
95 }
96 rgn->cnt--;
97}
98
99/* Assumption: base addr of region 1 < base addr of region 2 */
100static void memblock_coalesce_regions(struct memblock_region *rgn,
101 unsigned long r1, unsigned long r2)
102{
103 rgn->region[r1].size += rgn->region[r2].size;
104 memblock_remove_region(rgn, r2);
105}
106
107void __init memblock_init(void)
108{
109 /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
110 * This simplifies the memblock_add() code below...
111 */
112 memblock.memory.region[0].base = 0;
113 memblock.memory.region[0].size = 0;
114 memblock.memory.cnt = 1;
115
116 /* Ditto. */
117 memblock.reserved.region[0].base = 0;
118 memblock.reserved.region[0].size = 0;
119 memblock.reserved.cnt = 1;
120}
121
122void __init memblock_analyze(void)
123{
124 int i;
125
126 memblock.memory.size = 0;
127
128 for (i = 0; i < memblock.memory.cnt; i++)
129 memblock.memory.size += memblock.memory.region[i].size;
130}
131
132static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
133{
134 unsigned long coalesced = 0;
135 long adjacent, i;
136
137 if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
138 rgn->region[0].base = base;
139 rgn->region[0].size = size;
140 return 0;
141 }
142
143 /* First try and coalesce this MEMBLOCK with another. */
144 for (i = 0; i < rgn->cnt; i++) {
145 u64 rgnbase = rgn->region[i].base;
146 u64 rgnsize = rgn->region[i].size;
147
148 if ((rgnbase == base) && (rgnsize == size))
149 /* Already have this region, so we're done */
150 return 0;
151
152 adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
153 if (adjacent > 0) {
154 rgn->region[i].base -= size;
155 rgn->region[i].size += size;
156 coalesced++;
157 break;
158 } else if (adjacent < 0) {
159 rgn->region[i].size += size;
160 coalesced++;
161 break;
162 }
163 }
164
165 if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
166 memblock_coalesce_regions(rgn, i, i+1);
167 coalesced++;
168 }
169
170 if (coalesced)
171 return coalesced;
172 if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
173 return -1;
174
175 /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
176 for (i = rgn->cnt - 1; i >= 0; i--) {
177 if (base < rgn->region[i].base) {
178 rgn->region[i+1].base = rgn->region[i].base;
179 rgn->region[i+1].size = rgn->region[i].size;
180 } else {
181 rgn->region[i+1].base = base;
182 rgn->region[i+1].size = size;
183 break;
184 }
185 }
186
187 if (base < rgn->region[0].base) {
188 rgn->region[0].base = base;
189 rgn->region[0].size = size;
190 }
191 rgn->cnt++;
192
193 return 0;
194}
195
196long memblock_add(u64 base, u64 size)
197{
198 struct memblock_region *_rgn = &memblock.memory;
199
200 /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
201 if (base == 0)
202 memblock.rmo_size = size;
203
204 return memblock_add_region(_rgn, base, size);
205
206}
207
208static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
209{
210 u64 rgnbegin, rgnend;
211 u64 end = base + size;
212 int i;
213
214 rgnbegin = rgnend = 0; /* suppress gcc warnings */
215
216 /* Find the region where (base, size) belongs to */
217 for (i=0; i < rgn->cnt; i++) {
218 rgnbegin = rgn->region[i].base;
219 rgnend = rgnbegin + rgn->region[i].size;
220
221 if ((rgnbegin <= base) && (end <= rgnend))
222 break;
223 }
224
225 /* Didn't find the region */
226 if (i == rgn->cnt)
227 return -1;
228
229 /* Check to see if we are removing entire region */
230 if ((rgnbegin == base) && (rgnend == end)) {
231 memblock_remove_region(rgn, i);
232 return 0;
233 }
234
235 /* Check to see if region is matching at the front */
236 if (rgnbegin == base) {
237 rgn->region[i].base = end;
238 rgn->region[i].size -= size;
239 return 0;
240 }
241
242 /* Check to see if the region is matching at the end */
243 if (rgnend == end) {
244 rgn->region[i].size -= size;
245 return 0;
246 }
247
248 /*
249 * We need to split the entry - adjust the current one to the
250 * beginning of the hole and add the region after the hole.
251 */
252 rgn->region[i].size = base - rgn->region[i].base;
253 return memblock_add_region(rgn, end, rgnend - end);
254}
255
256long memblock_remove(u64 base, u64 size)
257{
258 return __memblock_remove(&memblock.memory, base, size);
259}
260
261long __init memblock_free(u64 base, u64 size)
262{
263 return __memblock_remove(&memblock.reserved, base, size);
264}
265
266long __init memblock_reserve(u64 base, u64 size)
267{
268 struct memblock_region *_rgn = &memblock.reserved;
269
270 BUG_ON(0 == size);
271
272 return memblock_add_region(_rgn, base, size);
273}
274
275long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
276{
277 unsigned long i;
278
279 for (i = 0; i < rgn->cnt; i++) {
280 u64 rgnbase = rgn->region[i].base;
281 u64 rgnsize = rgn->region[i].size;
282 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
283 break;
284 }
285
286 return (i < rgn->cnt) ? i : -1;
287}
288
289static u64 memblock_align_down(u64 addr, u64 size)
290{
291 return addr & ~(size - 1);
292}
293
294static u64 memblock_align_up(u64 addr, u64 size)
295{
296 return (addr + (size - 1)) & ~(size - 1);
297}
298
299static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
300 u64 size, u64 align)
301{
302 u64 base, res_base;
303 long j;
304
305 base = memblock_align_down((end - size), align);
306 while (start <= base) {
307 j = memblock_overlaps_region(&memblock.reserved, base, size);
308 if (j < 0) {
309 /* this area isn't reserved, take it */
310 if (memblock_add_region(&memblock.reserved, base, size) < 0)
311 base = ~(u64)0;
312 return base;
313 }
314 res_base = memblock.reserved.region[j].base;
315 if (res_base < size)
316 break;
317 base = memblock_align_down(res_base - size, align);
318 }
319
320 return ~(u64)0;
321}
322
323static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
324 u64 (*nid_range)(u64, u64, int *),
325 u64 size, u64 align, int nid)
326{
327 u64 start, end;
328
329 start = mp->base;
330 end = start + mp->size;
331
332 start = memblock_align_up(start, align);
333 while (start < end) {
334 u64 this_end;
335 int this_nid;
336
337 this_end = nid_range(start, end, &this_nid);
338 if (this_nid == nid) {
339 u64 ret = memblock_alloc_nid_unreserved(start, this_end,
340 size, align);
341 if (ret != ~(u64)0)
342 return ret;
343 }
344 start = this_end;
345 }
346
347 return ~(u64)0;
348}
349
350u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
351 u64 (*nid_range)(u64 start, u64 end, int *nid))
352{
353 struct memblock_region *mem = &memblock.memory;
354 int i;
355
356 BUG_ON(0 == size);
357
358 size = memblock_align_up(size, align);
359
360 for (i = 0; i < mem->cnt; i++) {
361 u64 ret = memblock_alloc_nid_region(&mem->region[i],
362 nid_range,
363 size, align, nid);
364 if (ret != ~(u64)0)
365 return ret;
366 }
367
368 return memblock_alloc(size, align);
369}
370
371u64 __init memblock_alloc(u64 size, u64 align)
372{
373 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
374}
375
376u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
377{
378 u64 alloc;
379
380 alloc = __memblock_alloc_base(size, align, max_addr);
381
382 if (alloc == 0)
383 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
384 (unsigned long long) size, (unsigned long long) max_addr);
385
386 return alloc;
387}
388
389u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
390{
391 long i, j;
392 u64 base = 0;
393 u64 res_base;
394
395 BUG_ON(0 == size);
396
397 size = memblock_align_up(size, align);
398
399 /* On some platforms, make sure we allocate lowmem */
400 /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
401 if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
402 max_addr = MEMBLOCK_REAL_LIMIT;
403
404 for (i = memblock.memory.cnt - 1; i >= 0; i--) {
405 u64 memblockbase = memblock.memory.region[i].base;
406 u64 memblocksize = memblock.memory.region[i].size;
407
408 if (memblocksize < size)
409 continue;
410 if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
411 base = memblock_align_down(memblockbase + memblocksize - size, align);
412 else if (memblockbase < max_addr) {
413 base = min(memblockbase + memblocksize, max_addr);
414 base = memblock_align_down(base - size, align);
415 } else
416 continue;
417
418 while (base && memblockbase <= base) {
419 j = memblock_overlaps_region(&memblock.reserved, base, size);
420 if (j < 0) {
421 /* this area isn't reserved, take it */
422 if (memblock_add_region(&memblock.reserved, base, size) < 0)
423 return 0;
424 return base;
425 }
426 res_base = memblock.reserved.region[j].base;
427 if (res_base < size)
428 break;
429 base = memblock_align_down(res_base - size, align);
430 }
431 }
432 return 0;
433}
434
435/* You must call memblock_analyze() before this. */
436u64 __init memblock_phys_mem_size(void)
437{
438 return memblock.memory.size;
439}
440
441u64 memblock_end_of_DRAM(void)
442{
443 int idx = memblock.memory.cnt - 1;
444
445 return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
446}
447
448/* You must call memblock_analyze() after this. */
449void __init memblock_enforce_memory_limit(u64 memory_limit)
450{
451 unsigned long i;
452 u64 limit;
453 struct memblock_property *p;
454
455 if (!memory_limit)
456 return;
457
458 /* Truncate the memblock regions to satisfy the memory limit. */
459 limit = memory_limit;
460 for (i = 0; i < memblock.memory.cnt; i++) {
461 if (limit > memblock.memory.region[i].size) {
462 limit -= memblock.memory.region[i].size;
463 continue;
464 }
465
466 memblock.memory.region[i].size = limit;
467 memblock.memory.cnt = i + 1;
468 break;
469 }
470
471 if (memblock.memory.region[0].size < memblock.rmo_size)
472 memblock.rmo_size = memblock.memory.region[0].size;
473
474 memory_limit = memblock_end_of_DRAM();
475
476 /* And truncate any reserves above the limit also. */
477 for (i = 0; i < memblock.reserved.cnt; i++) {
478 p = &memblock.reserved.region[i];
479
480 if (p->base > memory_limit)
481 p->size = 0;
482 else if ((p->base + p->size) > memory_limit)
483 p->size = memory_limit - p->base;
484
485 if (p->size == 0) {
486 memblock_remove_region(&memblock.reserved, i);
487 i--;
488 }
489 }
490}
491
492int __init memblock_is_reserved(u64 addr)
493{
494 int i;
495
496 for (i = 0; i < memblock.reserved.cnt; i++) {
497 u64 upper = memblock.reserved.region[i].base +
498 memblock.reserved.region[i].size - 1;
499 if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
500 return 1;
501 }
502 return 0;
503}
504
505int memblock_is_region_reserved(u64 base, u64 size)
506{
507 return memblock_overlaps_region(&memblock.reserved, base, size);
508}
509
510/*
511 * Given a <base, len>, find which memory regions belong to this range.
512 * Adjust the request and return a contiguous chunk.
513 */
514int memblock_find(struct memblock_property *res)
515{
516 int i;
517 u64 rstart, rend;
518
519 rstart = res->base;
520 rend = rstart + res->size - 1;
521
522 for (i = 0; i < memblock.memory.cnt; i++) {
523 u64 start = memblock.memory.region[i].base;
524 u64 end = start + memblock.memory.region[i].size - 1;
525
526 if (start > rend)
527 return -1;
528
529 if ((end >= rstart) && (start < rend)) {
530 /* adjust the request */
531 if (rstart < start)
532 rstart = start;
533 if (rend > end)
534 rend = end;
535 res->base = rstart;
536 res->size = rend - rstart + 1;
537 return 0;
538 }
539 }
540 return -1;
541}
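
One behavioural detail in the new file worth spelling out: early_param("memblock", early_memblock) means the dump machinery is gated on the kernel command line, so memblock_dump_all() stays silent unless the kernel is booted with memblock=debug (formerly lmb=debug). Under that option the output follows the pr_info() formats in memblock_dump_all()/memblock_dump() above; a sketch of its shape (values illustrative only):

    /* Booting with memblock=debug makes memblock_dump_all() print:
     *
     *   MEMBLOCK configuration:
     *    rmo_size = 0x0
     *    memory.size = 0x10000000
     *    memory.cnt = 0x1
     *    memory[0x0]     0x0000000000000000 - 0x000000000fffffff, 0x10000000 bytes
     *    reserved.cnt = 0x1
     *    reserved[0x0]   0x0000000000000000 - 0x000000000000ffff, 0x10000 bytes
     */
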