author | Yinghai Lu <yinghai@kernel.org> | 2010-07-12 00:36:09 -0400
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2010-07-14 03:14:00 -0400
commit | 95f72d1ed41a66f1c1c29c24d479de81a0bea36f (patch)
tree | bd92b3804ff0bea083d69af0ede52f99ab34c0af /arch/powerpc/mm
parent | 1c5474a65bf15a4cb162dfff86d6d0b5a08a740c (diff)
lmb: rename to memblock
via the following scripts:
FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')
sed -i \
-e 's/lmb/memblock/g' \
-e 's/LMB/MEMBLOCK/g' \
$FILES
for N in $(find . -name lmb.[ch]); do
M=$(echo $N | sed 's/lmb/memblock/g')
mv $N $M
done
and then back out the incorrect replacements the scripts made (e.g. in lmbench and dlmb).
Also move memblock.c from lib/ to mm/.
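
A post-rename sanity check is sketched below. It is not part of the original commit; it is a minimal sketch assuming GNU grep and the same path exclusions used by the scripts above:

# hypothetical check: list any surviving lmb/LMB references,
# skipping the excluded paths and the reverted false positives
FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')
grep -nE 'lmb|LMB' $FILES | grep -vE 'lmbench|dlmb' || echo "rename looks complete"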
Suggested-by: Ingo Molnar <mingo@elte.hu>
Acked-by: "H. Peter Anvin" <hpa@zytor.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/40x_mmu.c | 2
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c | 26
-rw-r--r-- | arch/powerpc/mm/init_32.c | 16
-rw-r--r-- | arch/powerpc/mm/init_64.c | 2
-rw-r--r-- | arch/powerpc/mm/mem.c | 78
-rw-r--r-- | arch/powerpc/mm/numa.c | 84
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c | 6
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 4
-rw-r--r-- | arch/powerpc/mm/ppc_mmu_32.c | 4
-rw-r--r-- | arch/powerpc/mm/stab.c | 4
-rw-r--r-- | arch/powerpc/mm/tlb_nohash.c | 4
11 files changed, 115 insertions, 115 deletions
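
For reference, the same diffstat and patch can be regenerated from a kernel git tree. This is an illustrative invocation, not part of the original page; it assumes the commit is present in your local history:

# regenerate the stat and patch for this subtree
git show --stat --patch 95f72d1ed41a66f1c1c29c24d479de81a0bea36f -- arch/powerpc/mm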
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 65abfcfaaa9e..1dc2fa5ce1bd 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 16 and 4 MiB
 	 * pages. Consequently, restrict the top end of RAM currently
-	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3ecdcec0a39e..98f262de5585 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	printk(KERN_INFO "Huge page(16GB) memory: "
 			"addr = 0x%lX size = 0x%lX pages = %d\n",
 			phys_addr, block_size, expected_pages);
-	if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-		lmb_reserve(phys_addr, block_size * expected_pages);
+	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+		memblock_reserve(phys_addr, block_size * expected_pages);
 		add_gpage(phys_addr, block_size, expected_pages);
 	}
 	return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
 	 * and we have at least 1G of RAM at boot
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-	    lmb_phys_mem_size() >= 0x40000000)
+	    memblock_phys_mem_size() >= 0x40000000)
 		mmu_vmemmap_psize = MMU_PAGE_16M;
 	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
 		mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
 		return 1UL << ppc64_pft_size;
 
 	/* round mem_size up to next power of 2 */
-	mem_size = lmb_phys_mem_size();
+	mem_size = memblock_phys_mem_size();
 	rnd_mem_size = 1UL << __ilog2(mem_size);
 	if (rnd_mem_size < mem_size)
 		rnd_mem_size <<= 1;
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
 	else
 		limit = 0;
 
-	table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+	table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
 	DBG("Hash table allocated at %lx, size: %lx\n", table,
 	    htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-						    1, lmb.rmo_size));
+	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+						    1, memblock.rmo_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < lmb.memory.cnt; i++) {
-		base = (unsigned long)__va(lmb.memory.region[i].base);
-		size = lmb.memory.region[i].size;
+	for (i=0; i < memblock.memory.cnt; i++) {
+		base = (unsigned long)__va(memblock.memory.region[i].base);
+		size = memblock.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
-		 * in such a way that it will not cross two lmb regions and
+		 * in such a way that it will not cross two memblock regions and
 		 * will fit within a single 16Mb page.
 		 * The DART space is assumed to be a full 16Mb region even if
 		 * we only use 2Mb of that space. We will use more of it later
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 767333005eb4..6a6975dc2654 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -30,7 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
@@ -136,17 +136,17 @@ void __init MMU_init(void)
 	/* parse args from command line */
 	MMU_setup();
 
-	if (lmb.memory.cnt > 1) {
+	if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-		lmb.memory.cnt = 1;
-		lmb_analyze();
+		memblock.memory.cnt = 1;
+		memblock_analyze();
 		printk(KERN_WARNING "Only using first contiguous memory region");
 #else
 		wii_memory_fixups();
 #endif
 	}
 
-	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+	total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
 	lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
@@ -161,8 +161,8 @@ void __init MMU_init(void)
 		lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
-		lmb_enforce_memory_limit(lowmem_end_addr);
-		lmb_analyze();
+		memblock_enforce_memory_limit(lowmem_end_addr);
+		memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
 
@@ -200,7 +200,7 @@ void __init *early_get_page(void)
 	if (init_bootmem_done) {
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
-		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
 					__initial_memory_limit_addr));
 	}
 	return p;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e267f223fdff..71f1415e2472 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -40,7 +40,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/poison.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f594d774bf7..1a84a8d00005 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
 	int i;
-	for (i=0; i < lmb.memory.cnt; i++) {
+	for (i=0; i < memblock.memory.cnt; i++) {
 		unsigned long base;
 
-		base = lmb.memory.region[i].base;
+		base = memblock.memory.region[i].base;
 
 		if ((paddr >= base) &&
-			(paddr < (base + lmb.memory.region[i].size))) {
+			(paddr < (base + memblock.memory.region[i].size))) {
 			return 1;
 		}
 	}
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
  * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures. Walk through the
+ * Instead it maintains it in memblock.memory structures. Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct lmb_property res;
+	struct memblock_property res;
 	unsigned long pfn, len;
 	u64 end;
 	int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 	res.size = (u64) nr_pages << PAGE_SHIFT;
 
 	end = res.base + res.size - 1;
-	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+	while ((res.base < end) && (memblock_find(&res) >= 0)) {
 		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
 		len = (unsigned long)(res.size >> PAGE_SHIFT);
 		ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
 	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
 	 */
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
 	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < lmb.memory.cnt; i++) {
+	for (i = 0; i < memblock.memory.cnt; i++) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long addr = lmb.reserved.region[i].base +
-				     lmb_size_bytes(&lmb.reserved, i) - 1;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long addr = memblock.reserved.region[i].base +
+				     memblock_size_bytes(&memblock.reserved, i) - 1;
 		if (addr < lowmem_end_addr)
-			reserve_bootmem(lmb.reserved.region[i].base,
-					lmb_size_bytes(&lmb.reserved, i),
+			reserve_bootmem(memblock.reserved.region[i].base,
+					memblock_size_bytes(&memblock.reserved, i),
 					BOOTMEM_DEFAULT);
-		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
 			unsigned long adjusted_size = lowmem_end_addr -
-				      lmb.reserved.region[i].base;
-			reserve_bootmem(lmb.reserved.region[i].base,
+				      memblock.reserved.region[i].base;
+			reserve_bootmem(memblock.reserved.region[i].base,
 					adjusted_size, BOOTMEM_DEFAULT);
 		}
 	}
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
+	for (i = 0; i < memblock.reserved.cnt; i++)
+		reserve_bootmem(memblock.reserved.region[i].base,
+				memblock_size_bytes(&memblock.reserved, i),
 				BOOTMEM_DEFAULT);
 
 #endif
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long lmb_next_region_start_pfn,
-		      lmb_region_max_pfn;
+	unsigned long memblock_next_region_start_pfn,
+		      memblock_region_max_pfn;
 	int i;
 
-	for (i = 0; i < lmb.memory.cnt - 1; i++) {
-		lmb_region_max_pfn =
-			(lmb.memory.region[i].base >> PAGE_SHIFT) +
-			(lmb.memory.region[i].size >> PAGE_SHIFT);
-		lmb_next_region_start_pfn =
-			lmb.memory.region[i+1].base >> PAGE_SHIFT;
+	for (i = 0; i < memblock.memory.cnt - 1; i++) {
+		memblock_region_max_pfn =
+			(memblock.memory.region[i].base >> PAGE_SHIFT) +
+			(memblock.memory.region[i].size >> PAGE_SHIFT);
+		memblock_next_region_start_pfn =
+			memblock.memory.region[i+1].base >> PAGE_SHIFT;
 
-		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
-			register_nosave_region(lmb_region_max_pfn,
-					       lmb_next_region_start_pfn);
+		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
+			register_nosave_region(memblock_region_max_pfn,
+					       memblock_next_region_start_pfn);
 	}
 
 	return 0;
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-	unsigned long total_ram = lmb_phys_mem_size();
-	phys_addr_t top_of_ram = lmb_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
+	phys_addr_t top_of_ram = memblock_end_of_DRAM();
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
 	swiotlb_init(1);
 #endif
 
-	num_physpages = lmb.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
 		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 			struct page *page = pfn_to_page(pfn);
-			if (lmb_is_reserved(pfn << PAGE_SHIFT))
+			if (memblock_is_reserved(pfn << PAGE_SHIFT))
 				continue;
 			ClearPageReserved(page);
 			init_page_count(page);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 80d110635d24..f47364585ecd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -351,7 +351,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED	0x00000080
 
 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -372,8 +372,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retreive and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries.  Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a number N of memblock
+ * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as layed out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retreive and validate the ibm,memblock-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_lmb_size(struct device_node *memory)
+static u64 of_get_memblock_size(struct device_node *memory)
 {
 	const u32 *prop;
 	u32 len;
 
-	prop = of_get_property(memory, "ibm,lmb-size", &len);
+	prop = of_get_property(memory, "ibm,memblock-size", &len);
 	if (!prop || len < sizeof(unsigned int))
 		return 0;
 
@@ -540,19 +540,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 					     unsigned long size)
 {
 	/*
-	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
+	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 	 * we've already adjusted it for the limit and it takes care of
 	 * having memory holes below the limit.  Also, in the case of
 	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 	 */
 
-	if (start + size <= lmb_end_of_DRAM())
+	if (start + size <= memblock_end_of_DRAM())
 		return size;
 
-	if (start >= lmb_end_of_DRAM())
+	if (start >= memblock_end_of_DRAM())
 		return 0;
 
-	return lmb_end_of_DRAM() - start;
+	return memblock_end_of_DRAM() - start;
 }
 
 /*
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
 	/*
-	 * For each lmb in ibm,dynamic-memory a corresponding
+	 * For each memblock in ibm,dynamic-memory a corresponding
 	 * entry in linux,drconf-usable-memory property contains
 	 * a counter followed by that many (base, size) duple.
 	 * read the counter from linux,drconf-usable-memory
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
 	const u32 *dm, *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
-	unsigned long lmb_size, base, size, sz;
+	unsigned long memblock_size, base, size, sz;
 	int nid;
 	struct assoc_arrays aa;
 
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	if (!n)
 		return;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 			continue;
 
 		base = drmem.base_addr;
-		size = lmb_size;
+		size = memblock_size;
 		ranges = 1;
 
 		if (is_kexec_kdump) {
@@ -731,7 +731,7 @@ new_range:
 	}
 
 	/*
-	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
 	 */
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -743,8 +743,8 @@ new_range:
 
 static void __init setup_nonnuma(void)
 {
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = memblock_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
 	unsigned int i, nid = 0;
 
@@ -753,9 +753,9 @@ static void __init setup_nonnuma(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < lmb.memory.cnt; ++i) {
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+	for (i = 0; i < memblock.memory.cnt; ++i) {
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -813,7 +813,7 @@ static void __init dump_numa_memory_topology(void)
 
 		count = 0;
 
-		for (i = 0; i < lmb_end_of_DRAM();
+		for (i = 0; i < memblock_end_of_DRAM();
 		     i += (1 << SECTION_SIZE_BITS)) {
 			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
@@ -833,7 +833,7 @@ static void __init dump_numa_memory_topology(void)
 }
 
 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, satisfying the memblock or bootmem allocator where
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
 *
@@ -847,11 +847,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 	int new_nid;
 	unsigned long ret_paddr;
 
-	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret_paddr)
-		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 
 	if (!ret_paddr)
 		panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -861,14 +861,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 
 	/*
 	 * We initialize the nodes in numeric order: 0, 1, 2...
-	 * and hand over control from the LMB allocator to the
+	 * and hand over control from the MEMBLOCK allocator to the
 	 * bootmem allocator.  If this function is called for
 	 * node 5, then we know that all nodes <5 are using the
-	 * bootmem allocator instead of the LMB allocator.
+	 * bootmem allocator instead of the MEMBLOCK allocator.
 	 *
 	 * So, check the nid from which this allocation came
 	 * and double check to see if we need to use bootmem
-	 * instead of the LMB.  We don't free the LMB memory
+	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
 	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
@@ -893,9 +893,9 @@ static void mark_reserved_regions_for_nid(int nid)
 	struct pglist_data *node = NODE_DATA(nid);
 	int i;
 
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long physbase = memblock.reserved.region[i].base;
+		unsigned long size = memblock.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
@@ -903,7 +903,7 @@ static void mark_reserved_regions_for_nid(int nid)
 				node->node_spanned_pages;
 
 		/*
-		 * Check to make sure that this lmb.reserved area is
+		 * Check to make sure that this memblock.reserved area is
 		 * within the bounds of the node that we care about.
 		 * Checking the nid of the start and end points is not
 		 * sufficient because the reserved area could span the
@@ -961,7 +961,7 @@ void __init do_init_bootmem(void)
 	int nid;
 
 	min_low_pfn = 0;
-	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn;
 
 	if (parse_numa_properties())
@@ -1038,7 +1038,7 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	free_area_init_nodes(max_zone_pfns);
 }
 
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
 	const u32 *dm;
 	unsigned int drconf_cell_cnt, rc;
-	unsigned long lmb_size;
+	unsigned long memblock_size;
 	struct assoc_arrays aa;
 	int nid = -1;
 
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 	if (!drconf_cell_cnt)
 		return -1;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return -1;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 			continue;
 
 		if ((scn_addr < drmem.base_addr)
-		    || (scn_addr >= (drmem.base_addr + lmb_size)))
+		    || (scn_addr >= (drmem.base_addr + memblock_size)))
 			continue;
 
 		nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1113,7 +1113,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
 */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1154,8 +1154,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
 
 /*
  * Find the node associated with a hot added memory section.  Section
- * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
 */
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 34347b2e7e31..a87ead0138b4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgtable.h>
@@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
 	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
-	    !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
+	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
 		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
@@ -331,7 +331,7 @@ void __init mapin_ram(void)
 		s = mmu_mapin_ram(top);
 		__mapin_ram_chunk(s, top);
 
-		top = lmb_end_of_DRAM();
+		top = memblock_end_of_DRAM();
 		s = wii_mmu_mapin_mem2(top);
 		__mapin_ram_chunk(s, top);
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d050fc8d9714..21d6dfab7942 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -34,7 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgalloc.h>
@@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size)
 	if (init_bootmem_done)
 		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
 	else
-		pt = __va(lmb_alloc_base(size, size,
+		pt = __va(memblock_alloc_base(size, size,
 					 __pa(MAX_DMA_ADDRESS)));
 	memset(pt, 0, size);
 
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f11c2cdcb0fe..f8a01829d64f 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/mmu.h>
@@ -223,7 +223,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
 				   __initial_memory_limit_addr));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 687fddaa24c5..446a01842a73 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -12,7 +12,7 @@
  * 2 of the License, or (at your option) any later version.
 */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -252,7 +252,7 @@ void __init stabs_alloc(void)
 		if (cpu == 0)
 			continue; /* stab for CPU 0 is statically allocated */
 
-		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
 		newstab = (unsigned long)__va(newstab);
 
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index e81d5d67f834..d8695b02a968 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -34,7 +34,7 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -426,7 +426,7 @@ static void __early_init_mmu(int boot_cpu)
 	/* Set the global containing the top of the linear mapping
 	 * for use by the TLB miss code
 	 */
-	linear_map_top = lmb_end_of_DRAM();
+	linear_map_top = memblock_end_of_DRAM();
 
 	/* A sync won't hurt us after mucking around with
 	 * the MMU configuration