path: root/arch/powerpc/mm/hash_utils_64.c
Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
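This diff is the arch/powerpc/mm part of the tree-wide lmb -> memblock rename: every lmb_* call and the global lmb structure are replaced by the equivalent memblock_* names, with arguments and semantics unchanged. A minimal, illustrative kernel-context sketch of the renamed calls this file uses is below; it is not part of the patch, is not a standalone program, and assumes a kernel of this vintage where memblock_alloc_base() still exists.

#include <linux/init.h>
#include <linux/memblock.h>

/* Illustrative only: the memblock calls this diff switches to,
 * each keeping the argument order of its old lmb_* counterpart.
 */
static void __init memblock_usage_sketch(void)
{
        u64 mem_size = memblock_phys_mem_size();  /* was lmb_phys_mem_size() */
        u64 mem_end  = memblock_end_of_DRAM();    /* was lmb_end_of_DRAM()   */
        u64 buf;

        /* carve out the top 16MB so the allocator never hands it out
         * (was lmb_reserve) */
        memblock_reserve(mem_end - 0x1000000, 0x1000000);

        /* 64KB buffer, 64KB aligned, allocated below mem_end
         * (was lmb_alloc_base) */
        buf = memblock_alloc_base(0x10000, 0x10000, mem_end);

        (void)mem_size;
        (void)buf;
}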
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3ecdcec0a39e..98f262de5585 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
         printk(KERN_INFO "Huge page(16GB) memory: "
                         "addr = 0x%lX size = 0x%lX pages = %d\n",
                         phys_addr, block_size, expected_pages);
-        if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-                lmb_reserve(phys_addr, block_size * expected_pages);
+        if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+                memblock_reserve(phys_addr, block_size * expected_pages);
                 add_gpage(phys_addr, block_size, expected_pages);
         }
         return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
          * and we have at least 1G of RAM at boot
          */
         if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-            lmb_phys_mem_size() >= 0x40000000)
+            memblock_phys_mem_size() >= 0x40000000)
                 mmu_vmemmap_psize = MMU_PAGE_16M;
         else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                 mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
                 return 1UL << ppc64_pft_size;
 
         /* round mem_size up to next power of 2 */
-        mem_size = lmb_phys_mem_size();
+        mem_size = memblock_phys_mem_size();
         rnd_mem_size = 1UL << __ilog2(mem_size);
         if (rnd_mem_size < mem_size)
                 rnd_mem_size <<= 1;
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
                 else
                         limit = 0;
 
-                table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+                table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
                 DBG("Hash table allocated at %lx, size: %lx\n", table,
                     htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
         prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-        linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-                                                    1, lmb.rmo_size));
+        linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+        linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+                                                    1, memblock.rmo_size));
         memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
          */
 
         /* create bolted the linear mapping in the hash table */
-        for (i=0; i < lmb.memory.cnt; i++) {
-                base = (unsigned long)__va(lmb.memory.region[i].base);
-                size = lmb.memory.region[i].size;
+        for (i=0; i < memblock.memory.cnt; i++) {
+                base = (unsigned long)__va(memblock.memory.region[i].base);
+                size = memblock.memory.region[i].size;
 
                 DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                     base, size, prot);
 
 #ifdef CONFIG_U3_DART
                 /* Do not map the DART space. Fortunately, it will be aligned
-                 * in such a way that it will not cross two lmb regions and
+                 * in such a way that it will not cross two memblock regions and
                  * will fit within a single 16Mb page.
                  * The DART space is assumed to be a full 16Mb region even if
                  * we only use 2Mb of that space. We will use more of it later