Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 643dcac40fcb..d135f93cb0f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
 
 	printk("-----------------------------------------------------\n");
 	printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
-	printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size());
+	printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());
 	if (ppc64_caches.dline_size != 0x80)
 		printk("ppc64_caches.dcache_line_size = 0x%x\n",
 		       ppc64_caches.dline_size);
@@ -443,10 +443,10 @@ static void __init irqstack_early_init(void)
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, limit));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, limit));
 	}
 }
@@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
 #else
@@ -487,11 +487,11 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), lmb.rmo_size);
+	limit = min(slb0_limit(), memblock.rmo_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
-		sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
 		sp += THREAD_SIZE;
 		paca[i].emergency_sp = __va(sp);
 	}
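The change is a mechanical rename: each lmb_* symbol becomes the corresponding memblock_* symbol with an unchanged signature, and <linux/lmb.h> becomes <linux/memblock.h>. Below is a minimal sketch of the allocation pattern the hunks above touch, assuming the 2.6.35-era memblock interface (u64 physical-address return values; memblock_alloc_base() panics rather than returning on failure). The helper name early_stack_alloc() is hypothetical and not part of the patch.

/*
 * Sketch only: in-kernel early-boot code, not a standalone program.
 * Before this patch the same calls were spelled lmb_alloc_base()
 * and lmb_alloc(), with identical arguments.
 */
#include <linux/init.h>		/* __init */
#include <linux/memblock.h>	/* was <linux/lmb.h> */
#include <asm/page.h>		/* __va(): physical -> kernel virtual */
#include <asm/thread_info.h>	/* THREAD_SIZE */

/* Hypothetical helper mirroring irqstack_early_init()'s pattern. */
static void * __init early_stack_alloc(u64 limit)
{
	u64 pa;

	/* THREAD_SIZE bytes, THREAD_SIZE-aligned, below `limit`. */
	pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);

	/* memblock hands back a physical address; convert before use. */
	return __va(pa);
}

memblock_alloc(size, align) is the same operation without the upper bound; emergency_stack_init() additionally clamps `limit` to memblock.rmo_size so the stacks remain addressable in real mode during early CPU bringup.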