Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 51
1 file changed, 22 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 63547394048c..d135f93cb0f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -61,7 +61,6 @@
 #include <asm/xmon.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
-#include <asm/swiotlb.h>
 #include <asm/mmu_context.h>
 
 #include "setup.h"
@@ -159,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -405,7 +404,7 @@ void __init setup_system(void)
 
 	printk("-----------------------------------------------------\n");
 	printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
-	printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size());
+	printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());
 	if (ppc64_caches.dline_size != 0x80)
 		printk("ppc64_caches.dcache_line_size = 0x%x\n",
 		       ppc64_caches.dline_size);
@@ -425,9 +424,17 @@ void __init setup_system(void)
 	DBG(" <- setup_system()\n");
 }
 
-#ifdef CONFIG_IRQSTACKS
+static u64 slb0_limit(void)
+{
+	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+		return 1UL << SID_SHIFT_1T;
+	}
+	return 1UL << SID_SHIFT;
+}
+
 static void __init irqstack_early_init(void)
 {
+	u64 limit = slb0_limit();
 	unsigned int i;
 
 	/*
@@ -436,16 +443,13 @@ static void __init irqstack_early_init(void)
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
-					    THREAD_SIZE, 0x10000000));
+			__va(memblock_alloc_base(THREAD_SIZE,
+					    THREAD_SIZE, limit));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
-					    THREAD_SIZE, 0x10000000));
+			__va(memblock_alloc_base(THREAD_SIZE,
+					    THREAD_SIZE, limit));
 	}
 }
-#else
-#define irqstack_early_init()
-#endif
 
 #ifdef CONFIG_PPC_BOOK3E
 static void __init exc_lvl_early_init(void)
@@ -454,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
 #else
@@ -471,7 +475,7 @@ static void __init exc_lvl_early_init(void)
  */
 static void __init emergency_stack_init(void)
 {
-	unsigned long limit;
+	u64 limit;
 	unsigned int i;
 
 	/*
@@ -483,11 +487,11 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(0x10000000ULL, lmb.rmo_size);
+	limit = min(slb0_limit(), memblock.rmo_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
-		sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
 		sp += THREAD_SIZE;
 		paca[i].emergency_sp = __va(sp);
 	}
@@ -541,11 +545,6 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();
 
 	/* Initialize the MMU context management stuff */
@@ -579,12 +578,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg)
 	printk("[boot]%04x %s\n", src, msg);
 }
 
-void cpu_die(void)
-{
-	if (ppc_md.cpu_die)
-		ppc_md.cpu_die();
-}
-
 #ifdef CONFIG_SMP
 #define PCPU_DYN_SIZE		()
 